diff --git a/ngraph/changes.md b/ngraph/changes.md index bd5d3c281ef..e3e7dc43922 100644 --- a/ngraph/changes.md +++ b/ngraph/changes.md @@ -103,9 +103,9 @@ methods have been decorated with deprecated warnings which may be enabled by set To update, remove the passed argument. For example, ```C++ // Old -make_shared<op::Parameter>(make_shared<TensorViewType>(element::f32, Shape{2, 4})); +make_shared<op::Parameter>(make_shared<TensorViewType>(element::Type_t::f32, Shape{2, 4})); // New (remove TensorViewType) -make_shared<op::Parameter>(element::f32, Shape{2, 4}); +make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 4}); // Old make_shared<Function>(results, result_type, parameters); diff --git a/ngraph/core/builder/include/ngraph/builder/autobroadcast.hpp b/ngraph/core/builder/include/ngraph/builder/autobroadcast.hpp index a4569cb1bf8..c6b78ea4a93 100644 --- a/ngraph/core/builder/include/ngraph/builder/autobroadcast.hpp +++ b/ngraph/core/builder/include/ngraph/builder/autobroadcast.hpp @@ -169,7 +169,7 @@ namespace ngraph std::size_t start_match_axis) { auto shape_const = - op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape); + op::Constant::create(element::Type_t::u64, Shape{new_shape.size()}, new_shape); return std::make_shared( value, shape_const, diff --git a/ngraph/core/builder/src/builder/autobroadcast.cpp b/ngraph/core/builder/src/builder/autobroadcast.cpp index 9ac059f47c7..129c5403357 100644 --- a/ngraph/core/builder/src/builder/autobroadcast.cpp +++ b/ngraph/core/builder/src/builder/autobroadcast.cpp @@ -177,8 +177,8 @@ namespace ngraph if (!broadcast_axes.empty()) { - auto shape_const = - op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape); + auto shape_const = op::Constant::create( + element::Type_t::u64, Shape{output_shape.size()}, output_shape); broadcasted_node = make_shared( broadcasted_node, shape_const, @@ -236,8 +236,8 @@ namespace ngraph trimmed_value = builder::opset1::reshape(value, trimmed_value_shape); } - auto shape_const = - op::Constant::create(element::u64, Shape{output_shape.size()}, output_shape); + auto shape_const = op::Constant::create( + element::Type_t::u64, Shape{output_shape.size()}, output_shape); auto value_bcast = make_shared( trimmed_value, shape_const, opset1::get_axes_mapping_output(output_shape, axes)); @@ -354,7 +354,8 @@ namespace ngraph iota(begin(axes) + start_match_axis, end(axes), start_match_axis + input_shape.size()); auto axes_mapping = opset1::get_axes_mapping(output_shape, axes); - return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping); + return op::Constant::create( + element::Type_t::i64, Shape{axes_mapping.size()}, axes_mapping); } namespace opset1 @@ -434,14 +435,15 @@ namespace ngraph vector mapping(input_shape.size()); iota(begin(mapping), end(mapping), start_match_axis); - return op::Constant::create(element::i64, Shape{mapping.size()}, mapping); + return op::Constant::create(element::Type_t::i64, Shape{mapping.size()}, mapping); } Output get_axes_mapping_output(const Shape& output_shape, const AxisSet& broadcast_axes) { vector axes_mapping{get_axes_mapping(output_shape, broadcast_axes)}; - return op::Constant::create(element::i64, Shape{axes_mapping.size()}, axes_mapping); + return op::Constant::create( + element::Type_t::i64, Shape{axes_mapping.size()}, axes_mapping); } Output make_broadcast(const Output& node, @@ -450,7 +452,8 @@ namespace ngraph { return make_shared( node, - op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), + op::Constant::create( + element::Type_t::i64, Shape{target_shape.size()}, target_shape), 
get_axes_mapping_output(target_shape, broadcast_axes)); } @@ -460,7 +463,8 @@ namespace ngraph { return make_shared( node, - op::Constant::create(element::i64, Shape{target_shape.size()}, target_shape), + op::Constant::create( + element::Type_t::i64, Shape{target_shape.size()}, target_shape), get_axes_mapping_output(target_shape, node.get_shape(), start_match_axis)); } diff --git a/ngraph/core/builder/src/builder/reduce_ops.cpp b/ngraph/core/builder/src/builder/reduce_ops.cpp index ede1e90bce0..305171c2baf 100644 --- a/ngraph/core/builder/src/builder/reduce_ops.cpp +++ b/ngraph/core/builder/src/builder/reduce_ops.cpp @@ -49,10 +49,10 @@ namespace ngraph const auto dim_values = std::make_shared( value_shape, reduction_axes, - ngraph::opset1::Constant::create(element::i64, {}, {0})); + ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0})); return std::make_shared( - dim_values, ngraph::opset1::Constant::create(element::i64, {}, {0})); + dim_values, ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0})); } std::shared_ptr builder::opset1::mean(const Output& value, @@ -62,7 +62,7 @@ namespace ngraph std::shared_ptr elems_number; const auto value_elem_type = value.get_element_type(); const auto reduction_axes_const = ngraph::opset1::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()); + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()); const auto value_elems_sum = std::make_shared(value, reduction_axes_const, keep_dims); if (value.get_partial_shape().is_static()) @@ -109,7 +109,7 @@ namespace ngraph diff = std::make_shared( std::make_shared(diff, diff), ngraph::opset1::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()), + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()), false); const auto& et = value.get_element_type(); diff --git a/ngraph/core/builder/src/builder/reshape.cpp b/ngraph/core/builder/src/builder/reshape.cpp index cc52942cea5..fe5500ad9ec 100644 --- a/ngraph/core/builder/src/builder/reshape.cpp +++ b/ngraph/core/builder/src/builder/reshape.cpp @@ -47,13 +47,13 @@ shared_ptr builder::opset1::reshape(const Output& value, const Shape auto value_rank = value.get_shape().size(); AxisVector axes_vector(value_rank); std::iota(axes_vector.begin(), axes_vector.end(), 0); - auto axes = op::Constant::create(element::i64, Shape{value_rank}, axes_vector); + auto axes = op::Constant::create(element::Type_t::i64, Shape{value_rank}, axes_vector); return std::make_shared(value, axes); } else { auto out_pattern = op::Constant::create( - element::i64, Shape{shape.size()}, vector(shape.begin(), shape.end())); + element::Type_t::i64, Shape{shape.size()}, vector(shape.begin(), shape.end())); return make_shared(value, out_pattern, false) ->add_provenance_group_members_above({value}); @@ -63,7 +63,7 @@ shared_ptr builder::opset1::reshape(const Output& value, const Shape shared_ptr builder::opset1::reorder_axes(const Output& value, vector axes_order) { const auto axes_order_const = - op::Constant::create(element::i64, + op::Constant::create(element::Type_t::i64, Shape{axes_order.size()}, vector(axes_order.begin(), axes_order.end())); return make_shared(value, axes_order_const) @@ -83,7 +83,7 @@ shared_ptr builder::opset1::transpose(const Output& value) const auto input_rank = std::make_shared(std::make_shared(value)); - const auto neg_one = ngraph::opset1::Constant::create(element::i64, Shape{}, {-1}); + const auto neg_one = 
ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {-1}); const auto start_node = std::make_shared(input_rank, neg_one); const auto reverse_axes_order = std::make_shared(reshape(start_node, Shape{}), // start @@ -114,7 +114,7 @@ namespace ngraph get_normalized_axis_node(const std::shared_ptr node_rank, int64_t axis) { auto axis_node = - ngraph::opset1::Constant::create(element::i64, Shape{1}, {axis}); + ngraph::opset1::Constant::create(element::Type_t::i64, Shape{1}, {axis}); // shortcut for already positive value if (axis >= 0) { @@ -138,11 +138,11 @@ shared_ptr builder::opset1::flatten(const Output& value, int axis) shared_ptr output_shape; if (axis == 0) { - output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {1, -1}); + output_shape = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{2}, {1, -1}); } else if (axis == 1) { - output_shape = ngraph::opset1::Constant::create(element::i64, Shape{2}, {0, -1}); + output_shape = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{2}, {0, -1}); } else { @@ -152,15 +152,15 @@ shared_ptr builder::opset1::flatten(const Output& value, int axis) const auto first_part_dims = make_shared( value_shape, - ngraph::opset1::Constant::create(element::i64, {1}, {0}), + ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {0}), axis_node, vector{}, vector{}); const auto first_part_dims_length = make_shared( - first_part_dims, ngraph::opset1::Constant::create(element::i64, {}, {0}), true); + first_part_dims, ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}), true); const auto remaining_part_length = - ngraph::opset1::Constant::create(element::i64, {1}, {-1}); + ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {-1}); output_shape = make_shared( OutputVector{first_part_dims_length, remaining_part_length}, 0); @@ -230,19 +230,21 @@ shared_ptr builder::opset1::collapse(const Output& value, const auto rank = make_shared(shape); // Split lengths used in VariadicSplit - const auto start_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {start_axis}); - const auto end_axis_node = ngraph::opset1::Constant::create(element::i64, {1}, {end_axis + 1}); + const auto start_axis_node = + ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {start_axis}); + const auto end_axis_node = + ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {end_axis + 1}); const auto collapsed_axis = make_shared(end_axis_node, start_axis_node); const auto post_axis = make_shared(rank, end_axis_node); const auto split_lengths = make_shared( OutputVector{start_axis_node, collapsed_axis, post_axis}, 0); - const auto split_axis = ngraph::opset1::Constant::create(element::i64, {}, {0}); + const auto split_axis = ngraph::opset1::Constant::create(element::Type_t::i64, {}, {0}); const auto split_node = make_shared(shape, split_axis, split_lengths); - const auto reduced_axis = ngraph::opset1::Constant::create(element::i64, {1}, {0}); + const auto reduced_axis = ngraph::opset1::Constant::create(element::Type_t::i64, {1}, {0}); const auto collapsed_axis_size = make_shared(split_node->output(1), reduced_axis, true); diff --git a/ngraph/core/builder/src/builder/split.cpp b/ngraph/core/builder/src/builder/split.cpp index 7b254d3f075..3e47f07a2e5 100644 --- a/ngraph/core/builder/src/builder/split.cpp +++ b/ngraph/core/builder/src/builder/split.cpp @@ -25,9 +25,9 @@ OutputVector builder::opset1::split(const Output& value, const std::vector& split_lengths, int64_t axis) { - const auto axis_node = 
ngraph::opset1::Constant::create(element::i64, Shape{}, {axis}); - const auto split_lengths_node = - ngraph::opset1::Constant::create(element::u64, Shape{split_lengths.size()}, split_lengths); + const auto axis_node = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {axis}); + const auto split_lengths_node = ngraph::opset1::Constant::create( + element::Type_t::u64, Shape{split_lengths.size()}, split_lengths); const auto variadic_split = std::make_shared(value, axis_node, split_lengths_node); @@ -36,7 +36,7 @@ OutputVector builder::opset1::split(const Output& value, OutputVector builder::opset1::split(const Output& value, size_t num_splits, int64_t axis) { - const auto axis_node = ngraph::opset1::Constant::create(element::i64, Shape{}, {axis}); + const auto axis_node = ngraph::opset1::Constant::create(element::Type_t::i64, Shape{}, {axis}); const auto split = std::make_shared(value, axis_node, num_splits); return split->outputs(); diff --git a/ngraph/core/include/ngraph/op/bucketize.hpp b/ngraph/core/include/ngraph/op/bucketize.hpp index 1d9452aeb4d..5449da11a79 100644 --- a/ngraph/core/include/ngraph/op/bucketize.hpp +++ b/ngraph/core/include/ngraph/op/bucketize.hpp @@ -40,7 +40,7 @@ namespace ngraph /// edge of interval. default true = includes right edge Bucketize(const Output& data, const Output& buckets, - const element::Type output_type = element::i64, + const element::Type output_type = element::Type_t::i64, const bool with_right_bound = true); virtual void validate_and_infer_types() override; diff --git a/ngraph/core/include/ngraph/op/constant.hpp b/ngraph/core/include/ngraph/op/constant.hpp index f5e97b71f03..22c90b3e383 100644 --- a/ngraph/core/include/ngraph/op/constant.hpp +++ b/ngraph/core/include/ngraph/op/constant.hpp @@ -273,31 +273,31 @@ namespace ngraph } /// \brief Returns the value of the constant node as a Shape object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. Shape get_shape_val() const; /// \brief Returns the value of the constant node as a Strides /// object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. Strides get_strides_val() const; /// \brief Returns the value of the constant node as a Coordinate /// object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. Coordinate get_coordinate_val() const; /// \brief Returns the value of the constant node as a /// CoordinateDiff object - /// Can only be used on element::i64 nodes. + /// Can only be used on element::Type_t::i64 nodes. CoordinateDiff get_coordinate_diff_val() const; /// \brief Returns the value of the constant node as an AxisVector /// object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. AxisVector get_axis_vector_val() const; /// \brief Returns the value of the constant node as an AxisSet /// object - /// Can only be used on element::i64 nodes and interprets + /// Can only be used on element::Type_t::i64 nodes and interprets /// negative values as zeros. /// Repeated values are allowed. 
AxisSet get_axis_set_val() const; diff --git a/ngraph/core/include/ngraph/op/lstm_sequence.hpp b/ngraph/core/include/ngraph/op/lstm_sequence.hpp index 81cf782ac40..fd7a946c103 100644 --- a/ngraph/core/include/ngraph/op/lstm_sequence.hpp +++ b/ngraph/core/include/ngraph/op/lstm_sequence.hpp @@ -117,7 +117,7 @@ namespace ngraph R, B, Constant::create( - element::f32, + element::Type_t::f32, Shape{(lstm_direction == direction::BIDIRECTIONAL ? 2UL : 1UL), 3UL * static_cast(hidden_size)}, std::vector{0.f}), diff --git a/ngraph/core/include/ngraph/op/non_max_suppression.hpp b/ngraph/core/include/ngraph/op/non_max_suppression.hpp index b6a93610f62..0154cf3733f 100644 --- a/ngraph/core/include/ngraph/op/non_max_suppression.hpp +++ b/ngraph/core/include/ngraph/op/non_max_suppression.hpp @@ -125,14 +125,15 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default values for the last /// 3 inputs @@ -143,11 +144,12 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; @@ -176,7 +178,7 @@ namespace ngraph protected: BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; bool m_sort_result_descending = true; - ngraph::element::Type m_output_type = ngraph::element::i64; + ngraph::element::Type m_output_type = ngraph::element::Type_t::i64; void validate(); int64_t max_boxes_output_from_input() const; }; @@ -205,14 +207,15 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& 
scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default values for the last /// 3 inputs @@ -223,11 +226,12 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); void validate_and_infer_types() override; @@ -261,11 +265,12 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default values in the last. /// 3 inputs. @@ -278,12 +283,13 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default values in the last. /// 2 inputs. 
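The constructors above default `output_type` to `element::Type_t::i64` instead of the deprecated global `element::i64`. This works because `element::Type` is implicitly constructible from the `Type_t` enumerator and, with the new `operator==(const Type_t&)` added in element_type.hpp below, directly comparable against it. A minimal self-contained sketch of that pattern; the names mirror nGraph's, but the bodies are illustrative guesses rather than the real implementation:

```C++
#include <iostream>

namespace element
{
    enum class Type_t
    {
        undefined,
        i32,
        i64,
        f32
    };

    class Type
    {
    public:
        Type() = default;
        Type(Type_t t) // intentionally implicit: lets a Type_t enumerator serve as a default argument
            : m_type{t}
        {
        }
        bool operator==(const Type_t& other) const { return m_type == other; }
        bool operator!=(const Type_t& other) const { return !(*this == other); }

    private:
        Type_t m_type{Type_t::undefined};
    };
} // namespace element

// A default argument written with the enumerator, as in the constructors above.
void report(const element::Type& output_type = element::Type_t::i64)
{
    std::cout << (output_type == element::Type_t::i64 ? "i64" : "other") << '\n';
}

int main()
{
    report();                     // picks up the Type_t::i64 default
    report(element::Type_t::f32); // converts implicitly to element::Type
}
```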
@@ -297,13 +303,14 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation with default value in the last. /// input. @@ -318,14 +325,15 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); /// \brief Constructs a NonMaxSuppression operation. 
/// @@ -340,15 +348,16 @@ namespace ngraph /// \param sort_result_descending Specifies whether it is necessary to sort selected /// boxes across batches /// \param output_type Specifies the output tensor type - NonMaxSuppression(const Output& boxes, - const Output& scores, - const Output& max_output_boxes_per_class, - const Output& iou_threshold, - const Output& score_threshold, - const Output& soft_nms_sigma, - const BoxEncodingType box_encoding = BoxEncodingType::CORNER, - const bool sort_result_descending = true, - const ngraph::element::Type& output_type = ngraph::element::i64); + NonMaxSuppression( + const Output& boxes, + const Output& scores, + const Output& max_output_boxes_per_class, + const Output& iou_threshold, + const Output& score_threshold, + const Output& soft_nms_sigma, + const BoxEncodingType box_encoding = BoxEncodingType::CORNER, + const bool sort_result_descending = true, + const ngraph::element::Type& output_type = ngraph::element::Type_t::i64); bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; @@ -382,7 +391,7 @@ namespace ngraph protected: BoxEncodingType m_box_encoding = BoxEncodingType::CORNER; bool m_sort_result_descending = true; - ngraph::element::Type m_output_type = ngraph::element::i64; + ngraph::element::Type m_output_type = ngraph::element::Type_t::i64; void validate(); }; } // namespace v5 diff --git a/ngraph/core/include/ngraph/op/non_zero.hpp b/ngraph/core/include/ngraph/op/non_zero.hpp index 9f7886c79c3..2f0053431f6 100644 --- a/ngraph/core/include/ngraph/op/non_zero.hpp +++ b/ngraph/core/include/ngraph/op/non_zero.hpp @@ -74,7 +74,7 @@ namespace ngraph const HostTensorVector& inputs) const override; protected: - element::Type m_output_type = element::i64; + element::Type m_output_type = element::Type_t::i64; }; } using v3::NonZero; diff --git a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp index 18947044160..5df323666be 100644 --- a/ngraph/core/include/ngraph/op/scatter_nd_update.hpp +++ b/ngraph/core/include/ngraph/op/scatter_nd_update.hpp @@ -33,7 +33,8 @@ namespace ngraph const NodeTypeInfo& get_type_info() const override { return type_info; } ScatterNDUpdate() = default; /// \param inputs Tensor - /// \param indices Index tensor: Data type must be `element::i32` or `element::i64` + /// \param indices Index tensor: Data type must be `element::Type_t::i32` or + /// `element::Type_t::i64` /// \param updates Tensor: Must have same type as inputs ScatterNDUpdate(const Output& inputs, const Output& indices, diff --git a/ngraph/core/include/ngraph/op/shape_of.hpp b/ngraph/core/include/ngraph/op/shape_of.hpp index 38aa6d3b31c..cc322eafb8d 100644 --- a/ngraph/core/include/ngraph/op/shape_of.hpp +++ b/ngraph/core/include/ngraph/op/shape_of.hpp @@ -32,7 +32,8 @@ namespace ngraph const NodeTypeInfo& get_type_info() const override { return type_info; } ShapeOf() = default; /// \brief Constructs a shape-of operation. 
- ShapeOf(const Output& arg, const element::Type output_type = element::i64); + ShapeOf(const Output& arg, + const element::Type output_type = element::Type_t::i64); bool visit_attributes(AttributeVisitor& visitor) override; virtual std::shared_ptr diff --git a/ngraph/core/include/ngraph/op/topk.hpp b/ngraph/core/include/ngraph/op/topk.hpp index 8a6b13da13d..c35830b7e25 100644 --- a/ngraph/core/include/ngraph/op/topk.hpp +++ b/ngraph/core/include/ngraph/op/topk.hpp @@ -57,14 +57,14 @@ namespace ngraph const int64_t axis, const std::string& mode, const std::string& sort, - const element::Type& index_element_type = element::i32); + const element::Type& index_element_type = element::Type_t::i32); TopK(const Output& data, const Output& k, const int64_t axis, const Mode mode, const SortType sort, - const element::Type& index_element_type = element::i32); + const element::Type& index_element_type = element::Type_t::i32); bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; @@ -104,7 +104,7 @@ namespace ngraph uint64_t m_normalized_axis; Mode m_mode; SortType m_sort; - element::Type m_index_element_type{element::i32}; + element::Type m_index_element_type{element::Type_t::i32}; virtual size_t read_k_from_constant_node(const std::shared_ptr& node, const element::Type& k_element_type) const; @@ -146,14 +146,14 @@ namespace ngraph const int64_t axis, const std::string& mode, const std::string& sort, - const element::Type& index_element_type = element::i32); + const element::Type& index_element_type = element::Type_t::i32); TopK(const Output& data, const Output& k, const int64_t axis, const Mode mode, const SortType sort, - const element::Type& index_element_type = element::i32); + const element::Type& index_element_type = element::Type_t::i32); bool visit_attributes(AttributeVisitor& visitor) override; void validate_and_infer_types() override; virtual std::shared_ptr diff --git a/ngraph/core/include/ngraph/pattern/op/branch.hpp b/ngraph/core/include/ngraph/pattern/op/branch.hpp index 4afcd128af8..d73f6baa0a7 100644 --- a/ngraph/core/include/ngraph/pattern/op/branch.hpp +++ b/ngraph/core/include/ngraph/pattern/op/branch.hpp @@ -44,7 +44,7 @@ namespace ngraph Branch() : Pattern(OutputVector{}) { - set_output_type(0, element::f32, Shape{}); + set_output_type(0, element::Type_t::f32, Shape{}); } void set_destination(const Output& destination) diff --git a/ngraph/core/include/ngraph/pattern/op/label.hpp b/ngraph/core/include/ngraph/pattern/op/label.hpp index e172f970282..9ced55996a0 100644 --- a/ngraph/core/include/ngraph/pattern/op/label.hpp +++ b/ngraph/core/include/ngraph/pattern/op/label.hpp @@ -47,7 +47,7 @@ namespace ngraph /// Example: /// \code{.cpp} /// auto add = a + b; // a and b are op::Parameter in this example - /// auto label = std::make_shared(element::f32, + /// auto label = std::make_shared(element::Type_t::f32, /// Shape{2,2}, /// nullptr, /// OutputVector{add}); @@ -61,7 +61,7 @@ namespace ngraph set_output_type(0, type, s); } - explicit Label(const element::Type& type = element::dynamic, + explicit Label(const element::Type& type = element::Type_t::dynamic, const PartialShape& s = PartialShape::dynamic()) : Label(type, s, [](const Output&) { return true; }, OutputVector()) { diff --git a/ngraph/core/include/ngraph/specialize_function.hpp b/ngraph/core/include/ngraph/specialize_function.hpp index 2270e132a8b..820d6fc5a44 100644 --- a/ngraph/core/include/ngraph/specialize_function.hpp +++ 
b/ngraph/core/include/ngraph/specialize_function.hpp @@ -76,10 +76,12 @@ namespace ngraph /// because when we reconstruct the new x node, it will see that the shapes are inconsistent /// for elementwise add. /// - /// Specialization of element types is also possible: `element::dynamic` can be specialized + /// Specialization of element types is also possible: `element::Type_t::dynamic` can be + /// specialized /// to a concrete element type or left dynamic; but a concrete element type can only be - /// specialized to itself (e.g., specialization does not allow you to change `element::i32` - /// to `element::i64`). + /// specialized to itself (e.g., specialization does not allow you to change + /// `element::Type_t::i32` + /// to `element::Type_t::i64`). /// /// Finally, it is possible to specialize parameter values. If the ith element of /// `parameter_values` is not `nullptr`, and fully static element type and shape has been diff --git a/ngraph/core/include/ngraph/type/element_type.hpp b/ngraph/core/include/ngraph/type/element_type.hpp index 9bd5edbca84..34ce17e48be 100644 --- a/ngraph/core/include/ngraph/type/element_type.hpp +++ b/ngraph/core/include/ngraph/type/element_type.hpp @@ -91,11 +91,12 @@ namespace ngraph size_t bitwidth() const; // The name of this type, the enum name of this type const std::string& get_type_name() const; + bool operator==(const Type_t& other) const; + bool operator!=(const Type_t& other) const { return !(*this == other); } bool operator==(const Type& other) const; bool operator!=(const Type& other) const { return !(*this == other); } bool operator<(const Type& other) const; friend NGRAPH_API std::ostream& operator<<(std::ostream&, const Type&); - static std::vector get_known_types(); /// \brief Checks whether this element type is merge-compatible with `t`. /// \param t The element type to compare this element type to. @@ -130,21 +131,50 @@ namespace ngraph typedef std::vector TypeVector; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::undefined instead.") extern NGRAPH_API const Type undefined; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::dynamic instead.") extern NGRAPH_API const Type dynamic; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::boolean instead.") extern NGRAPH_API const Type boolean; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::bf16 instead.") extern NGRAPH_API const Type bf16; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::f16 instead.") extern NGRAPH_API const Type f16; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::f32 instead.") extern NGRAPH_API const Type f32; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::f64 instead.") extern NGRAPH_API const Type f64; + NGRAPH_DEPRECATED("This global element type was deprecated. Please use Type_t::i8 instead.") extern NGRAPH_API const Type i8; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::i16 instead.") extern NGRAPH_API const Type i16; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::i32 instead.") extern NGRAPH_API const Type i32; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::i64 instead.") extern NGRAPH_API const Type i64; + NGRAPH_DEPRECATED("This global element type was deprecated. 
Please use Type_t::u1 instead.") extern NGRAPH_API const Type u1; + NGRAPH_DEPRECATED("This global element type was deprecated. Please use Type_t::u8 instead.") extern NGRAPH_API const Type u8; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::u16 instead.") extern NGRAPH_API const Type u16; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::u32 instead.") extern NGRAPH_API const Type u32; + NGRAPH_DEPRECATED( + "This global element type was deprecated. Please use Type_t::u64 instead.") extern NGRAPH_API const Type u64; template diff --git a/ngraph/core/reference/src/runtime/reference/loop.cpp b/ngraph/core/reference/src/runtime/reference/loop.cpp index 9731e8a659a..d520387838a 100644 --- a/ngraph/core/reference/src/runtime/reference/loop.cpp +++ b/ngraph/core/reference/src/runtime/reference/loop.cpp @@ -49,8 +49,8 @@ namespace ngraph input_descs.size() + (cur_iter_idx >= 0 ? !cur_iter_initial_value_exist : 0); HostTensorVector inputs_to_body; for (int64_t i = 0; i < inputs_count; ++i) - inputs_to_body.push_back( - std::make_shared(element::dynamic, PartialShape::dynamic())); + inputs_to_body.push_back(std::make_shared(element::Type_t::dynamic, + PartialShape::dynamic())); if (cur_iter_idx >= 0 && !cur_iter_initial_value_exist) { const auto& cur_iter = func->get_parameters().at(cur_iter_idx); @@ -90,12 +90,12 @@ namespace ngraph // Get TripCount int64_t trip_count = 0; - if (args[0]->get_element_type() == ngraph::element::i32) + if (args[0]->get_element_type() == ngraph::element::Type_t::i32) { auto* trip_count_p = args[0]->get_data_ptr(); trip_count = trip_count_p[0]; } - else if (args[0]->get_element_type() == ngraph::element::i64) + else if (args[0]->get_element_type() == ngraph::element::Type_t::i64) { auto* trip_count_p = args[0]->get_data_ptr(); trip_count = trip_count_p[0]; @@ -204,10 +204,10 @@ namespace ngraph { const auto& cur_iter_param = func->get_parameters().at(cur_iter_idx); int64_t iter_num = cur_iter + 1; - if (cur_iter_param->get_element_type() == element::i64) + if (cur_iter_param->get_element_type() == element::Type_t::i64) inputs_to_body.at(cur_iter_idx) ->write(&iter_num, cur_iter_param->get_element_type().size()); - else if (cur_iter_param->get_element_type() == element::i32) + else if (cur_iter_param->get_element_type() == element::Type_t::i32) { int32_t iter_num_i32 = static_cast(iter_num); inputs_to_body.at(cur_iter_idx) diff --git a/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp b/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp index 55719a597cc..8c950c0b807 100644 --- a/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp +++ b/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp @@ -326,7 +326,7 @@ namespace ngraph size_t selected_size = valid_outputs * 3; - if (output_type == ngraph::element::i64) + if (output_type == ngraph::element::Type_t::i64) { int64_t* indices_ptr = outputs[0]->get_data_ptr(); memcpy(indices_ptr, selected_indices.data(), selected_size * sizeof(int64_t)); @@ -381,7 +381,7 @@ namespace ngraph return; } - if (output_type == ngraph::element::i64) + if (output_type == ngraph::element::Type_t::i64) { int64_t* valid_outputs_ptr = outputs[2]->get_data_ptr(); *valid_outputs_ptr = valid_outputs; diff --git a/ngraph/core/reference/src/runtime/reference/tensor_iterator.cpp b/ngraph/core/reference/src/runtime/reference/tensor_iterator.cpp index 08f80cd70f6..c6e12f562b8 100644 --- 
a/ngraph/core/reference/src/runtime/reference/tensor_iterator.cpp +++ b/ngraph/core/reference/src/runtime/reference/tensor_iterator.cpp @@ -35,8 +35,8 @@ namespace ngraph { HostTensorVector inputs_to_body; for (int64_t i = 0; i < input_descs.size(); ++i) - inputs_to_body.push_back( - std::make_shared(element::dynamic, PartialShape::dynamic())); + inputs_to_body.push_back(std::make_shared(element::Type_t::dynamic, + PartialShape::dynamic())); // Port map processing: inputs and back edges struct BackEdge diff --git a/ngraph/core/src/graph_util.cpp b/ngraph/core/src/graph_util.cpp index c0f0461686e..a7c10582a3e 100644 --- a/ngraph/core/src/graph_util.cpp +++ b/ngraph/core/src/graph_util.cpp @@ -586,7 +586,7 @@ std::shared_ptr ngraph::make_zero(const element::Type& element_type, const if (shape.size() > 0) { return std::make_shared( - zero, op::Constant::create(element::u64, Shape{shape.size()}, shape)); + zero, op::Constant::create(element::Type_t::u64, Shape{shape.size()}, shape)); } return zero; } diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index 489df366205..3913daa5a7f 100644 --- a/ngraph/core/src/node.cpp +++ b/ngraph/core/src/node.cpp @@ -213,8 +213,8 @@ descriptor::Output& Node::get_output_descriptor(size_t position) while (m_outputs.size() <= position) { size_t i = m_outputs.size(); - auto tensor_descriptor = - make_shared(element::dynamic, PartialShape::dynamic(), this, i); + auto tensor_descriptor = make_shared( + element::Type_t::dynamic, PartialShape::dynamic(), this, i); m_outputs.emplace_back(this, i, tensor_descriptor); } return m_outputs.at(position); diff --git a/ngraph/core/src/op/broadcast.cpp b/ngraph/core/src/op/broadcast.cpp index 4f91709a846..71db716778d 100644 --- a/ngraph/core/src/op/broadcast.cpp +++ b/ngraph/core/src/op/broadcast.cpp @@ -260,7 +260,7 @@ op::v1::Broadcast::Broadcast(const Output& arg, const AutoBroadcastSpec& broadcast_spec) : util::BroadcastBase{arg, target_shape, - op::v0::Constant::create(element::u8, Shape{}, {0})->output(0), + op::v0::Constant::create(element::Type_t::u8, Shape{}, {0})->output(0), to_broadcast_mode(broadcast_spec)} , m_broadcast_spec{broadcast_spec} { diff --git a/ngraph/core/src/op/bucketize.cpp b/ngraph/core/src/op/bucketize.cpp index fb1bd237fea..38ae363ce1c 100644 --- a/ngraph/core/src/op/bucketize.cpp +++ b/ngraph/core/src/op/bucketize.cpp @@ -45,7 +45,8 @@ void op::v3::Bucketize::validate_and_infer_types() const PartialShape& buckets_pshape = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64. 
Default is i64"); if (buckets_pshape.is_static()) diff --git a/ngraph/core/src/op/concat.cpp b/ngraph/core/src/op/concat.cpp index aa993f2377b..e6bfad1d0bc 100644 --- a/ngraph/core/src/op/concat.cpp +++ b/ngraph/core/src/op/concat.cpp @@ -50,7 +50,7 @@ void op::Concat::validate_and_infer_types() NODE_VALIDATION_CHECK(this, get_input_size() >= 1, "At least one argument required."); PartialShape inputs_shape_scheme{PartialShape::dynamic()}; - element::Type inputs_et{element::dynamic}; + element::Type inputs_et{element::Type_t::dynamic}; Dimension concatenation_axis_output_dim{0}; for (uint64_t i = 0; i < get_input_size(); i++) diff --git a/ngraph/core/src/op/constant.cpp b/ngraph/core/src/op/constant.cpp index b3026a388e5..133fcb3fc27 100644 --- a/ngraph/core/src/op/constant.cpp +++ b/ngraph/core/src/op/constant.cpp @@ -482,7 +482,7 @@ Shape op::Constant::get_shape_val() const Strides op::Constant::get_strides_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + NGRAPH_CHECK(m_element_type == element::Type_t::i64); std::vector out_strides = cast_vector(); Strides output_strides(shape_size(m_shape)); std::transform(out_strides.begin(), @@ -494,7 +494,7 @@ Strides op::Constant::get_strides_val() const Coordinate op::Constant::get_coordinate_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + NGRAPH_CHECK(m_element_type == element::Type_t::i64); std::vector out_coordinate = cast_vector(); Coordinate output_coordinate(shape_size(m_shape)); std::transform(out_coordinate.begin(), @@ -506,7 +506,7 @@ Coordinate op::Constant::get_coordinate_val() const CoordinateDiff op::Constant::get_coordinate_diff_val() const { - NGRAPH_CHECK(m_element_type == element::i64); + NGRAPH_CHECK(m_element_type == element::Type_t::i64); std::vector out_coordinate_diff = cast_vector(); CoordinateDiff output_coordinate_diff(shape_size(m_shape)); std::transform(out_coordinate_diff.begin(), diff --git a/ngraph/core/src/op/cum_sum.cpp b/ngraph/core/src/op/cum_sum.cpp index c00b80766e3..86fc0085e36 100644 --- a/ngraph/core/src/op/cum_sum.cpp +++ b/ngraph/core/src/op/cum_sum.cpp @@ -37,7 +37,7 @@ op::v0::CumSum::CumSum(const Output& arg, } op::v0::CumSum::CumSum(const Output& arg, const bool exclusive, const bool reverse) - : Op({arg, op::Constant::create(element::i32, Shape{}, {0})}) + : Op({arg, op::Constant::create(element::Type_t::i32, Shape{}, {0})}) , m_exclusive(exclusive) , m_reverse(reverse) { @@ -65,7 +65,7 @@ void op::v0::CumSum::validate_and_infer_types() const auto& axis_type = get_input_element_type(1); NODE_VALIDATION_CHECK(this, - axis_type == element::i32 || axis_type == element::i64, + axis_type == element::Type_t::i32 || axis_type == element::Type_t::i64, "axis element type must be either int64_t or int32_t but got (", axis_type, ")."); diff --git a/ngraph/core/src/op/detection_output.cpp b/ngraph/core/src/op/detection_output.cpp index 86a107deb5d..41c04467255 100644 --- a/ngraph/core/src/op/detection_output.cpp +++ b/ngraph/core/src/op/detection_output.cpp @@ -49,11 +49,11 @@ void op::DetectionOutput::validate_and_infer_types() { auto box_logits_shape = get_input_partial_shape(0).to_shape(); set_output_type( - 0, element::f32, Shape{1, 1, m_attrs.keep_top_k[0] * box_logits_shape[0], 7}); + 0, element::Type_t::f32, Shape{1, 1, m_attrs.keep_top_k[0] * box_logits_shape[0], 7}); } else { - set_output_type(0, element::f32, PartialShape::dynamic()); + set_output_type(0, element::Type_t::f32, PartialShape::dynamic()); } } diff --git a/ngraph/core/src/op/embedding_segments_sum.cpp 
b/ngraph/core/src/op/embedding_segments_sum.cpp index 6a2eca7a92b..528b49b1e97 100644 --- a/ngraph/core/src/op/embedding_segments_sum.cpp +++ b/ngraph/core/src/op/embedding_segments_sum.cpp @@ -56,18 +56,18 @@ op::v3::EmbeddingSegmentsSum::EmbeddingSegmentsSum(const Output& emb_table void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, - get_input_element_type(SEGMENT_IDS) == element::i64 || - get_input_element_type(SEGMENT_IDS) == element::i32, + get_input_element_type(SEGMENT_IDS) == element::Type_t::i64 || + get_input_element_type(SEGMENT_IDS) == element::Type_t::i32, "SEGMENT_IDS type must be i32 or i64"); NODE_VALIDATION_CHECK(this, - get_input_element_type(INDICES) == element::i64 || - get_input_element_type(INDICES) == element::i32, + get_input_element_type(INDICES) == element::Type_t::i64 || + get_input_element_type(INDICES) == element::Type_t::i32, "INDICES type must be i32 or i64"); NODE_VALIDATION_CHECK(this, - get_input_element_type(NUM_SEGMENTS) == element::i64 || - get_input_element_type(NUM_SEGMENTS) == element::i32, + get_input_element_type(NUM_SEGMENTS) == element::Type_t::i64 || + get_input_element_type(NUM_SEGMENTS) == element::Type_t::i32, "NUM_SEGMENTS type must be i32 or i64"); NODE_VALIDATION_CHECK( @@ -110,8 +110,8 @@ void op::v3::EmbeddingSegmentsSum::validate_and_infer_types() if (get_input_size() >= 5) { NODE_VALIDATION_CHECK(this, - get_input_element_type(DEFAULT_INDEX) == element::i64 || - get_input_element_type(DEFAULT_INDEX) == element::i32, + get_input_element_type(DEFAULT_INDEX) == element::Type_t::i64 || + get_input_element_type(DEFAULT_INDEX) == element::Type_t::i32, "DEFAULT_INDEX type must be i32 or i64"); NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/equal.cpp b/ngraph/core/src/op/equal.cpp index 1627e5f05f6..bb93b8fb1e6 100644 --- a/ngraph/core/src/op/equal.cpp +++ b/ngraph/core/src/op/equal.cpp @@ -65,7 +65,7 @@ namespace equal const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/fake_quantize.cpp b/ngraph/core/src/op/fake_quantize.cpp index fec59b9fd92..5ed3f6fd7a9 100644 --- a/ngraph/core/src/op/fake_quantize.cpp +++ b/ngraph/core/src/op/fake_quantize.cpp @@ -134,7 +134,7 @@ OutputVector op::FakeQuantize::decompose_op() const const auto dequant_scale = (output_high - output_low) / levels_minus_one; // zero_point type needs to match the quantization output type - const auto zero_point = Constant::create(element::i32, data.get_shape(), {0.0}); + const auto zero_point = Constant::create(element::Type_t::i32, data.get_shape(), {0.0}); const auto axes = get_default_order(input_data_shape); // clip the input data to the range @@ -148,7 +148,7 @@ OutputVector op::FakeQuantize::decompose_op() const make_shared(data, quant_scale, zero_point, - element::i32, + element::Type_t::i32, axes, op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_EVEN); diff --git a/ngraph/core/src/op/gather.cpp b/ngraph/core/src/op/gather.cpp index 45c971797bf..82e7b6ec405 100644 --- a/ngraph/core/src/op/gather.cpp +++ b/ngraph/core/src/op/gather.cpp @@ -167,7 +167,7 @@ namespace gather out->set_shape(out_shape); - if (arg1->get_element_type() == element::i64) + if (arg1->get_element_type() == element::Type_t::i64) { runtime::reference::gather(arg0->get_data_ptr(), 
arg1->get_data_ptr(), @@ -177,7 +177,7 @@ namespace gather out->get_shape(), axis); } - else if (arg1->get_element_type() == element::i32) + else if (arg1->get_element_type() == element::Type_t::i32) { runtime::reference::gather(arg0->get_data_ptr(), arg1->get_data_ptr(), @@ -280,7 +280,7 @@ namespace gather if (indices_shape.empty()) { // gathering a scalar - const auto axes = op::Constant::create(element::i64, Shape{1}, {0}); + const auto axes = op::Constant::create(element::Type_t::i64, Shape{1}, {0}); gathered = make_shared(gathered_concat_input, axes); } diff --git a/ngraph/core/src/op/greater.cpp b/ngraph/core/src/op/greater.cpp index e84dd2ea474..ae7a0afeaa7 100644 --- a/ngraph/core/src/op/greater.cpp +++ b/ngraph/core/src/op/greater.cpp @@ -65,7 +65,7 @@ namespace greaterop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/greater_eq.cpp b/ngraph/core/src/op/greater_eq.cpp index 97dc3caf21a..f3ce8cbb180 100644 --- a/ngraph/core/src/op/greater_eq.cpp +++ b/ngraph/core/src/op/greater_eq.cpp @@ -65,7 +65,7 @@ namespace greater_equalop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/grn.cpp b/ngraph/core/src/op/grn.cpp index 3668d227238..3710b2bb6c6 100644 --- a/ngraph/core/src/op/grn.cpp +++ b/ngraph/core/src/op/grn.cpp @@ -78,7 +78,7 @@ OutputVector op::GRN::decompose_op() const data = builder::opset1::reshape(data, data_shape); } - const auto axis_set_const = op::Constant::create(element::i64, {}, {1}); + const auto axis_set_const = op::Constant::create(element::Type_t::i64, {}, {1}); // Calculate l2 norm across channels. shared_ptr norm = builder::opset1::l2_norm(data, axis_set_const, m_bias); // Get back reduced axis. 
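The gather and loop hunks show the dispatch idiom this patch touches repeatedly for mixed i32/i64 index inputs: compare the tensor's element type against a `Type_t` enumerator once, then read the buffer through the matching C++ type. A hedged sketch of that idiom, using a stand-in `Tensor` struct rather than nGraph's real `HostTensor` class:

```C++
#include <cassert>
#include <cstdint>

enum class Type_t
{
    i32,
    i64
};

// Stand-in for a type-erased tensor: an element-type tag plus raw storage.
struct Tensor
{
    Type_t element_type;
    const void* data;
};

// Widen a scalar index to int64_t regardless of the stored precision,
// mirroring the trip-count extraction in loop.cpp above.
int64_t read_index(const Tensor& t)
{
    if (t.element_type == Type_t::i32)
        return static_cast<const int32_t*>(t.data)[0];
    assert(t.element_type == Type_t::i64);
    return static_cast<const int64_t*>(t.data)[0];
}

int main()
{
    int32_t raw = 7;
    Tensor t{Type_t::i32, &raw};
    assert(read_index(t) == 7);
}
```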
diff --git a/ngraph/core/src/op/gru_cell.cpp b/ngraph/core/src/op/gru_cell.cpp index f84c4dee2ae..d70e115c7db 100644 --- a/ngraph/core/src/op/gru_cell.cpp +++ b/ngraph/core/src/op/gru_cell.cpp @@ -119,7 +119,7 @@ void op::v3::GRUCell::validate_and_infer_types() } auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Get input partial shape for all inputs const auto& x_pshape = get_input_partial_shape(0); diff --git a/ngraph/core/src/op/gru_sequence.cpp b/ngraph/core/src/op/gru_sequence.cpp index fc7cb620d3d..4446c3fb7fc 100644 --- a/ngraph/core/src/op/gru_sequence.cpp +++ b/ngraph/core/src/op/gru_sequence.cpp @@ -74,7 +74,7 @@ void op::v5::GRUSequence::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); auto merged_num_directions = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; auto x_pshape = get_input_partial_shape(0); auto ht_pshape = get_input_partial_shape(1); diff --git a/ngraph/core/src/op/interpolate.cpp b/ngraph/core/src/op/interpolate.cpp index 3cffb5e3e26..14b58b9381b 100644 --- a/ngraph/core/src/op/interpolate.cpp +++ b/ngraph/core/src/op/interpolate.cpp @@ -221,8 +221,8 @@ void op::v4::Interpolate::validate_and_infer_types() { element::Type input_et = get_input_element_type(0); NODE_VALIDATION_CHECK(this, - input_et == element::f32 || input_et == element::f16 || - input_et == element::i8, + input_et == element::Type_t::f32 || input_et == element::Type_t::f16 || + input_et == element::Type_t::i8, "Input element type must be f32, f16, or i8"); PartialShape input_shape = PartialShape(get_input_partial_shape(0)); diff --git a/ngraph/core/src/op/less.cpp b/ngraph/core/src/op/less.cpp index 468a78c7e87..61ac88cba1c 100644 --- a/ngraph/core/src/op/less.cpp +++ b/ngraph/core/src/op/less.cpp @@ -65,7 +65,7 @@ namespace lessop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/less_eq.cpp b/ngraph/core/src/op/less_eq.cpp index 7ec143ee4d0..5aa4acf11d6 100644 --- a/ngraph/core/src/op/less_eq.cpp +++ b/ngraph/core/src/op/less_eq.cpp @@ -65,7 +65,7 @@ namespace less_equalop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/lrn.cpp b/ngraph/core/src/op/lrn.cpp index 0ebe097acde..a28694ffc14 100644 --- a/ngraph/core/src/op/lrn.cpp +++ b/ngraph/core/src/op/lrn.cpp @@ -25,7 +25,7 @@ using namespace ngraph; constexpr NodeTypeInfo op::LRN::type_info; op::LRN::LRN(const Output& arg, double alpha, double beta, double bias, size_t size) - : LRN(arg, op::Constant::create(element::i64, Shape{1}, {1}), alpha, beta, bias, size) + : LRN(arg, op::Constant::create(element::Type_t::i64, Shape{1}, {1}), alpha, beta, bias, size) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); } diff --git a/ngraph/core/src/op/lstm_cell.cpp b/ngraph/core/src/op/lstm_cell.cpp index 
0d2b24d53ea..235763125f1 100644 --- a/ngraph/core/src/op/lstm_cell.cpp +++ b/ngraph/core/src/op/lstm_cell.cpp @@ -156,7 +156,7 @@ void op::v0::LSTMCell::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Copy all inputs without peephole (7th input) and initial_cell_state (2nd input) information // for further validation @@ -457,7 +457,7 @@ void op::v4::LSTMCell::validate_and_infer_types() } auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Get input partial shape for all inputs const auto& x_pshape = get_input_partial_shape(0); diff --git a/ngraph/core/src/op/lstm_sequence.cpp b/ngraph/core/src/op/lstm_sequence.cpp index ab3607c425e..7994cae95da 100644 --- a/ngraph/core/src/op/lstm_sequence.cpp +++ b/ngraph/core/src/op/lstm_sequence.cpp @@ -131,8 +131,10 @@ shared_ptr op::v0::LSTMSequence::get_masked_node(const Output& data, // Create predicate nodes. The condition is whether current time step value // is greater than sequence length for respective batch inputs. - shared_ptr curr_time_step_node = opset1::Constant::create( - element::i32, data.get_shape(), vector(shape_size(data.get_shape()), time_step)); + shared_ptr curr_time_step_node = + opset1::Constant::create(element::Type_t::i32, + data.get_shape(), + vector(shape_size(data.get_shape()), time_step)); Output batch_seq_length = builder::opset1::legacy_broadcast_for_binary_operation( curr_time_step_node, input_value(3).get_node_shared_ptr(), batch_axis); @@ -270,7 +272,7 @@ void op::v0::LSTMSequence::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); auto merged_num_directions = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Copy all inputs without peephole and initial_cell_state information for further validation for (size_t i = 0; i < get_input_size() - 1; i++) @@ -468,7 +470,7 @@ void op::v5::LSTMSequence::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); auto merged_num_directions = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Copy all inputs without initial_cell_state information for further validation for (size_t i = 0; i < get_input_size(); i++) diff --git a/ngraph/core/src/op/mod.cpp b/ngraph/core/src/op/mod.cpp index 30284534137..ff573124da8 100644 --- a/ngraph/core/src/op/mod.cpp +++ b/ngraph/core/src/op/mod.cpp @@ -52,8 +52,9 @@ OutputVector op::v1::Mod::decompose_op() const const auto divisor = make_shared(input_value(1)); // truncated(a / b) - auto division = make_shared( - make_shared(dividend, divisor, m_auto_broadcast), ngraph::element::i64); + auto division = + make_shared(make_shared(dividend, divisor, m_auto_broadcast), + ngraph::element::Type_t::i64); division = make_shared(division, dividend_et); // truncated(a / b) * b const auto multiplication = make_shared(division, divisor, m_auto_broadcast); diff --git a/ngraph/core/src/op/non_max_suppression.cpp b/ngraph/core/src/op/non_max_suppression.cpp index d5e715b6865..2e158f30b20 100644 --- a/ngraph/core/src/op/non_max_suppression.cpp +++ b/ngraph/core/src/op/non_max_suppression.cpp @@ -52,9 
+52,9 @@ op::v1::NonMaxSuppression::NonMaxSuppression( const bool sort_result_descending) : Op({boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f})}) + op::Constant::create(element::Type_t::i64, Shape{}, {0}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f})}) , m_box_encoding{box_encoding} , m_sort_result_descending{sort_result_descending} { @@ -71,13 +71,13 @@ std::shared_ptr const auto& arg2 = new_args.size() > 2 ? new_args.at(2) - : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0}); const auto& arg3 = new_args.size() > 3 ? new_args.at(3) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); const auto& arg4 = new_args.size() > 4 ? new_args.at(4) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); return std::make_shared( new_args.at(0), new_args.at(1), arg2, arg3, arg4, m_box_encoding, m_sort_result_descending); @@ -98,7 +98,7 @@ void op::v1::NonMaxSuppression::validate_and_infer_types() // the spec doesn't say what exact type should be used for the output of this op // that's why we're setting it to 64-bit integer to provide the maximum range of values support // this will be changed (configurable) in the next version of this op - const auto& output_element_type = element::i64; + const auto& output_element_type = element::Type_t::i64; // NonMaxSuppression produces triplets // that have the following format: [batch_index, class_index, box_index] @@ -249,9 +249,9 @@ op::v3::NonMaxSuppression::NonMaxSuppression( const element::Type& output_type) : Op({boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f})}) + op::Constant::create(element::Type_t::i64, Shape{}, {0}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f})}) , m_box_encoding{box_encoding} , m_sort_result_descending{sort_result_descending} , m_output_type{output_type} @@ -269,13 +269,13 @@ std::shared_ptr const auto& arg2 = new_args.size() > 2 ? new_args.at(2) - : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0}); const auto& arg3 = new_args.size() > 3 ? new_args.at(3) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); const auto& arg4 = new_args.size() > 4 ? 
new_args.at(4) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -301,7 +301,8 @@ void op::v3::NonMaxSuppression::validate() const auto scores_ps = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64"); if (boxes_ps.is_dynamic() || scores_ps.is_dynamic()) @@ -468,9 +469,9 @@ op::v4::NonMaxSuppression::NonMaxSuppression( const element::Type& output_type) : op::v3::NonMaxSuppression(boxes, scores, - op::Constant::create(element::i64, Shape{}, {0}), - op::Constant::create(element::f32, Shape{}, {.0f}), - op::Constant::create(element::f32, Shape{}, {.0f}), + op::Constant::create(element::Type_t::i64, Shape{}, {0}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f}), + op::Constant::create(element::Type_t::f32, Shape{}, {.0f}), box_encoding, sort_result_descending, output_type) @@ -488,13 +489,13 @@ std::shared_ptr const auto& arg2 = new_args.size() > 2 ? new_args.at(2) - : ngraph::op::Constant::create(element::i32, Shape{}, {0}); + : ngraph::op::Constant::create(element::Type_t::i32, Shape{}, {0}); const auto& arg3 = new_args.size() > 3 ? new_args.at(3) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); const auto& arg4 = new_args.size() > 4 ? new_args.at(4) - : ngraph::op::Constant::create(element::f32, Shape{}, {.0f}); + : ngraph::op::Constant::create(element::Type_t::f32, Shape{}, {.0f}); return std::make_shared(new_args.at(0), new_args.at(1), @@ -693,7 +694,7 @@ namespace inline bool is_float_type_admissible(const element::Type& t) { - return t == element::f32 || t == element::f16 || t == element::bf16; + return t == element::Type_t::f32 || t == element::Type_t::f16 || t == element::Type_t::bf16; } inline bool is_scalar_or_1d_tensor_with_1_element(const PartialShape& p) @@ -715,7 +716,8 @@ void op::v5::NonMaxSuppression::validate() const auto scores_ps = get_input_partial_shape(1); NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64"); if (boxes_ps.is_dynamic() || scores_ps.is_dynamic()) @@ -920,7 +922,7 @@ void op::v5::NonMaxSuppression::validate_and_infer_types() } set_output_type(0, m_output_type, out_shape); - set_output_type(1, element::f32, out_shape); + set_output_type(1, element::Type_t::f32, out_shape); set_output_type(2, m_output_type, Shape{1}); } diff --git a/ngraph/core/src/op/non_zero.cpp b/ngraph/core/src/op/non_zero.cpp index 9e544abc013..55831236118 100644 --- a/ngraph/core/src/op/non_zero.cpp +++ b/ngraph/core/src/op/non_zero.cpp @@ -62,7 +62,8 @@ void op::v3::NonZero::validate_and_infer_types() "NonZero input data type needs to be a numeric type. 
Got: ", input_et); NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output type must be i32 or i64"); // For scalar non-zero value case, onnx test case expects output shape {1, 1} diff --git a/ngraph/core/src/op/not_equal.cpp b/ngraph/core/src/op/not_equal.cpp index 643e92bbe0f..44dae5c95cc 100644 --- a/ngraph/core/src/op/not_equal.cpp +++ b/ngraph/core/src/op/not_equal.cpp @@ -65,7 +65,7 @@ namespace not_equalop const op::AutoBroadcastSpec& broadcast_spec) { bool rc = true; - out->set_broadcast(broadcast_spec, arg0, arg1, element::boolean); + out->set_broadcast(broadcast_spec, arg0, arg1, element::Type_t::boolean); switch (arg0->get_element_type()) { TYPE_CASE(boolean)(arg0, arg1, out, broadcast_spec); diff --git a/ngraph/core/src/op/prior_box.cpp b/ngraph/core/src/op/prior_box.cpp index 437678880c9..5e0a5580070 100644 --- a/ngraph/core/src/op/prior_box.cpp +++ b/ngraph/core/src/op/prior_box.cpp @@ -72,14 +72,14 @@ void op::PriorBox::validate_and_infer_types() auto layer_shape = const_shape->get_shape_val(); set_output_type(0, - element::f32, + element::Type_t::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * static_cast(number_of_priors(m_attrs))}); } else { - set_output_type(0, element::f32, PartialShape::dynamic()); + set_output_type(0, element::Type_t::f32, PartialShape::dynamic()); } } diff --git a/ngraph/core/src/op/prior_box_clustered.cpp b/ngraph/core/src/op/prior_box_clustered.cpp index 4b173c6a007..ec41d3b074d 100644 --- a/ngraph/core/src/op/prior_box_clustered.cpp +++ b/ngraph/core/src/op/prior_box_clustered.cpp @@ -80,11 +80,11 @@ void op::PriorBoxClustered::validate_and_infer_types() // {Prior boxes, variances-adjusted prior boxes} const auto num_priors = m_attrs.widths.size(); set_output_type( - 0, element::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors}); + 0, element::Type_t::f32, Shape{2, 4 * layer_shape[0] * layer_shape[1] * num_priors}); } else { - set_output_type(0, element::f32, PartialShape::dynamic()); + set_output_type(0, element::Type_t::f32, PartialShape::dynamic()); } } diff --git a/ngraph/core/src/op/range.cpp b/ngraph/core/src/op/range.cpp index b8083cafff0..0da2373ef95 100644 --- a/ngraph/core/src/op/range.cpp +++ b/ngraph/core/src/op/range.cpp @@ -363,7 +363,7 @@ void op::v0::Range::validate_and_infer_types() set_input_is_relevant_to_shape(1); set_input_is_relevant_to_shape(2); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; NODE_VALIDATION_CHECK( this, @@ -373,7 +373,7 @@ void op::v0::Range::validate_and_infer_types() "Element types for start, stop, and step do not match."); NODE_VALIDATION_CHECK(this, - result_et != element::boolean, + result_et != element::Type_t::boolean, "Element type for start, stop, and step, must not be boolean."); NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/reduce_logical_and.cpp b/ngraph/core/src/op/reduce_logical_and.cpp index a83d94200bb..666b818efb7 100644 --- a/ngraph/core/src/op/reduce_logical_and.cpp +++ b/ngraph/core/src/op/reduce_logical_and.cpp @@ -76,7 +76,7 @@ bool op::v1::ReduceLogicalAnd::evaluate(const HostTensorVector& outputs, const auto& axes = inputs[1]; const auto& out = outputs[0]; - if (data->get_element_type() != element::boolean || + if (data->get_element_type() != element::Type_t::boolean || !axes->get_element_type().is_integral_number()) { return false; diff --git a/ngraph/core/src/op/reduce_logical_or.cpp 
b/ngraph/core/src/op/reduce_logical_or.cpp index ba3efba782f..f1c731cc249 100644 --- a/ngraph/core/src/op/reduce_logical_or.cpp +++ b/ngraph/core/src/op/reduce_logical_or.cpp @@ -76,7 +76,7 @@ bool op::v1::ReduceLogicalOr::evaluate(const HostTensorVector& outputs, const auto& axes = inputs[1]; const auto& out = outputs[0]; - if (data->get_element_type() != element::boolean || + if (data->get_element_type() != element::Type_t::boolean || !axes->get_element_type().is_integral_number()) { return false; diff --git a/ngraph/core/src/op/reverse.cpp b/ngraph/core/src/op/reverse.cpp index fe929235617..212a6befe04 100644 --- a/ngraph/core/src/op/reverse.cpp +++ b/ngraph/core/src/op/reverse.cpp @@ -59,7 +59,7 @@ void op::v1::Reverse::validate_and_infer_types() if (m_mode == Mode::MASK) { NODE_VALIDATION_CHECK(this, - get_input_element_type(1) == element::boolean, + get_input_element_type(1) == element::Type_t::boolean, "In 'mask' mode the second input must contain boolean values."); } diff --git a/ngraph/core/src/op/rnn_cell.cpp b/ngraph/core/src/op/rnn_cell.cpp index 80dba75a894..482929fd03e 100644 --- a/ngraph/core/src/op/rnn_cell.cpp +++ b/ngraph/core/src/op/rnn_cell.cpp @@ -92,7 +92,7 @@ void op::v0::RNNCell::validate_and_infer_types() } auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; // Get input partial shape for all inputs const auto& x_pshape = get_input_partial_shape(0); diff --git a/ngraph/core/src/op/rnn_sequence.cpp b/ngraph/core/src/op/rnn_sequence.cpp index 5087b631d1e..cfbbb1d7f95 100644 --- a/ngraph/core/src/op/rnn_sequence.cpp +++ b/ngraph/core/src/op/rnn_sequence.cpp @@ -71,7 +71,7 @@ void op::v5::RNNSequence::validate_and_infer_types() auto merged_batch_size = Dimension::dynamic(); auto merged_hidden_size = Dimension::dynamic(); auto merged_num_directions = Dimension::dynamic(); - auto result_et = element::dynamic; + element::Type result_et = element::Type_t::dynamic; auto x_pshape = get_input_partial_shape(0); auto ht_pshape = get_input_partial_shape(1); diff --git a/ngraph/core/src/op/select.cpp b/ngraph/core/src/op/select.cpp index e8f0c0a4407..7352ec5be7b 100644 --- a/ngraph/core/src/op/select.cpp +++ b/ngraph/core/src/op/select.cpp @@ -46,7 +46,7 @@ void op::v1::Select::validate_and_infer_types() // Condition element type check NODE_VALIDATION_CHECK(this, get_input_element_type(0).is_dynamic() || - get_input_element_type(0) == element::boolean, + get_input_element_type(0) == element::Type_t::boolean, "Argument 0 must have boolean element type (element type: ", get_input_element_type(0), ")."); @@ -184,7 +184,7 @@ void op::v0::Select::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, get_input_element_type(0).is_dynamic() || - get_input_element_type(0) == element::boolean, + get_input_element_type(0) == element::Type_t::boolean, "Argument 0 must have boolean element type (element type: ", get_input_element_type(0), ")."); diff --git a/ngraph/core/src/op/shape_of.cpp b/ngraph/core/src/op/shape_of.cpp index 78923352831..84134080bfb 100644 --- a/ngraph/core/src/op/shape_of.cpp +++ b/ngraph/core/src/op/shape_of.cpp @@ -42,7 +42,8 @@ op::v3::ShapeOf::ShapeOf(const Output& arg, element::Type output_type) void op::v3::ShapeOf::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, - m_output_type == element::i64 || m_output_type == element::i32, + m_output_type == element::Type_t::i64 || + m_output_type == element::Type_t::i32, "Output 
type must be i32 or i64"); set_input_is_relevant_to_value(0, false); set_output_type(0, m_output_type, PartialShape{get_input_partial_shape(0).rank()}); @@ -141,7 +142,7 @@ namespace shape_of auto index = std::make_shared( output_type, Shape{1}, std::vector{i}); auto axis = std::make_shared( - element::i64, Shape{}, std::vector{0}); + element::Type_t::i64, Shape{}, std::vector{0}); auto temp = make_shared(shape_of, index, axis); temp->set_friendly_name("DynDim/" + temp->get_name()); dimensions.push_back(temp); @@ -182,7 +183,7 @@ op::v0::ShapeOf::ShapeOf(const Output& arg) void op::v0::ShapeOf::validate_and_infer_types() { set_input_is_relevant_to_value(0, false); - set_output_type(0, element::i64, PartialShape{get_input_partial_shape(0).rank()}); + set_output_type(0, element::Type_t::i64, PartialShape{get_input_partial_shape(0).rank()}); } bool ngraph::op::v0::ShapeOf::visit_attributes(AttributeVisitor& visitor) diff --git a/ngraph/core/src/op/squeeze.cpp b/ngraph/core/src/op/squeeze.cpp index 7adfe93ce7a..5cf640d2932 100644 --- a/ngraph/core/src/op/squeeze.cpp +++ b/ngraph/core/src/op/squeeze.cpp @@ -126,7 +126,7 @@ OutputVector op::Squeeze::decompose_op() const auto output_data_shape = get_output_shape(0); return {make_shared( data, - op::Constant::create(element::u64, {output_data_shape.size()}, output_data_shape), + op::Constant::create(element::Type_t::u64, {output_data_shape.size()}, output_data_shape), false)}; } diff --git a/ngraph/core/src/op/strided_slice.cpp b/ngraph/core/src/op/strided_slice.cpp index 8dc5ca05b97..b4af01c84c4 100644 --- a/ngraph/core/src/op/strided_slice.cpp +++ b/ngraph/core/src/op/strided_slice.cpp @@ -77,12 +77,13 @@ namespace { NGRAPH_CHECK(begin_pshape.rank().is_static() && begin_pshape.rank().get_length() == 1, "Begin input must be 1D"); - return std::make_shared(op::Constant::create(element::i64, {}, {1}), - std::make_shared(begin)); + return std::make_shared( + op::Constant::create(element::Type_t::i64, {}, {1}), + std::make_shared(begin)); } return op::Constant::create( - element::i64, Shape{strides_length}, vector(strides_length, 1)); + element::Type_t::i64, Shape{strides_length}, vector(strides_length, 1)); } } diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp index 9a47674e57d..e6b3bab5977 100644 --- a/ngraph/core/src/op/topk.cpp +++ b/ngraph/core/src/op/topk.cpp @@ -320,8 +320,9 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr& node, const element::Type& k_element_type) const { NODE_VALIDATION_CHECK(this, - k_element_type == element::i8 || k_element_type == element::i32 || - k_element_type == element::i64, + k_element_type == element::Type_t::i8 || + k_element_type == element::Type_t::i32 || + k_element_type == element::Type_t::i64, "K input element type must be i8, i32 or i64 (got ", k_element_type, ")."); @@ -400,7 +401,7 @@ size_t op::v1::TopK::get_k() const void op::v1::TopK::set_k(size_t k) { this->input(1).replace_source_output( - op::Constant::create(element::i64, Shape{}, {k})->output(0)); + op::Constant::create(element::Type_t::i64, Shape{}, {k})->output(0)); } bool op::v1::TopK::evaluate(const HostTensorVector& outputs, const HostTensorVector& inputs) const diff --git a/ngraph/core/src/op/util/arithmetic_reduction.cpp b/ngraph/core/src/op/util/arithmetic_reduction.cpp index 09b17f95297..dac51ff7724 100644 --- a/ngraph/core/src/op/util/arithmetic_reduction.cpp +++ b/ngraph/core/src/op/util/arithmetic_reduction.cpp @@ -29,7 +29,7 @@ op::util::ArithmeticReduction::ArithmeticReduction(const Output& arg, 
const AxisSet& reduction_axes) : Op({arg, op::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) ->output(0)}) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); @@ -62,9 +62,10 @@ const AxisSet op::util::ArithmeticReduction::get_reduction_axes() const void op::util::ArithmeticReduction::set_reduction_axes(const AxisSet& reduction_axes) { - this->input(1).replace_source_output( - op::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) - ->output(0)); + this->input(1).replace_source_output(op::Constant::create(element::Type_t::i64, + Shape{reduction_axes.size()}, + reduction_axes.to_vector()) + ->output(0)); } void op::util::ArithmeticReduction::validate_and_infer_types() diff --git a/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp b/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp index 7f9b4afbeec..18af758956f 100644 --- a/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_arithmetic.cpp @@ -44,7 +44,7 @@ void op::util::BinaryElementwiseArithmetic::validate_and_infer_elementwise_arith PartialShape& args_pshape = std::get<1>(args_et_pshape); NODE_VALIDATION_CHECK(this, - args_et.is_dynamic() || args_et != element::boolean, + args_et.is_dynamic() || args_et != element::Type_t::boolean, "Arguments cannot have boolean element type (argument element type: ", args_et, ")."); diff --git a/ngraph/core/src/op/util/binary_elementwise_comparison.cpp b/ngraph/core/src/op/util/binary_elementwise_comparison.cpp index f8f35d99721..74c4e239dfb 100644 --- a/ngraph/core/src/op/util/binary_elementwise_comparison.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_comparison.cpp @@ -39,7 +39,7 @@ void op::util::BinaryElementwiseComparison::validate_and_infer_types() auto args_et_pshape = op::util::validate_and_infer_elementwise_args(this, m_autob); PartialShape& args_pshape = std::get<1>(args_et_pshape); - set_output_type(0, element::boolean, args_pshape); + set_output_type(0, element::Type_t::boolean, args_pshape); } bool op::util::BinaryElementwiseComparison::visit_attributes(AttributeVisitor& visitor) diff --git a/ngraph/core/src/op/util/binary_elementwise_logical.cpp b/ngraph/core/src/op/util/binary_elementwise_logical.cpp index 6c7dc0bf51f..666b8c1daa8 100644 --- a/ngraph/core/src/op/util/binary_elementwise_logical.cpp +++ b/ngraph/core/src/op/util/binary_elementwise_logical.cpp @@ -44,12 +44,12 @@ void op::util::BinaryElementwiseLogical::validate_and_infer_elementwise_logical( NODE_VALIDATION_CHECK( this, - args_et.is_dynamic() || args_et == element::boolean, + args_et.is_dynamic() || args_et == element::Type_t::boolean, "Operands for logical operators must have boolean element type but have element type ", args_et, "."); - set_output_type(0, element::boolean, args_pshape); + set_output_type(0, element::Type_t::boolean, args_pshape); } void op::util::BinaryElementwiseLogical::validate_and_infer_types() diff --git a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp index 3fa1b09ba78..8834496a2cb 100644 --- a/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_offsets_base.cpp @@ -52,13 +52,13 @@ op::util::EmbeddingBagOffsetsBase::EmbeddingBagOffsetsBase(const Output& e void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types() { 
NODE_VALIDATION_CHECK(this, - get_input_element_type(OFFSETS) == element::i64 || - get_input_element_type(OFFSETS) == element::i32, + get_input_element_type(OFFSETS) == element::Type_t::i64 || + get_input_element_type(OFFSETS) == element::Type_t::i32, "OFFSETS type must be i32 or i64"); NODE_VALIDATION_CHECK(this, - get_input_element_type(INDICES) == element::i64 || - get_input_element_type(INDICES) == element::i32, + get_input_element_type(INDICES) == element::Type_t::i64 || + get_input_element_type(INDICES) == element::Type_t::i32, "INDICES type must be i32 or i64"); NODE_VALIDATION_CHECK( @@ -83,8 +83,8 @@ void op::util::EmbeddingBagOffsetsBase::validate_and_infer_types() if (get_input_size() >= 4) { NODE_VALIDATION_CHECK(this, - get_input_element_type(DEFAULT_INDEX) == element::i64 || - get_input_element_type(DEFAULT_INDEX) == element::i32, + get_input_element_type(DEFAULT_INDEX) == element::Type_t::i64 || + get_input_element_type(DEFAULT_INDEX) == element::Type_t::i32, "DEFAULT_INDEX type must be i32 or i64"); NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp index 7b9afd0f7ad..48d7e5d1963 100644 --- a/ngraph/core/src/op/util/embeddingbag_packed_base.cpp +++ b/ngraph/core/src/op/util/embeddingbag_packed_base.cpp @@ -40,8 +40,8 @@ op::util::EmbeddingBagPackedBase::EmbeddingBagPackedBase(const Output& emb void op::util::EmbeddingBagPackedBase::validate_and_infer_types() { NODE_VALIDATION_CHECK(this, - get_input_element_type(INDICES) == element::i64 || - get_input_element_type(INDICES) == element::i32, + get_input_element_type(INDICES) == element::Type_t::i64 || + get_input_element_type(INDICES) == element::Type_t::i32, "INDICES type must be i32 or i64"); NODE_VALIDATION_CHECK(this, diff --git a/ngraph/core/src/op/util/index_reduction.cpp b/ngraph/core/src/op/util/index_reduction.cpp index f4fd0ab5dc1..f0e11361c7c 100644 --- a/ngraph/core/src/op/util/index_reduction.cpp +++ b/ngraph/core/src/op/util/index_reduction.cpp @@ -68,8 +68,8 @@ void op::util::IndexReduction::validate_and_infer_types() rank, ")."); NODE_VALIDATION_CHECK(this, - m_index_element_type == element::i32 || - m_index_element_type == element::i64, + m_index_element_type == element::Type_t::i32 || + m_index_element_type == element::Type_t::i64, "Index element is neither i64 or i32."); PartialShape output_shape{PartialShape::dynamic()}; diff --git a/ngraph/core/src/op/util/logical_reduction.cpp b/ngraph/core/src/op/util/logical_reduction.cpp index dbb12c3e025..c53f68be53b 100644 --- a/ngraph/core/src/op/util/logical_reduction.cpp +++ b/ngraph/core/src/op/util/logical_reduction.cpp @@ -28,7 +28,7 @@ op::util::LogicalReduction::LogicalReduction() op::util::LogicalReduction::LogicalReduction(const Output& arg, const AxisSet& reduction_axes) : Op({arg, op::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) ->output(0)}) { add_provenance_group_member(input_value(1).get_node_shared_ptr()); @@ -57,9 +57,10 @@ const AxisSet op::util::LogicalReduction::get_reduction_axes() const void op::util::LogicalReduction::set_reduction_axes(const AxisSet& reduction_axes) { - this->input(1).replace_source_output( - op::Constant::create(element::i64, Shape{reduction_axes.size()}, reduction_axes.to_vector()) - ->output(0)); + this->input(1).replace_source_output(op::Constant::create(element::Type_t::i64, + 
Shape{reduction_axes.size()}, + reduction_axes.to_vector()) + ->output(0)); } void op::util::LogicalReduction::validate_and_infer_types() @@ -111,8 +112,8 @@ void op::util::LogicalReduction::validate_and_infer_types() set_input_is_relevant_to_shape(1); NODE_VALIDATION_CHECK(this, - get_input_element_type(0).compatible(element::boolean), + get_input_element_type(0).compatible(element::Type_t::boolean), "Input element type must be boolean."); - set_output_type(0, element::boolean, result_shape); + set_output_type(0, element::Type_t::boolean, result_shape); } diff --git a/ngraph/core/src/op/util/rnn_cell_base.cpp b/ngraph/core/src/op/util/rnn_cell_base.cpp index 9a9c56e018d..12ae26565aa 100644 --- a/ngraph/core/src/op/util/rnn_cell_base.cpp +++ b/ngraph/core/src/op/util/rnn_cell_base.cpp @@ -46,7 +46,7 @@ std::shared_ptr ngraph::op::util::convert_lstm_node_format(const Output(element::i64, Shape{}, axis); + auto axis_const = std::make_shared(element::Type_t::i64, Shape{}, axis); OutputVector splitted_node = std::make_shared(node, axis_const, num_gates)->outputs(); OutputVector nodes_in_new_format(num_gates); diff --git a/ngraph/core/src/op/util/scatter_nd_base.cpp b/ngraph/core/src/op/util/scatter_nd_base.cpp index 2bb6b9cb8af..7a95b0a35fa 100644 --- a/ngraph/core/src/op/util/scatter_nd_base.cpp +++ b/ngraph/core/src/op/util/scatter_nd_base.cpp @@ -50,7 +50,7 @@ void op::util::ScatterNDBase::validate_and_infer_types() const PartialShape& updates_shape = get_input_partial_shape(UPDATES); NODE_VALIDATION_CHECK(this, - indices_et == element::i32 || indices_et == element::i64, + indices_et == element::Type_t::i32 || indices_et == element::Type_t::i64, "Indices element type must be i64 or i32"); NODE_VALIDATION_CHECK( diff --git a/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp b/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp index 6ececc9b273..1c79c1e7657 100644 --- a/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp +++ b/ngraph/core/src/op/util/unary_elementwise_arithmetic.cpp @@ -36,7 +36,7 @@ void op::util::UnaryElementwiseArithmetic::validate_and_infer_elementwise_arithm PartialShape& args_pshape = std::get<1>(args_et_pshape); NODE_VALIDATION_CHECK(this, - args_et.is_dynamic() || args_et != element::boolean, + args_et.is_dynamic() || args_et != element::Type_t::boolean, "Arguments cannot have boolean element type (argument element type: ", args_et, ")."); diff --git a/ngraph/core/src/pass/convert_fp32_to_fp16.cpp b/ngraph/core/src/pass/convert_fp32_to_fp16.cpp index 60d87ed5c1d..8a908bb3cb3 100644 --- a/ngraph/core/src/pass/convert_fp32_to_fp16.cpp +++ b/ngraph/core/src/pass/convert_fp32_to_fp16.cpp @@ -25,8 +25,8 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::ConvertFP32ToFP16, "ConvertFP32ToFP16", 0); void pass::ConvertFP32ToFP16::convert_constants_precision() { - auto constant = - std::make_shared(element::f32, Shape{1}, std::vector{0}); + auto constant = std::make_shared( + element::Type_t::f32, Shape{1}, std::vector{0}); ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) { auto constant = std::dynamic_pointer_cast(m.get_match_root()); @@ -35,7 +35,7 @@ void pass::ConvertFP32ToFP16::convert_constants_precision() return false; } - if (constant->get_element_type() == element::f32) + if (constant->get_element_type() == element::Type_t::f32) { auto data = constant->get_vector(); std::vector new_data(data.size()); @@ -44,7 +44,7 @@ void pass::ConvertFP32ToFP16::convert_constants_precision() new_data[i] = ngraph::float16(data[i]); } auto new_const = 
std::make_shared( - element::f16, constant->get_shape(), new_data); + element::Type_t::f16, constant->get_shape(), new_data); new_const->set_friendly_name(constant->get_friendly_name()); ngraph::replace_node(constant, new_const); return true; @@ -60,13 +60,13 @@ void pass::ConvertFP32ToFP16::convert_constants_precision() void pass::ConvertFP32ToFP16::convert_parameters_precision() { - auto constant = std::make_shared(element::f32, Shape{1}); + auto constant = std::make_shared(element::Type_t::f32, Shape{1}); ngraph::graph_rewrite_callback callback = [](pattern::Matcher& m) { auto parameter = std::dynamic_pointer_cast(m.get_match_root()); - if (parameter && parameter->get_element_type() == element::f32) + if (parameter && parameter->get_element_type() == element::Type_t::f32) { - parameter->set_element_type(element::f16); + parameter->set_element_type(element::Type_t::f16); return true; } return false; diff --git a/ngraph/core/src/pattern/op/label.cpp b/ngraph/core/src/pattern/op/label.cpp index 52d807afa74..129e9d5c57a 100644 --- a/ngraph/core/src/pattern/op/label.cpp +++ b/ngraph/core/src/pattern/op/label.cpp @@ -68,5 +68,6 @@ std::shared_ptr pattern::any_input() std::shared_ptr pattern::any_input(const pattern::op::ValuePredicate& pred) { - return std::make_shared(element::dynamic, PartialShape::dynamic(), pred); + return std::make_shared( + element::Type_t::dynamic, PartialShape::dynamic(), pred); } diff --git a/ngraph/core/src/runtime/host_tensor.cpp b/ngraph/core/src/runtime/host_tensor.cpp index 5a8c7fe8505..2af9bcd4b39 100644 --- a/ngraph/core/src/runtime/host_tensor.cpp +++ b/ngraph/core/src/runtime/host_tensor.cpp @@ -62,7 +62,7 @@ runtime::HostTensor::HostTensor(const element::Type& element_type, } runtime::HostTensor::HostTensor(const std::string& name) - : HostTensor(element::dynamic, PartialShape::dynamic()) + : HostTensor(element::Type_t::dynamic, PartialShape::dynamic()) { } diff --git a/ngraph/core/src/type/element_type.cpp b/ngraph/core/src/type/element_type.cpp index 828c3b7c787..81a3d01345c 100644 --- a/ngraph/core/src/type/element_type.cpp +++ b/ngraph/core/src/type/element_type.cpp @@ -26,6 +26,7 @@ using namespace ngraph; using namespace std; +NGRAPH_SUPPRESS_DEPRECATED_START const element::Type element::undefined(element::Type_t::undefined); const element::Type element::dynamic(element::Type_t::dynamic); const element::Type element::boolean(element::Type_t::boolean); @@ -42,6 +43,7 @@ const element::Type element::u8(element::Type_t::u8); const element::Type element::u16(element::Type_t::u16); const element::Type element::u32(element::Type_t::u32); const element::Type element::u64(element::Type_t::u64); +NGRAPH_SUPPRESS_DEPRECATED_END constexpr DiscreteTypeInfo AttributeAdapter::type_info; @@ -102,26 +104,6 @@ static const element_types_map_t& get_type_info_map() return s_type_info_map; }; -std::vector element::Type::get_known_types() -{ - std::vector rc = {&element::dynamic, - &element::boolean, - &element::bf16, - &element::f16, - &element::f32, - &element::f64, - &element::i8, - &element::i16, - &element::i32, - &element::i64, - &element::u1, - &element::u8, - &element::u16, - &element::u32, - &element::u64}; - return rc; -} - element::Type::Type(size_t bitwidth, bool is_real, bool is_signed, @@ -145,6 +127,11 @@ const std::string& element::Type::c_type_string() const return get_type_info_map().at(m_type).m_cname; } +bool element::Type::operator==(const element::Type_t& other) const +{ + return m_type == other; +} + bool element::Type::operator==(const 
element::Type& other) const { return m_type == other.m_type; @@ -292,7 +279,7 @@ bool element::Type::is_real() const bool element::Type::is_integral_number() const { - return is_integral() && (m_type != element::boolean); + return is_integral() && (m_type != element::Type_t::boolean); } bool element::Type::is_signed() const diff --git a/ngraph/core/src/util.cpp b/ngraph/core/src/util.cpp index 5cedea190ac..6ab6f7aef64 100644 --- a/ngraph/core/src/util.cpp +++ b/ngraph/core/src/util.cpp @@ -481,7 +481,7 @@ vector read_float_vector(shared_ptr tv) vector float_vec; element::Type element_type = tv->get_element_type(); - if (element_type == element::boolean) + if (element_type == element::Type_t::boolean) { vector vec = read_vector(tv); // Changed from vector ctor to explicit for loop to add static_cast @@ -491,12 +491,12 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::bf16) + else if (element_type == element::Type_t::bf16) { vector vec = read_vector(tv); float_vec = bfloat16::to_float_vector(vec); } - else if (element_type == element::f16) + else if (element_type == element::Type_t::f16) { vector vec = read_vector(tv); for (float16 value : vec) @@ -504,7 +504,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::f32) + else if (element_type == element::Type_t::f32) { vector vec = read_vector(tv); for (float value : vec) @@ -512,7 +512,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::f64) + else if (element_type == element::Type_t::f64) { vector vec = read_vector(tv); for (double value : vec) @@ -520,7 +520,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::i8) + else if (element_type == element::Type_t::i8) { vector vec = read_vector(tv); for (int8_t value : vec) @@ -528,7 +528,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::i16) + else if (element_type == element::Type_t::i16) { vector vec = read_vector(tv); for (int16_t value : vec) @@ -536,7 +536,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::i32) + else if (element_type == element::Type_t::i32) { vector vec = read_vector(tv); for (int32_t value : vec) @@ -544,7 +544,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::i64) + else if (element_type == element::Type_t::i64) { vector vec = read_vector(tv); for (int64_t value : vec) @@ -552,7 +552,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::u8) + else if (element_type == element::Type_t::u8) { vector vec = read_vector(tv); for (uint8_t value : vec) @@ -560,7 +560,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::u16) + else if (element_type == element::Type_t::u16) { vector vec = read_vector(tv); for (uint16_t value : vec) @@ -568,7 +568,7 @@ vector read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::u32) + else if (element_type == element::Type_t::u32) { vector vec = read_vector(tv); for (uint32_t value : vec) @@ -576,7 +576,7 @@ vector 
read_float_vector(shared_ptr tv) float_vec.push_back(static_cast(value)); } } - else if (element_type == element::u64) + else if (element_type == element::Type_t::u64) { vector vec = read_vector(tv); for (uint64_t value : vec) @@ -597,7 +597,7 @@ vector read_index_vector(shared_ptr tv) vector index_vec; element::Type element_type = tv->get_element_type(); - if (element_type == element::boolean) + if (element_type == element::Type_t::boolean) { vector vec = read_vector(tv); // Changed from vector ctor to explicit for loop to add static_cast @@ -607,7 +607,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::bf16) + else if (element_type == element::Type_t::bf16) { vector vec = read_vector(tv); vector float_vec = bfloat16::to_float_vector(vec); @@ -616,7 +616,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::f16) + else if (element_type == element::Type_t::f16) { vector vec = read_vector(tv); for (float16 value : vec) @@ -624,7 +624,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(static_cast(value))); } } - else if (element_type == element::f32) + else if (element_type == element::Type_t::f32) { vector vec = read_vector(tv); for (float value : vec) @@ -632,7 +632,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::f64) + else if (element_type == element::Type_t::f64) { vector vec = read_vector(tv); for (double value : vec) @@ -640,7 +640,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::i8) + else if (element_type == element::Type_t::i8) { vector vec = read_vector(tv); for (int8_t value : vec) @@ -648,7 +648,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::i16) + else if (element_type == element::Type_t::i16) { vector vec = read_vector(tv); for (int16_t value : vec) @@ -656,7 +656,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::i32) + else if (element_type == element::Type_t::i32) { vector vec = read_vector(tv); for (int32_t value : vec) @@ -664,11 +664,11 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::i64) + else if (element_type == element::Type_t::i64) { index_vec = read_vector(tv); } - else if (element_type == element::u8) + else if (element_type == element::Type_t::u8) { vector vec = read_vector(tv); for (uint8_t value : vec) @@ -676,7 +676,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::u16) + else if (element_type == element::Type_t::u16) { vector vec = read_vector(tv); for (uint16_t value : vec) @@ -684,7 +684,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::u32) + else if (element_type == element::Type_t::u32) { vector vec = read_vector(tv); for (uint32_t value : vec) @@ -692,7 +692,7 @@ vector read_index_vector(shared_ptr tv) index_vec.push_back(static_cast(value)); } } - else if (element_type == element::u64) + else if (element_type == element::Type_t::u64) { vector vec = read_vector(tv); for (uint64_t value : vec) diff --git 
a/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp b/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp index 67890b719b5..d8415d54319 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/core/tensor.hpp @@ -531,7 +531,7 @@ namespace ngraph return static_cast(m_tensor_proto->data_type()); } - const element::Type& get_ng_type() const + element::Type get_ng_type() const { if (!m_tensor_proto->has_data_type()) { @@ -540,29 +540,29 @@ namespace ngraph switch (m_tensor_proto->data_type()) { case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL: - return element::boolean; + return element::Type_t::boolean; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT: - return element::f32; + return element::Type_t::f32; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16: - return element::f16; + return element::Type_t::f16; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE: - return element::f64; + return element::Type_t::f64; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8: - return element::i8; + return element::Type_t::i8; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16: - return element::i16; + return element::Type_t::i16; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32: - return element::i32; + return element::Type_t::i32; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64: - return element::i64; + return element::Type_t::i64; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8: - return element::u8; + return element::Type_t::u8; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16: - return element::u16; + return element::Type_t::u16; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32: - return element::u32; + return element::Type_t::u32; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64: - return element::u64; + return element::Type_t::u64; case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UNDEFINED: throw error::tensor::data_type_undefined{}; default: throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()}; @@ -575,29 +575,29 @@ namespace ngraph switch (m_tensor_proto->data_type()) { case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_BOOL: - return make_ng_constant(element::boolean); + return make_ng_constant(element::Type_t::boolean); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT: - return make_ng_constant(element::f32); + return make_ng_constant(element::Type_t::f32); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_FLOAT16: - return make_ng_constant(element::f16); + return make_ng_constant(element::Type_t::f16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_DOUBLE: - return make_ng_constant(element::f64); + return make_ng_constant(element::Type_t::f64); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT8: - return make_ng_constant(element::i8); + return make_ng_constant(element::Type_t::i8); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT16: - return make_ng_constant(element::i16); + return make_ng_constant(element::Type_t::i16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT32: - return make_ng_constant(element::i32); + return make_ng_constant(element::Type_t::i32); case 
ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_INT64: - return make_ng_constant(element::i64); + return make_ng_constant(element::Type_t::i64); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT8: - return make_ng_constant(element::u8); + return make_ng_constant(element::Type_t::u8); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT16: - return make_ng_constant(element::u16); + return make_ng_constant(element::Type_t::u16); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT32: - return make_ng_constant(element::u32); + return make_ng_constant(element::Type_t::u32); case ONNX_NAMESPACE::TensorProto_DataType::TensorProto_DataType_UINT64: - return make_ng_constant(element::u64); + return make_ng_constant(element::Type_t::u64); default: throw error::tensor::unsupported_data_type{m_tensor_proto->data_type()}; } } diff --git a/ngraph/frontend/onnx_import/include/onnx_import/core/value_info.hpp b/ngraph/frontend/onnx_import/include/onnx_import/core/value_info.hpp index 1d98c16d364..c45287d988b 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/core/value_info.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/core/value_info.hpp @@ -75,7 +75,7 @@ namespace ngraph const std::string& get_name() const { return m_value_info_proto->name(); } const PartialShape& get_shape() const { return m_partial_shape; } - const element::Type& get_element_type() const + element::Type get_element_type() const { if (!m_value_info_proto->type().tensor_type().has_elem_type()) { diff --git a/ngraph/frontend/onnx_import/include/onnx_import/op/gather.hpp b/ngraph/frontend/onnx_import/include/onnx_import/op/gather.hpp index 762d3b6c916..6556db9864c 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/op/gather.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/op/gather.hpp @@ -43,7 +43,8 @@ namespace ngraph return {std::make_shared( data, indices, - default_opset::Constant::create(element::i64, Shape{}, {valid_axis}))}; + default_opset::Constant::create( + element::Type_t::i64, Shape{}, {valid_axis}))}; } } // namespace set_1 diff --git a/ngraph/frontend/onnx_import/include/onnx_import/op/identity.hpp b/ngraph/frontend/onnx_import/include/onnx_import/op/identity.hpp index 079148225dc..3a4ab0174a0 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/op/identity.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/op/identity.hpp @@ -33,10 +33,10 @@ namespace ngraph inline OutputVector identity(const Node& node) { auto input = node.get_ng_inputs().at(0); - if (input.get_element_type() == ngraph::element::boolean) + if (input.get_element_type() == ngraph::element::Type_t::boolean) { - const auto logic_zero = - default_opset::Constant::create(ngraph::element::boolean, {}, {false}); + const auto logic_zero = default_opset::Constant::create( + ngraph::element::Type_t::boolean, {}, {false}); return {std::make_shared(input, logic_zero)}; } const auto zero = diff --git a/ngraph/frontend/onnx_import/include/onnx_import/utils/common.hpp b/ngraph/frontend/onnx_import/include/onnx_import/utils/common.hpp index a0157a2c0f9..45ef95a7328 100644 --- a/ngraph/frontend/onnx_import/include/onnx_import/utils/common.hpp +++ b/ngraph/frontend/onnx_import/include/onnx_import/utils/common.hpp @@ -37,7 +37,7 @@ namespace ngraph { namespace common { - const ngraph::element::Type& get_ngraph_element_type(std::int64_t onnx_type); + const ngraph::element::Type get_ngraph_element_type(std::int64_t onnx_type); /// \brief Return a monotonic 
sequence. /// diff --git a/ngraph/frontend/onnx_import/src/op/constant.cpp b/ngraph/frontend/onnx_import/src/op/constant.cpp index 3a1718f6154..2a33e52232a 100644 --- a/ngraph/frontend/onnx_import/src/op/constant.cpp +++ b/ngraph/frontend/onnx_import/src/op/constant.cpp @@ -62,84 +62,84 @@ namespace ngraph inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::f16, tensor); + return __make_ng_constant(element::Type_t::f16, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::f32, tensor); + return __make_ng_constant(element::Type_t::f32, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::f64, tensor); + return __make_ng_constant(element::Type_t::f64, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::i8, tensor); + return __make_ng_constant(element::Type_t::i8, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::i16, tensor); + return __make_ng_constant(element::Type_t::i16, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::i32, tensor); + return __make_ng_constant(element::Type_t::i32, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::i64, tensor); + return __make_ng_constant(element::Type_t::i64, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::u8, tensor); + return __make_ng_constant(element::Type_t::u8, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::u16, tensor); + return __make_ng_constant(element::Type_t::u16, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::u32, tensor); + return __make_ng_constant(element::Type_t::u32, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::u64, tensor); + return __make_ng_constant(element::Type_t::u64, tensor); } template <> inline std::shared_ptr make_ng_constant(const Tensor& tensor) { - return __make_ng_constant(element::boolean, tensor); + return __make_ng_constant(element::Type_t::boolean, tensor); } inline std::shared_ptr diff --git a/ngraph/frontend/onnx_import/src/op/constant_of_shape.cpp b/ngraph/frontend/onnx_import/src/op/constant_of_shape.cpp index cf6b91f1097..8b33b027fd9 100644 --- a/ngraph/frontend/onnx_import/src/op/constant_of_shape.cpp +++ b/ngraph/frontend/onnx_import/src/op/constant_of_shape.cpp @@ -39,7 +39,8 @@ namespace ngraph } else { - constant_value = default_opset::Constant::create(element::f32, {}, {0}); + constant_value = + default_opset::Constant::create(element::Type_t::f32, {}, {0}); } return {std::make_shared(constant_value, node.get_ng_inputs().at(0))}; diff --git a/ngraph/frontend/onnx_import/src/op/conv_integer.cpp b/ngraph/frontend/onnx_import/src/op/conv_integer.cpp index 76d55a15618..e6b52ea5acb 100644 --- a/ngraph/frontend/onnx_import/src/op/conv_integer.cpp +++ b/ngraph/frontend/onnx_import/src/op/conv_integer.cpp @@ -63,10 +63,11 @@ namespace ngraph padding_above); const Strides default_data_dilation_strides(input.get_shape().size() - 2, 1); - 
auto scale_one = make_constant(ngraph::element::f32, Shape{}, 1); + auto scale_one = make_constant(ngraph::element::Type_t::f32, Shape{}, 1); auto input_zero_point = make_constant(input.get_element_type(), Shape{}, 0); auto filters_zero_point = make_constant(filters.get_element_type(), Shape{}, 0); - auto output_zero_point = make_constant(ngraph::element::i32, Shape{}, 0); + auto output_zero_point = + make_constant(ngraph::element::Type_t::i32, Shape{}, 0); if (num_inputs == 2) { @@ -84,7 +85,7 @@ namespace ngraph filters_zero_point, scale_one, output_zero_point, - ngraph::element::i32, + ngraph::element::Type_t::i32, ngraph::AxisSet{}, ngraph::AxisSet{}, ngraph::AxisSet{})}; @@ -110,7 +111,7 @@ namespace ngraph filters_zero_point, scale_one, output_zero_point, - ngraph::element::i32, + ngraph::element::Type_t::i32, ngraph::AxisSet{}, ngraph::AxisSet{}, ngraph::AxisSet{})}; diff --git a/ngraph/frontend/onnx_import/src/op/conv_transpose.cpp b/ngraph/frontend/onnx_import/src/op/conv_transpose.cpp index 8b7b2ea7516..3bc7974a1a6 100644 --- a/ngraph/frontend/onnx_import/src/op/conv_transpose.cpp +++ b/ngraph/frontend/onnx_import/src/op/conv_transpose.cpp @@ -74,7 +74,7 @@ namespace ngraph data, filters, default_opset::Constant::create( - element::i64, Shape{output_shape.size()}, output_shape), + element::Type_t::i64, Shape{output_shape.size()}, output_shape), strides, dilations, auto_pad_type, @@ -113,7 +113,7 @@ namespace ngraph data, filters, default_opset::Constant::create( - element::i64, Shape{output_shape.size()}, output_shape), + element::Type_t::i64, Shape{output_shape.size()}, output_shape), strides, pads_begin, pads_end, @@ -144,10 +144,10 @@ namespace ngraph std::make_shared(filters); const auto filters_rank = std::make_shared(filters_shape); - const auto one_node = - default_opset::Constant::create(element::i64, Shape{1}, {1}); - const auto zero_node = - default_opset::Constant::create(element::i64, Shape{1}, {0}); + const auto one_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {1}); + const auto zero_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {0}); std::shared_ptr in_c_dim = std::make_shared( @@ -166,8 +166,8 @@ namespace ngraph std::vector{0}); // end mask // Apply shape layout transformation: - const auto groups_node = - default_opset::Constant::create(element::i64, Shape{1}, {groups}); + const auto groups_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {groups}); in_c_dim = std::make_shared(in_c_dim, groups_node); @@ -192,7 +192,7 @@ namespace ngraph new_bias_shape[1] = conv_pshape[1].get_length(); bias_shape_node = default_opset::Constant::create( - element::i64, Shape{new_bias_shape.size()}, new_bias_shape); + element::Type_t::i64, Shape{new_bias_shape.size()}, new_bias_shape); } else { @@ -201,10 +201,10 @@ namespace ngraph std::make_shared(conv_shape); // Prepare new bias shape base: [1, 1, 1, 1, ... 
] - const auto one_node = - default_opset::Constant::create(element::i64, Shape{1}, {1}); - const auto two_node = - default_opset::Constant::create(element::i64, Shape{1}, {2}); + const auto one_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {1}); + const auto two_node = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {2}); const auto remaining_shape_length = std::make_shared(conv_rank, two_node); const auto remaining_bias_shape_ones = diff --git a/ngraph/frontend/onnx_import/src/op/cum_sum.cpp b/ngraph/frontend/onnx_import/src/op/cum_sum.cpp index 3397f666b96..06337928d74 100644 --- a/ngraph/frontend/onnx_import/src/op/cum_sum.cpp +++ b/ngraph/frontend/onnx_import/src/op/cum_sum.cpp @@ -41,8 +41,8 @@ namespace ngraph } else { - axis = - default_opset::Constant::create(element::i64, Shape{}, {0}); // default + axis = default_opset::Constant::create( + element::Type_t::i64, Shape{}, {0}); // default } return OutputVector{ std::make_shared(data, axis, exclusive, reverse)}; diff --git a/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp b/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp index cbe0c49e529..9ea2340ba03 100644 --- a/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp +++ b/ngraph/frontend/onnx_import/src/op/dequantize_linear.cpp @@ -41,17 +41,17 @@ namespace ngraph { auto zero_point = inputs[2]; - if (zero_point.get_element_type() != element::f32) + if (zero_point.get_element_type() != element::Type_t::f32) { - zero_point = - std::make_shared(zero_point, element::f32); + zero_point = std::make_shared( + zero_point, element::Type_t::f32); } return zero_point; } else { - return default_opset::Constant::create(element::f32, Shape{}, {0}); + return default_opset::Constant::create(element::Type_t::f32, Shape{}, {0}); } } } @@ -70,12 +70,13 @@ namespace ngraph const auto scale = inputs[1]; const auto zero_point = get_zero_point(inputs); - common::validate_scalar_input( - "Dequantization scale", scale.get_node_shared_ptr(), {element::f32}); + common::validate_scalar_input("Dequantization scale", + scale.get_node_shared_ptr(), + {element::Type_t::f32}); common::validate_scalar_input("Zero point", zero_point.get_node_shared_ptr()); const auto converted_x = - std::make_shared(x, element::f32); + std::make_shared(x, element::Type_t::f32); return {std::make_shared( std::make_shared(converted_x, zero_point), scale)}; @@ -163,7 +164,7 @@ namespace ngraph } const auto target_shape = default_opset::Constant::create( - element::i64, Shape{target_dims.size()}, target_dims); + element::Type_t::i64, Shape{target_dims.size()}, target_dims); return std::make_shared(input, target_shape, true); } @@ -198,7 +199,7 @@ namespace ngraph zero_point = reshape_input(zero_point, axis, x_shape); const auto converted_x = - std::make_shared(x, element::f32); + std::make_shared(x, element::Type_t::f32); return {std::make_shared( std::make_shared(converted_x, zero_point), scale)}; diff --git a/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp b/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp index 30b6d4b317f..8b68552de15 100644 --- a/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/global_average_pool.cpp @@ -57,7 +57,7 @@ namespace ngraph auto reduce_axes_vector = std::vector(data_spatial_rank); std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2); auto reduce_axes = default_opset::Constant::create( - element::i64, Shape{data_spatial_rank}, reduce_axes_vector); + 
element::Type_t::i64, Shape{data_spatial_rank}, reduce_axes_vector); return {std::make_shared(data, reduce_axes, true)}; } diff --git a/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp b/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp index 53af9d60114..9b92ee22eb9 100644 --- a/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/global_max_pool.cpp @@ -57,7 +57,7 @@ namespace ngraph auto reduce_axes_vector = std::vector(data_spatial_rank); std::iota(reduce_axes_vector.begin(), reduce_axes_vector.end(), 2); auto reduce_axes = default_opset::Constant::create( - element::i64, Shape{data_spatial_rank}, reduce_axes_vector); + element::Type_t::i64, Shape{data_spatial_rank}, reduce_axes_vector); return {std::make_shared(data, reduce_axes, true)}; } diff --git a/ngraph/frontend/onnx_import/src/op/hardmax.cpp b/ngraph/frontend/onnx_import/src/op/hardmax.cpp index 0f4ea157b58..9baf0dcfe76 100644 --- a/ngraph/frontend/onnx_import/src/op/hardmax.cpp +++ b/ngraph/frontend/onnx_import/src/op/hardmax.cpp @@ -50,22 +50,22 @@ namespace ngraph std::make_shared(coerced_tensor); Output row_size = std::make_shared( coerced_tensor_shape, - default_opset::Constant::create(element::i64, {1}, {1}), - default_opset::Constant::create(element::i64, {}, {0})); + default_opset::Constant::create(element::Type_t::i64, {1}, {1}), + default_opset::Constant::create(element::Type_t::i64, {}, {0})); row_size = ngraph::onnx_import::reshape::interpret_as_scalar(row_size); const auto indices_axis = 1; const auto topk = std::make_shared( coerced_tensor, - default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}), + default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1}), indices_axis, default_opset::TopK::Mode::MAX, default_opset::TopK::SortType::NONE); const auto on_value = - default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}); + default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1}); const auto off_value = - default_opset::Constant::create(ngraph::element::i64, Shape{}, {0}); + default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {0}); const auto results = std::make_shared( topk->output(1), row_size, on_value, off_value, indices_axis); diff --git a/ngraph/frontend/onnx_import/src/op/instance_norm.cpp b/ngraph/frontend/onnx_import/src/op/instance_norm.cpp index 9516ea52a9a..4a1ca7aba6a 100644 --- a/ngraph/frontend/onnx_import/src/op/instance_norm.cpp +++ b/ngraph/frontend/onnx_import/src/op/instance_norm.cpp @@ -99,7 +99,7 @@ namespace ngraph if (data_pshape.is_static()) { data_shape_node = std::make_shared( - element::i64, + element::Type_t::i64, Shape{static_cast(data_pshape.rank().get_length())}, data_pshape.to_shape()); } @@ -112,11 +112,13 @@ namespace ngraph scale = std::make_shared( scale, data_shape_node, - std::make_shared(element::i64, Shape{1}, 1)); + std::make_shared( + element::Type_t::i64, Shape{1}, 1)); bias = std::make_shared( bias, data_shape_node, - std::make_shared(element::i64, Shape{1}, 1)); + std::make_shared( + element::Type_t::i64, Shape{1}, 1)); // scale * mvn + bias std::shared_ptr result = diff --git a/ngraph/frontend/onnx_import/src/op/log_softmax.cpp b/ngraph/frontend/onnx_import/src/op/log_softmax.cpp index c19ca2b86c0..a083613e2be 100644 --- a/ngraph/frontend/onnx_import/src/op/log_softmax.cpp +++ b/ngraph/frontend/onnx_import/src/op/log_softmax.cpp @@ -32,7 +32,8 @@ namespace ngraph { const auto coerced_data = ngraph::builder::opset1::flatten(data, axis); - 
const auto axis_1 = default_opset::Constant::create(element::i64, Shape{1}, {1}); + const auto axis_1 = + default_opset::Constant::create(element::Type_t::i64, Shape{1}, {1}); const auto max = std::make_shared(coerced_data, axis_1, true); diff --git a/ngraph/frontend/onnx_import/src/op/loop.cpp b/ngraph/frontend/onnx_import/src/op/loop.cpp index 2039b12b46f..519b28c6318 100644 --- a/ngraph/frontend/onnx_import/src/op/loop.cpp +++ b/ngraph/frontend/onnx_import/src/op/loop.cpp @@ -62,7 +62,7 @@ namespace ngraph ->input_value(1) .get_node_shared_ptr(); if (ngraph::op::is_constant(second_input) && - second_input->get_element_type() == element::boolean && + second_input->get_element_type() == element::Type_t::boolean && as_type_ptr(second_input) ->cast_vector() .at(0) == false) @@ -90,7 +90,8 @@ namespace ngraph if (ngraph::op::is_null(ng_inputs.at(0))) // trip count skipped { // -1 means infinite Loop - trip_count = ngraph::op::Constant::create(ngraph::element::i64, {1}, {-1}); + trip_count = + ngraph::op::Constant::create(ngraph::element::Type_t::i64, {1}, {-1}); } else { @@ -102,8 +103,8 @@ namespace ngraph if (ngraph::op::is_null( ng_inputs.at(1).get_node_shared_ptr())) // termination condition skipped { - termination_cond = - ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true}); + termination_cond = ngraph::op::Constant::create( + ngraph::element::Type_t::boolean, {1}, {true}); } else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) && as_type_ptr( @@ -130,8 +131,8 @@ namespace ngraph } const int64_t concat_axis = 0; - const auto concat_axis_const = - ngraph::op::Constant::create(ngraph::element::i64, {1}, {concat_axis}); + const auto concat_axis_const = ngraph::op::Constant::create( + ngraph::element::Type_t::i64, {1}, {concat_axis}); // provide scalar handing for scan outputs for (size_t i = loop_carried_dependencies.size() + 1; i < body_outputs.size(); ++i) @@ -149,8 +150,8 @@ namespace ngraph // optimization allow to improve nG Loop shape inference if (is_termination_condition_always_true(body_loop_out_cond)) { - body_outputs[0] = - ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true}); + body_outputs[0] = ngraph::op::Constant::create( + ngraph::element::Type_t::boolean, {1}, {true}); } CHECK_VALID_NODE(node, diff --git a/ngraph/frontend/onnx_import/src/op/lp_norm.cpp b/ngraph/frontend/onnx_import/src/op/lp_norm.cpp index 3bdd9a71a67..11f941fcf13 100644 --- a/ngraph/frontend/onnx_import/src/op/lp_norm.cpp +++ b/ngraph/frontend/onnx_import/src/op/lp_norm.cpp @@ -58,12 +58,14 @@ namespace ngraph "Only normalization of 1st or 2nd order is supported."); const auto normalize_axis_const = - default_opset::Constant::create(element::i64, {}, {normalize_axis}); + default_opset::Constant::create(element::Type_t::i64, {}, {normalize_axis}); std::shared_ptr norm = ngraph::builder::opset1::lp_norm( data, normalize_axis_const, static_cast(p_norm)); - const auto target_shape = default_opset::Constant::create( - element::i64, Shape{size_t(data_rank_value)}, data_shape.to_shape()); + const auto target_shape = + default_opset::Constant::create(element::Type_t::i64, + Shape{size_t(data_rank_value)}, + data_shape.to_shape()); // Create a default axes order matching the data tensor rank and erase the // element at the 'normalize_axis' position. 
The erased element indicates the @@ -74,7 +76,7 @@ namespace ngraph axes_values.erase(axes_values.begin() + normalize_axis); const auto axes_mapping = default_opset::Constant::create( - element::i64, Shape{axes_values.size()}, axes_values); + element::Type_t::i64, Shape{axes_values.size()}, axes_values); norm = std::make_shared( norm, target_shape, axes_mapping); diff --git a/ngraph/frontend/onnx_import/src/op/lp_pool.cpp b/ngraph/frontend/onnx_import/src/op/lp_pool.cpp index aa7337572d0..65ab066240c 100644 --- a/ngraph/frontend/onnx_import/src/op/lp_pool.cpp +++ b/ngraph/frontend/onnx_import/src/op/lp_pool.cpp @@ -75,7 +75,7 @@ namespace ngraph output_shape.at(0) = data_shape[0].get_length(); const auto reshape_pattern = default_opset::Constant::create( - element::i64, Shape{output_shape.size()}, output_shape); + element::Type_t::i64, Shape{output_shape.size()}, output_shape); slice = std::make_shared(slice, reshape_pattern, false); diff --git a/ngraph/frontend/onnx_import/src/op/lstm.cpp b/ngraph/frontend/onnx_import/src/op/lstm.cpp index c67b260e78c..e53963ba0b5 100644 --- a/ngraph/frontend/onnx_import/src/op/lstm.cpp +++ b/ngraph/frontend/onnx_import/src/op/lstm.cpp @@ -211,7 +211,7 @@ namespace ngraph m_input_map[LSTMInput::LSTM_INPUT_SEQ_LENGTHS] = default_opset::Constant::create( - element::i32, + element::Type_t::i32, Shape{m_dim_map[LSTMInputDimension::BATCH_SIZE]}, std::vector( m_dim_map[LSTMInputDimension::BATCH_SIZE], diff --git a/ngraph/frontend/onnx_import/src/op/non_max_suppression.cpp b/ngraph/frontend/onnx_import/src/op/non_max_suppression.cpp index b41b409a136..3a338391328 100644 --- a/ngraph/frontend/onnx_import/src/op/non_max_suppression.cpp +++ b/ngraph/frontend/onnx_import/src/op/non_max_suppression.cpp @@ -49,7 +49,7 @@ namespace ngraph else { max_output_boxes_per_class = - default_opset::Constant::create(element::i64, Shape{}, {0}); + default_opset::Constant::create(element::Type_t::i64, Shape{}, {0}); } Output iou_threshold; @@ -61,7 +61,7 @@ namespace ngraph else { iou_threshold = - default_opset::Constant::create(element::f32, Shape{}, {.0f}); + default_opset::Constant::create(element::Type_t::f32, Shape{}, {.0f}); } Output score_threshold; @@ -73,7 +73,7 @@ namespace ngraph else { score_threshold = - default_opset::Constant::create(element::f32, Shape{}, {.0f}); + default_opset::Constant::create(element::Type_t::f32, Shape{}, {.0f}); } const auto center_point_box = diff --git a/ngraph/frontend/onnx_import/src/op/non_zero.cpp b/ngraph/frontend/onnx_import/src/op/non_zero.cpp index 2c96ec1c106..e72b5da9208 100644 --- a/ngraph/frontend/onnx_import/src/op/non_zero.cpp +++ b/ngraph/frontend/onnx_import/src/op/non_zero.cpp @@ -30,7 +30,7 @@ namespace ngraph OutputVector non_zero(const Node& node) { const auto data = node.get_ng_inputs().at(0); - return {std::make_shared(data, element::i64)}; + return {std::make_shared(data, element::Type_t::i64)}; } } // namespace set_1 diff --git a/ngraph/frontend/onnx_import/src/op/onehot.cpp b/ngraph/frontend/onnx_import/src/op/onehot.cpp index 229b4ed6d90..018b4d99fbe 100644 --- a/ngraph/frontend/onnx_import/src/op/onehot.cpp +++ b/ngraph/frontend/onnx_import/src/op/onehot.cpp @@ -32,13 +32,14 @@ namespace ngraph OutputVector onehot(const Node& node) { OutputVector inputs{node.get_ng_inputs()}; - auto indices = - std::make_shared(inputs.at(0), element::i64); + auto indices = std::make_shared(inputs.at(0), + element::Type_t::i64); auto depth = reshape::interpret_as_scalar(inputs.at(1)); // Rank 1 tensor containing exactly two 
elements: [off_value, on_value] auto values = inputs.at(2); - auto split_axis = default_opset::Constant::create(element::i64, {}, {0}); + auto split_axis = + default_opset::Constant::create(element::Type_t::i64, {}, {0}); auto off_on_values = std::make_shared(values, split_axis, 2); auto off_value = reshape::interpret_as_scalar(off_on_values->output(0)); diff --git a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp index bdc0294d92f..225c6ddc1f5 100644 --- a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp +++ b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/group_norm.cpp @@ -55,13 +55,13 @@ namespace ngraph new_shape.push_back(shape[i]); } return default_opset::Constant::create( - element::i64, Shape{new_shape.size()}, new_shape); + element::Type_t::i64, Shape{new_shape.size()}, new_shape); } auto shape = std::make_shared(data); auto splits = builder::opset1::split(shape, rank_size); - auto num_groups_const = - default_opset::Constant::create(element::i64, Shape{1}, {num_groups}); + auto num_groups_const = default_opset::Constant::create( + element::Type_t::i64, Shape{1}, {num_groups}); NodeVector new_shape{ splits[0].get_node_shared_ptr(), num_groups_const, @@ -98,7 +98,7 @@ namespace ngraph { auto shape = data_pshape.to_shape(); data_shape_node = default_opset::Constant::create( - element::u64, Shape{shape.size()}, shape); + element::Type_t::u64, Shape{shape.size()}, shape); } else { diff --git a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/normalize.cpp b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/normalize.cpp index 226658d7f55..ffec771b142 100644 --- a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/normalize.cpp +++ b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/normalize.cpp @@ -66,7 +66,7 @@ namespace ngraph weights_shape.push_back(1); } auto new_shape = std::make_shared( - element::i64, Shape{weights_shape.size()}, weights_shape); + element::Type_t::i64, Shape{weights_shape.size()}, weights_shape); weights = std::make_shared(inputs[1], new_shape, true); } @@ -75,7 +75,7 @@ namespace ngraph if (!across_spatial) { axes = std::make_shared( - element::i64, Shape{1}, std::vector{1}); + element::Type_t::i64, Shape{1}, std::vector{1}); } else { diff --git a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp index 33ae3dc25a4..222e84cf598 100644 --- a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp +++ b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/prior_box.cpp @@ -36,9 +36,9 @@ namespace ngraph return std::make_shared( node, default_opset::Constant::create( - element::i64, Shape{1}, std::vector{start}), + element::Type_t::i64, Shape{1}, std::vector{start}), default_opset::Constant::create( - element::i64, Shape{1}, std::vector{end}), + element::Type_t::i64, Shape{1}, std::vector{end}), std::vector{0}, // begin mask std::vector{0}); // end mask } @@ -75,7 +75,7 @@ namespace ngraph attrs.density = node.get_attribute_value>("density", {}); auto axes = default_opset::Constant::create( - element::i64, Shape{1}, std::vector{0}); + element::Type_t::i64, Shape{1}, std::vector{0}); return {std::make_shared( std::make_shared( diff --git a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/swish.cpp b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/swish.cpp index 486551f24e4..0856d25b223 100644 --- 
a/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/swish.cpp +++ b/ngraph/frontend/onnx_import/src/op/org.openvinotoolkit/swish.cpp @@ -40,7 +40,8 @@ namespace ngraph } else { - beta = default_opset::Constant::create(element::f32, Shape{}, {1.0}); + beta = + default_opset::Constant::create(element::Type_t::f32, Shape{}, {1.0}); } return {std::make_shared(ng_inputs.at(0), beta)}; diff --git a/ngraph/frontend/onnx_import/src/op/pad.cpp b/ngraph/frontend/onnx_import/src/op/pad.cpp index 2143d194f4a..73b26489317 100644 --- a/ngraph/frontend/onnx_import/src/op/pad.cpp +++ b/ngraph/frontend/onnx_import/src/op/pad.cpp @@ -83,9 +83,13 @@ namespace ngraph return {std::make_shared( data, std::make_shared( - element::i64, ngraph::Shape{padding_below.size()}, padding_below), + element::Type_t::i64, + ngraph::Shape{padding_below.size()}, + padding_below), std::make_shared( - element::i64, ngraph::Shape{padding_above.size()}, padding_above), + element::Type_t::i64, + ngraph::Shape{padding_above.size()}, + padding_above), std::make_shared( data.get_element_type(), ngraph::Shape{}, std::vector{value}), pad_mode)}; @@ -125,20 +129,20 @@ namespace ngraph pads_vector.begin() + half_size, pads_vector.end()); padding_begin = default_opset::Constant::create( - element::i64, ngraph::Shape{half_size}, padding_begin_values); + element::Type_t::i64, ngraph::Shape{half_size}, padding_begin_values); padding_end = default_opset::Constant::create( - element::i64, ngraph::Shape{half_size}, padding_end_values); + element::Type_t::i64, ngraph::Shape{half_size}, padding_end_values); } else { - auto axis = - default_opset::Constant::create(element::i64, ngraph::Shape{}, {0}); + auto axis = default_opset::Constant::create( + element::Type_t::i64, ngraph::Shape{}, {0}); OutputVector padding = builder::opset1::split(pads, 2, 0); - padding_begin = - std::make_shared(padding.at(0), element::i64); - padding_end = - std::make_shared(padding.at(1), element::i64); + padding_begin = std::make_shared( + padding.at(0), element::Type_t::i64); + padding_end = std::make_shared( + padding.at(1), element::Type_t::i64); } const std::string mode = diff --git a/ngraph/frontend/onnx_import/src/op/quant_conv.cpp b/ngraph/frontend/onnx_import/src/op/quant_conv.cpp index a7478b9a8dc..042679e0c21 100644 --- a/ngraph/frontend/onnx_import/src/op/quant_conv.cpp +++ b/ngraph/frontend/onnx_import/src/op/quant_conv.cpp @@ -69,15 +69,15 @@ namespace ngraph const Output& bias = nullptr) { ngraph::element::Type output_type; - if (data.get_element_type() == ngraph::element::u8 && - filters.get_element_type() == ngraph::element::i8) + if (data.get_element_type() == ngraph::element::Type_t::u8 && + filters.get_element_type() == ngraph::element::Type_t::i8) { - output_type = ngraph::element::i8; + output_type = ngraph::element::Type_t::i8; } - else if (data.get_element_type() == ngraph::element::u8 && - filters.get_element_type() == ngraph::element::u8) + else if (data.get_element_type() == ngraph::element::Type_t::u8 && + filters.get_element_type() == ngraph::element::Type_t::u8) { - output_type = ngraph::element::u8; + output_type = ngraph::element::Type_t::u8; } if (groups > 1) { diff --git a/ngraph/frontend/onnx_import/src/op/quantize_linear.cpp b/ngraph/frontend/onnx_import/src/op/quantize_linear.cpp index 4115b9c62bb..5f4126f667d 100644 --- a/ngraph/frontend/onnx_import/src/op/quantize_linear.cpp +++ b/ngraph/frontend/onnx_import/src/op/quantize_linear.cpp @@ -48,7 +48,7 @@ namespace ngraph else { return std::make_shared( - element::u8, Shape{1}, 
std::uint8_t(0)); + element::Type_t::u8, Shape{1}, std::uint8_t(0)); } } @@ -59,7 +59,8 @@ namespace ngraph CHECK_VALID_NODE( onnx_node, y_zero_point_et.is_static() && - (y_zero_point_et == element::u8 || y_zero_point_et == element::i8), + (y_zero_point_et == element::Type_t::u8 || + y_zero_point_et == element::Type_t::i8), "\"y_zero_point\" input data type must be static and of 8-bit " "integer type."); } @@ -72,9 +73,10 @@ namespace ngraph CHECK_VALID_NODE(onnx_node, y_scale_et.is_static(), "\"y_scale\" input data type must be static."); - if (y_scale_et != element::f32) + if (y_scale_et != element::Type_t::f32) { - return std::make_shared(y_scale, element::f32); + return std::make_shared(y_scale, + element::Type_t::f32); } return y_scale; } @@ -87,9 +89,10 @@ namespace ngraph data_et.is_static(), "\"x\" input data type must be static."); - if (data_et != element::f32) + if (data_et != element::Type_t::f32) { - return std::make_shared(data, element::f32); + return std::make_shared(data, + element::Type_t::f32); } return data; } @@ -101,7 +104,7 @@ namespace ngraph std::shared_ptr output_low; std::shared_ptr output_high; - if (destination_type == element::i8) + if (destination_type == element::Type_t::i8) { output_low = std::make_shared( data_type, Shape{1}, -128); diff --git a/ngraph/frontend/onnx_import/src/op/reduce.cpp b/ngraph/frontend/onnx_import/src/op/reduce.cpp index 28058c697e2..9ee53014cf3 100644 --- a/ngraph/frontend/onnx_import/src/op/reduce.cpp +++ b/ngraph/frontend/onnx_import/src/op/reduce.cpp @@ -61,7 +61,7 @@ namespace ngraph } return default_opset::Constant::create( - element::i64, Shape{reduction_axes.size()}, reduction_axes); + element::Type_t::i64, Shape{reduction_axes.size()}, reduction_axes); } template diff --git a/ngraph/frontend/onnx_import/src/op/reshape.cpp b/ngraph/frontend/onnx_import/src/op/reshape.cpp index df893c954e8..83e84ad78d4 100644 --- a/ngraph/frontend/onnx_import/src/op/reshape.cpp +++ b/ngraph/frontend/onnx_import/src/op/reshape.cpp @@ -51,7 +51,7 @@ namespace ngraph node.get_attribute_value>("shape", {}); pattern = default_opset::Constant::create( - element::i64, Shape{output_shape.size()}, output_shape); + element::Type_t::i64, Shape{output_shape.size()}, output_shape); } return {std::make_shared(data, pattern, true)}; diff --git a/ngraph/frontend/onnx_import/src/op/resize.cpp b/ngraph/frontend/onnx_import/src/op/resize.cpp index ff288d82d3f..d84084b833e 100644 --- a/ngraph/frontend/onnx_import/src/op/resize.cpp +++ b/ngraph/frontend/onnx_import/src/op/resize.cpp @@ -166,7 +166,7 @@ namespace ngraph std::floor(data_static_shape.at(i) * scales_vector.at(i))); } auto output_shape_const = default_opset::Constant::create( - element::u64, Shape({output_shape.size()}), output_shape); + element::Type_t::u64, Shape({output_shape.size()}), output_shape); return output_shape_const; } @@ -175,8 +175,8 @@ namespace ngraph std::make_shared(data), scales.get_element_type()); const auto multiply = std::make_shared(shape_of_data, scales); - const auto output_shape = - std::make_shared(multiply, ngraph::element::i64); + const auto output_shape = std::make_shared( + multiply, ngraph::element::Type_t::i64); return output_shape; } @@ -207,19 +207,20 @@ namespace ngraph scales.push_back(scale); } auto scales_const = default_opset::Constant::create( - element::f32, Shape({scales.size()}), scales); + element::Type_t::f32, Shape({scales.size()}), scales); return scales_const; } const auto shape_of_data = std::make_shared( - std::make_shared(data), 
ngraph::element::f32); - const auto converted_sizes = - std::make_shared(sizes, ngraph::element::f32); + std::make_shared(data), + ngraph::element::Type_t::f32); + const auto converted_sizes = std::make_shared( + sizes, ngraph::element::Type_t::f32); const auto divide = std::make_shared(converted_sizes, shape_of_data); const auto eps_node = std::make_shared( - ngraph::element::f32, Shape{}, epsilon); + ngraph::element::Type_t::f32, Shape{}, epsilon); const auto scales = std::make_shared(divide, eps_node); return scales; diff --git a/ngraph/frontend/onnx_import/src/op/reverse_sequence.cpp b/ngraph/frontend/onnx_import/src/op/reverse_sequence.cpp index ad61af22bda..1f7d45ae6e0 100644 --- a/ngraph/frontend/onnx_import/src/op/reverse_sequence.cpp +++ b/ngraph/frontend/onnx_import/src/op/reverse_sequence.cpp @@ -38,7 +38,7 @@ namespace ngraph const auto sequence_lengths = node.get_ng_inputs().at(1); // nGraph supports only int32 type of sequence_lengths const auto sequence_lengths_i32 = std::make_shared( - node.get_ng_inputs().at(1), element::i32); + node.get_ng_inputs().at(1), element::Type_t::i32); const auto data_rank = data.get_partial_shape().rank(); const auto batch_axis = node.get_attribute_value("batch_axis", 1); diff --git a/ngraph/frontend/onnx_import/src/op/scatter_elements.cpp b/ngraph/frontend/onnx_import/src/op/scatter_elements.cpp index 984c6f1b9a8..60142950808 100644 --- a/ngraph/frontend/onnx_import/src/op/scatter_elements.cpp +++ b/ngraph/frontend/onnx_import/src/op/scatter_elements.cpp @@ -36,7 +36,7 @@ namespace ngraph const auto axis = node.get_attribute_value("axis", 0); const auto axis_node = - default_opset::Constant::create(element::i64, Shape{}, {axis}); + default_opset::Constant::create(element::Type_t::i64, Shape{}, {axis}); return {std::make_shared( data, indices, updates, axis_node)}; diff --git a/ngraph/frontend/onnx_import/src/op/shape.cpp b/ngraph/frontend/onnx_import/src/op/shape.cpp index c02df889f9a..f6643a972e1 100644 --- a/ngraph/frontend/onnx_import/src/op/shape.cpp +++ b/ngraph/frontend/onnx_import/src/op/shape.cpp @@ -39,7 +39,7 @@ namespace ngraph { const auto static_data_shape = data_shape.to_shape(); - return {default_opset::Constant::create(ngraph::element::i64, + return {default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{static_data_shape.size()}, static_data_shape)}; } diff --git a/ngraph/frontend/onnx_import/src/op/size.cpp b/ngraph/frontend/onnx_import/src/op/size.cpp index b1331f3c3af..1c892087489 100644 --- a/ngraph/frontend/onnx_import/src/op/size.cpp +++ b/ngraph/frontend/onnx_import/src/op/size.cpp @@ -38,7 +38,7 @@ namespace ngraph static_cast(shape_size(data.get_shape()))}; return {std::make_shared( - ngraph::element::i64, + ngraph::element::Type_t::i64, Shape{}, std::vector{tensor_elements_count})}; } diff --git a/ngraph/frontend/onnx_import/src/op/slice.cpp b/ngraph/frontend/onnx_import/src/op/slice.cpp index 20478b52341..20ae2a65e21 100644 --- a/ngraph/frontend/onnx_import/src/op/slice.cpp +++ b/ngraph/frontend/onnx_import/src/op/slice.cpp @@ -139,15 +139,16 @@ namespace ngraph // expected_output_shape: {3, 3, 1, 1} OutputVector adjusted_indices(slice_indices_length); std::vector target_axes(axes); - const auto gather_axis = default_opset::Constant::create(element::i64, {}, {0}); + const auto gather_axis = + default_opset::Constant::create(element::Type_t::i64, {}, {0}); int added_indices_number = 0; for (int i = 0; i < slice_indices_length; ++i) { if (std::find(std::begin(axes), std::end(axes), i) == axes.end()) { - 
adjusted_indices[i] = - default_opset::Constant::create(element::i64, {1}, {fill_in_value}); + adjusted_indices[i] = default_opset::Constant::create( + element::Type_t::i64, {1}, {fill_in_value}); target_axes.insert(std::next(target_axes.begin(), i), i); ++added_indices_number; } @@ -156,7 +157,7 @@ namespace ngraph adjusted_indices[i] = std::make_shared( indices, default_opset::Constant::create( - element::i64, {1}, {i - added_indices_number}), + element::Type_t::i64, {1}, {i - added_indices_number}), gather_axis); } } @@ -202,7 +203,7 @@ namespace ngraph "Data rank must be static when axes input is not provided"); const size_t data_rank_value = data_rank.get_length(); axes = default_opset::Constant::create( - element::i64, + element::Type_t::i64, {data_rank_value}, common::get_monotonic_range(data_rank_value)); } @@ -225,7 +226,7 @@ namespace ngraph else { steps = default_opset::Constant::create( - element::i64, + element::Type_t::i64, {slice_indices_length}, std::vector(slice_indices_length, 1)); } @@ -252,9 +253,9 @@ namespace ngraph std::shared_ptr starts = std::make_shared( - element::i64, Shape{starts_atr.size()}, starts_atr); + element::Type_t::i64, Shape{starts_atr.size()}, starts_atr); std::shared_ptr ends = std::make_shared( - element::i64, Shape{ends_atr.size()}, ends_atr); + element::Type_t::i64, Shape{ends_atr.size()}, ends_atr); auto axes = node.get_attribute_value>( "axes", std::vector()); @@ -277,7 +278,7 @@ namespace ngraph const auto begin_end_mask = axes_to_mask(normalized_axes, slice_indices_length); std::shared_ptr strides = default_opset::Constant::create( - element::i64, + element::Type_t::i64, Shape{slice_indices_length}, std::vector(slice_indices_length, 1)); diff --git a/ngraph/frontend/onnx_import/src/op/softmax.cpp b/ngraph/frontend/onnx_import/src/op/softmax.cpp index 87c7e5192f7..24daa2cd1c6 100644 --- a/ngraph/frontend/onnx_import/src/op/softmax.cpp +++ b/ngraph/frontend/onnx_import/src/op/softmax.cpp @@ -32,7 +32,8 @@ namespace ngraph { const auto coerced_data = ngraph::builder::opset1::flatten(data, axis); - const auto axis_1 = default_opset::Constant::create(element::i64, Shape{1}, {1}); + const auto axis_1 = + default_opset::Constant::create(element::Type_t::i64, Shape{1}, {1}); const auto max = std::make_shared(coerced_data, axis_1, true); diff --git a/ngraph/frontend/onnx_import/src/op/squeeze.cpp b/ngraph/frontend/onnx_import/src/op/squeeze.cpp index 035f5902957..8dc6ac87b00 100644 --- a/ngraph/frontend/onnx_import/src/op/squeeze.cpp +++ b/ngraph/frontend/onnx_import/src/op/squeeze.cpp @@ -39,7 +39,7 @@ namespace ngraph std::vector normalized_axes = ngraph::normalize_axes(node.get_description(), axes, data_rank); auto axes_node = std::make_shared( - element::u64, Shape{normalized_axes.size()}, normalized_axes); + element::Type_t::u64, Shape{normalized_axes.size()}, normalized_axes); return {std::make_shared(data, axes_node)}; } diff --git a/ngraph/frontend/onnx_import/src/op/tile.cpp b/ngraph/frontend/onnx_import/src/op/tile.cpp index e14af18e726..2d9faa381c7 100644 --- a/ngraph/frontend/onnx_import/src/op/tile.cpp +++ b/ngraph/frontend/onnx_import/src/op/tile.cpp @@ -35,7 +35,8 @@ namespace ngraph // Workaround for backends which require repeats to be i64. // Remove the following line when no longer needed. 
- repeats = std::make_shared(repeats, element::i64); + repeats = + std::make_shared(repeats, element::Type_t::i64); return {std::make_shared(input, repeats)}; } diff --git a/ngraph/frontend/onnx_import/src/op/topk.cpp b/ngraph/frontend/onnx_import/src/op/topk.cpp index 8dfb1ecb4ec..3267b97f479 100644 --- a/ngraph/frontend/onnx_import/src/op/topk.cpp +++ b/ngraph/frontend/onnx_import/src/op/topk.cpp @@ -63,7 +63,8 @@ namespace ngraph { auto data = node.get_ng_inputs().at(0); std::int64_t k{node.get_attribute_value("k")}; - auto k_node = default_opset::Constant::create(element::i64, Shape{}, {k}); + auto k_node = + default_opset::Constant::create(element::Type_t::i64, Shape{}, {k}); auto axis = get_axis(node); std::shared_ptr top_k = std::make_shared( @@ -72,7 +73,7 @@ namespace ngraph axis, default_opset::TopK::Mode::MAX, default_opset::TopK::SortType::SORT_VALUES, - element::i64); + element::Type_t::i64); return {top_k->output(0), top_k->output(1)}; } @@ -92,7 +93,7 @@ namespace ngraph axis, default_opset::TopK::Mode::MAX, default_opset::TopK::SortType::SORT_VALUES, - element::i64); + element::Type_t::i64); return {top_k->output(0), top_k->output(1)}; } @@ -120,7 +121,7 @@ namespace ngraph : default_opset::TopK::Mode::MIN; std::shared_ptr top_k = std::make_shared( - data, k, axis, mode, sort_type, element::i64); + data, k, axis, mode, sort_type, element::Type_t::i64); return {top_k->output(0), top_k->output(1)}; } diff --git a/ngraph/frontend/onnx_import/src/op/unsqueeze.cpp b/ngraph/frontend/onnx_import/src/op/unsqueeze.cpp index ba2a64778e8..150dd5684db 100644 --- a/ngraph/frontend/onnx_import/src/op/unsqueeze.cpp +++ b/ngraph/frontend/onnx_import/src/op/unsqueeze.cpp @@ -35,7 +35,7 @@ namespace ngraph auto data = node.get_ng_inputs().at(0); auto axes = node.get_attribute_value>("axes", {}); auto axes_node = std::make_shared( - element::i64, Shape{axes.size()}, axes); + element::Type_t::i64, Shape{axes.size()}, axes); return {std::make_shared(data, axes_node)}; } diff --git a/ngraph/frontend/onnx_import/src/op/upsample.cpp b/ngraph/frontend/onnx_import/src/op/upsample.cpp index ff749771b97..5c635d71501 100644 --- a/ngraph/frontend/onnx_import/src/op/upsample.cpp +++ b/ngraph/frontend/onnx_import/src/op/upsample.cpp @@ -111,24 +111,26 @@ namespace ngraph std::floor(data_static_shape.at(i) * scales.at(i))); } auto output_shape_const = default_opset::Constant::create( - element::u64, Shape({output_shape.size()}), output_shape); + element::Type_t::u64, Shape({output_shape.size()}), output_shape); const auto scales_const = default_opset::Constant::create( - ngraph::element::f32, Shape({scales.size()}), scales); + ngraph::element::Type_t::f32, Shape({scales.size()}), scales); return {std::make_shared( data, output_shape_const, scales_const, attrs)}; } const auto scales_const = default_opset::Constant::create( - ngraph::element::f32, Shape({scales.size()}), scales); + ngraph::element::Type_t::f32, Shape({scales.size()}), scales); auto shape_of_data = std::make_shared( - std::make_shared(data), ngraph::element::f32); + std::make_shared(data), + ngraph::element::Type_t::f32); auto multiply = std::make_shared(shape_of_data, scales_const); auto output_shape = std::make_shared( - std::make_shared(multiply), ngraph::element::i64); + std::make_shared(multiply), + ngraph::element::Type_t::i64); return {std::make_shared( data, output_shape, scales_const, attrs)}; @@ -188,18 +190,20 @@ namespace ngraph std::floor(data_static_shape.at(i) * scales_vector.at(i))); } auto output_shape_const = 
default_opset::Constant::create( - element::u64, Shape({output_shape.size()}), output_shape); + element::Type_t::u64, Shape({output_shape.size()}), output_shape); return {std::make_shared( data, output_shape_const, scales, attrs)}; } auto shape_of_data = std::make_shared( - std::make_shared(data), ngraph::element::f32); + std::make_shared(data), + ngraph::element::Type_t::f32); auto multiply = std::make_shared(shape_of_data, scales); auto output_shape = std::make_shared( - std::make_shared(multiply), ngraph::element::i64); + std::make_shared(multiply), + ngraph::element::Type_t::i64); return {std::make_shared( data, output_shape, scales, attrs)}; diff --git a/ngraph/frontend/onnx_import/src/utils/arg_min_max_factory.cpp b/ngraph/frontend/onnx_import/src/utils/arg_min_max_factory.cpp index c8695011ea9..ef7649e41a1 100644 --- a/ngraph/frontend/onnx_import/src/utils/arg_min_max_factory.cpp +++ b/ngraph/frontend/onnx_import/src/utils/arg_min_max_factory.cpp @@ -45,20 +45,22 @@ namespace ngraph ArgMinMaxFactory::make_topk_subgraph(default_opset::TopK::Mode mode) const { const auto k_node = - default_opset::Constant::create(ngraph::element::i64, Shape{}, {1}); + default_opset::Constant::create(ngraph::element::Type_t::i64, Shape{}, {1}); const auto topk = std::make_shared( m_input_node, k_node, m_axis, mode, default_opset::TopK::SortType::NONE); if (m_keep_dims == 0) { - const auto axis_to_remove = - default_opset::Constant::create(element::u64, Shape{}, {topk->get_axis()}); + const auto axis_to_remove = default_opset::Constant::create( + element::Type_t::u64, Shape{}, {topk->get_axis()}); const auto reshaped_indices = std::make_shared(topk->output(1), axis_to_remove); - return std::make_shared(reshaped_indices, element::i64); + return std::make_shared(reshaped_indices, + element::Type_t::i64); } - return std::make_shared(topk->output(1), element::i64); + return std::make_shared(topk->output(1), + element::Type_t::i64); } } } diff --git a/ngraph/frontend/onnx_import/src/utils/common.cpp b/ngraph/frontend/onnx_import/src/utils/common.cpp index a25248e2fba..882914fa490 100644 --- a/ngraph/frontend/onnx_import/src/utils/common.cpp +++ b/ngraph/frontend/onnx_import/src/utils/common.cpp @@ -25,23 +25,24 @@ namespace ngraph { namespace common { - const ngraph::element::Type& get_ngraph_element_type(int64_t onnx_type) + const ngraph::element::Type get_ngraph_element_type(int64_t onnx_type) { switch (onnx_type) { - case ONNX_NAMESPACE::TensorProto_DataType_BOOL: return element::boolean; - case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: return element::f64; - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: return element::f16; - case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: return element::f32; - case ONNX_NAMESPACE::TensorProto_DataType_INT8: return element::i8; - case ONNX_NAMESPACE::TensorProto_DataType_INT16: return element::i16; - case ONNX_NAMESPACE::TensorProto_DataType_INT32: return element::i32; - case ONNX_NAMESPACE::TensorProto_DataType_INT64: return element::i64; - case ONNX_NAMESPACE::TensorProto_DataType_UINT8: return element::u8; - case ONNX_NAMESPACE::TensorProto_DataType_UINT16: return element::u16; - case ONNX_NAMESPACE::TensorProto_DataType_UINT32: return element::u32; - case ONNX_NAMESPACE::TensorProto_DataType_UINT64: return element::u64; - case ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED: return element::dynamic; + case ONNX_NAMESPACE::TensorProto_DataType_BOOL: return element::Type_t::boolean; + case ONNX_NAMESPACE::TensorProto_DataType_DOUBLE: return 
element::Type_t::f64; + case ONNX_NAMESPACE::TensorProto_DataType_FLOAT16: return element::Type_t::f16; + case ONNX_NAMESPACE::TensorProto_DataType_FLOAT: return element::Type_t::f32; + case ONNX_NAMESPACE::TensorProto_DataType_INT8: return element::Type_t::i8; + case ONNX_NAMESPACE::TensorProto_DataType_INT16: return element::Type_t::i16; + case ONNX_NAMESPACE::TensorProto_DataType_INT32: return element::Type_t::i32; + case ONNX_NAMESPACE::TensorProto_DataType_INT64: return element::Type_t::i64; + case ONNX_NAMESPACE::TensorProto_DataType_UINT8: return element::Type_t::u8; + case ONNX_NAMESPACE::TensorProto_DataType_UINT16: return element::Type_t::u16; + case ONNX_NAMESPACE::TensorProto_DataType_UINT32: return element::Type_t::u32; + case ONNX_NAMESPACE::TensorProto_DataType_UINT64: return element::Type_t::u64; + case ONNX_NAMESPACE::TensorProto_DataType_UNDEFINED: + return element::Type_t::dynamic; } #ifdef NGRAPH_USE_PROTOBUF_LITE throw ngraph_error("unsupported element type"); @@ -61,15 +62,15 @@ namespace ngraph const auto range_value = get_monotonic_range( value.get_partial_shape().rank().get_length(), start_value, step); return default_opset::Constant::create( - element::i64, {range_value.size()}, range_value); + element::Type_t::i64, {range_value.size()}, range_value); } const auto value_shape = std::make_shared(value); return std::make_shared( - default_opset::Constant::create(element::i64, {}, {start_value}), + default_opset::Constant::create(element::Type_t::i64, {}, {start_value}), std::make_shared(value_shape), - default_opset::Constant::create(element::i64, {}, {step}), - element::i64); + default_opset::Constant::create(element::Type_t::i64, {}, {step}), + element::Type_t::i64); } void validate_scalar_input(const char* input_name, diff --git a/ngraph/frontend/onnx_import/src/utils/recurrent.cpp b/ngraph/frontend/onnx_import/src/utils/recurrent.cpp index 3ddc467b1a6..8ebd20b893c 100644 --- a/ngraph/frontend/onnx_import/src/utils/recurrent.cpp +++ b/ngraph/frontend/onnx_import/src/utils/recurrent.cpp @@ -81,7 +81,9 @@ namespace ngraph else { m_map[OpInput::SEQ_LENGTHS] = std::make_shared( - element::i32, Shape{batch_size}, m_map[OpInput::X].get_shape().at(1)); + element::Type_t::i32, + Shape{batch_size}, + m_map[OpInput::X].get_shape().at(1)); } // The initial value of the hidden. 
if (ng_inputs.size() > 5 && !ngraph::op::is_null(ng_inputs.at(5))) diff --git a/ngraph/frontend/onnx_import/src/utils/reshape.cpp b/ngraph/frontend/onnx_import/src/utils/reshape.cpp index ddd4674a868..4f42aa4573f 100644 --- a/ngraph/frontend/onnx_import/src/utils/reshape.cpp +++ b/ngraph/frontend/onnx_import/src/utils/reshape.cpp @@ -126,8 +126,10 @@ namespace ngraph // reshape the node with shape {C} to {1, C, 1, 1, ..., 1} std::vector reshape_pattern_values(expected_rank, 1U); reshape_pattern_values[1] = node.get_shape().front(); - const auto reshape_pattern = default_opset::Constant::create( - element::u64, Shape{reshape_pattern_values.size()}, reshape_pattern_values); + const auto reshape_pattern = + default_opset::Constant::create(element::Type_t::u64, + Shape{reshape_pattern_values.size()}, + reshape_pattern_values); return std::make_shared(node, reshape_pattern, false); } return node; diff --git a/ngraph/python/src/pyngraph/ops/constant.cpp b/ngraph/python/src/pyngraph/ops/constant.cpp index 1f6dd6b0850..4e061767912 100644 --- a/ngraph/python/src/pyngraph/ops/constant.cpp +++ b/ngraph/python/src/pyngraph/ops/constant.cpp @@ -117,51 +117,52 @@ void regclass_pyngraph_op_Constant(py::module m) constant.def("get_vector", [](const ngraph::op::Constant& self) { auto element_type = self.get_element_type(); - if (element_type == ngraph::element::boolean) + if (element_type == ngraph::element::Type_t::boolean) { return _cast_vector(self); } - else if (element_type == ngraph::element::f16) + else if (element_type == ngraph::element::Type_t::f16) { return _cast_vector(self); } - else if (element_type == ngraph::element::f32) + else if (element_type == ngraph::element::Type_t::f32) { return _cast_vector(self); } - else if (element_type == ngraph::element::f64) + else if (element_type == ngraph::element::Type_t::f64) { return _cast_vector(self); } - else if (element_type == ngraph::element::i8) + else if (element_type == ngraph::element::Type_t::i8) { return _cast_vector(self); } - else if (element_type == ngraph::element::i16) + else if (element_type == ngraph::element::Type_t::i16) { return _cast_vector(self); } - else if (element_type == ngraph::element::i32) + else if (element_type == ngraph::element::Type_t::i32) { return _cast_vector(self); } - else if (element_type == ngraph::element::i64) + else if (element_type == ngraph::element::Type_t::i64) { return _cast_vector(self); } - else if (element_type == ngraph::element::u8 || element_type == ngraph::element::u1) + else if (element_type == ngraph::element::Type_t::u8 || + element_type == ngraph::element::Type_t::u1) { return _cast_vector(self); } - else if (element_type == ngraph::element::u16) + else if (element_type == ngraph::element::Type_t::u16) { return _cast_vector(self); } - else if (element_type == ngraph::element::u32) + else if (element_type == ngraph::element::Type_t::u32) { return _cast_vector(self); } - else if (element_type == ngraph::element::u64) + else if (element_type == ngraph::element::Type_t::u64) { return _cast_vector(self); } @@ -174,51 +175,52 @@ void regclass_pyngraph_op_Constant(py::module m) // Provide buffer access constant.def_buffer([](const ngraph::op::Constant& self) -> py::buffer_info { auto element_type = self.get_element_type(); - if (element_type == ngraph::element::boolean) + if (element_type == ngraph::element::Type_t::boolean) { return _get_buffer_info(self); } - else if (element_type == ngraph::element::f16) + else if (element_type == ngraph::element::Type_t::f16) { return _get_buffer_info(self); } - 
else if (element_type == ngraph::element::f32) + else if (element_type == ngraph::element::Type_t::f32) { return _get_buffer_info<float>(self); } - else if (element_type == ngraph::element::f64) + else if (element_type == ngraph::element::Type_t::f64) { return _get_buffer_info<double>(self); } - else if (element_type == ngraph::element::i8) + else if (element_type == ngraph::element::Type_t::i8) { return _get_buffer_info<int8_t>(self); } - else if (element_type == ngraph::element::i16) + else if (element_type == ngraph::element::Type_t::i16) { return _get_buffer_info<int16_t>(self); } - else if (element_type == ngraph::element::i32) + else if (element_type == ngraph::element::Type_t::i32) { return _get_buffer_info<int32_t>(self); } - else if (element_type == ngraph::element::i64) + else if (element_type == ngraph::element::Type_t::i64) { return _get_buffer_info<int64_t>(self); } - else if (element_type == ngraph::element::u8 || element_type == ngraph::element::u1) + else if (element_type == ngraph::element::Type_t::u8 || + element_type == ngraph::element::Type_t::u1) { return _get_buffer_info<uint8_t>(self); } - else if (element_type == ngraph::element::u16) + else if (element_type == ngraph::element::Type_t::u16) { return _get_buffer_info<uint16_t>(self); } - else if (element_type == ngraph::element::u32) + else if (element_type == ngraph::element::Type_t::u32) { return _get_buffer_info<uint32_t>(self); } - else if (element_type == ngraph::element::u64) + else if (element_type == ngraph::element::Type_t::u64) { return _get_buffer_info<uint64_t>(self); } diff --git a/ngraph/python/src/pyngraph/types/element_type.cpp b/ngraph/python/src/pyngraph/types/element_type.cpp index ce72aacc715..d5f25dad35b 100644 --- a/ngraph/python/src/pyngraph/types/element_type.cpp +++ b/ngraph/python/src/pyngraph/types/element_type.cpp @@ -27,19 +27,19 @@ void regclass_pyngraph_Type(py::module m) { py::class_<ngraph::element::Type, std::shared_ptr<ngraph::element::Type>> type(m, "Type"); type.doc() = "ngraph.impl.Type wraps ngraph::element::Type"; - type.attr("boolean") = ngraph::element::boolean; - type.attr("f16") = ngraph::element::f16; - type.attr("f32") = ngraph::element::f32; - type.attr("f64") = ngraph::element::f64; - type.attr("i8") = ngraph::element::i8; - type.attr("i16") = ngraph::element::i16; - type.attr("i32") = ngraph::element::i32; - type.attr("i64") = ngraph::element::i64; - type.attr("u1") = ngraph::element::u1; - type.attr("u8") = ngraph::element::u8; - type.attr("u16") = ngraph::element::u16; - type.attr("u32") = ngraph::element::u32; - type.attr("u64") = ngraph::element::u64; + type.attr("boolean") = ngraph::element::Type(ngraph::element::Type_t::boolean); + type.attr("f16") = ngraph::element::Type(ngraph::element::Type_t::f16); + type.attr("f32") = ngraph::element::Type(ngraph::element::Type_t::f32); + type.attr("f64") = ngraph::element::Type(ngraph::element::Type_t::f64); + type.attr("i8") = ngraph::element::Type(ngraph::element::Type_t::i8); + type.attr("i16") = ngraph::element::Type(ngraph::element::Type_t::i16); + type.attr("i32") = ngraph::element::Type(ngraph::element::Type_t::i32); + type.attr("i64") = ngraph::element::Type(ngraph::element::Type_t::i64); + type.attr("u1") = ngraph::element::Type(ngraph::element::Type_t::u1); + type.attr("u8") = ngraph::element::Type(ngraph::element::Type_t::u8); + type.attr("u16") = ngraph::element::Type(ngraph::element::Type_t::u16); + type.attr("u32") = ngraph::element::Type(ngraph::element::Type_t::u32); + type.attr("u64") = ngraph::element::Type(ngraph::element::Type_t::u64); type.def("__repr__", [](const ngraph::element::Type& self) { std::string bitwidth = std::to_string(self.bitwidth());
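Every hunk in this patch follows the same mechanical pattern, made explicit by the Python binding hunks above: call sites that used the per-type constants (`element::f32`, `element::i64`, and so on) now pass `element::Type_t` enum values, which convert implicitly to `element::Type` in function arguments and comparisons; only contexts that need a concrete `element::Type` object, such as the `type.attr(...)` assignments above, construct one explicitly from the enum. A minimal sketch of the before/after spelling, not taken from any single file in the patch (`make_f32_constant` is an illustrative name):

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<op::Constant> make_f32_constant()
{
    // Old (pre-patch) spelling:
    //   op::Constant::create(element::f32, Shape{2}, std::vector<float>{1.0f, 2.0f});
    // New spelling: pass the enum value; Type_t converts implicitly to element::Type.
    auto c = op::Constant::create(
        element::Type_t::f32, Shape{2}, std::vector<float>{1.0f, 2.0f});

    // Comparison against the enum works through the same conversion.
    bool is_f32 = (c->get_element_type() == element::Type_t::f32);
    (void)is_f32;

    // Where a concrete element::Type object is required (as in the pybind11
    // attributes above), construct one explicitly from the enum.
    element::Type t(element::Type_t::f32);
    (void)t;

    return c;
}
```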
diff --git a/ngraph/test/attributes.cpp b/ngraph/test/attributes.cpp index 88efac34f63..34867de416b 100644 --- a/ngraph/test/attributes.cpp +++ b/ngraph/test/attributes.cpp @@ -268,7 +268,7 @@ public: m_result_vector); } - void validate_and_infer_types() override { set_output_type(0, element::i64, {}); } + void validate_and_infer_types() override { set_output_type(0, element::Type_t::i64, {}); } bool visit_attributes(AttributeVisitor& visitor) override { visitor.on_attribute("turing_model", m_turing_model); @@ -348,13 +348,13 @@ constexpr NodeTypeInfo Oracle::type_info; TEST(attributes, user_op) { FactoryRegistry::get().register_factory(); - auto program = make_shared(element::i32, Shape{200}); - auto data = make_shared(element::i32, Shape{200}); + auto program = make_shared(element::Type_t::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); auto result = make_shared(data); auto oracle = make_shared(program, data, TuringModel::XL1200, - element::f32, + element::Type_t::f32, element::Type_t::i64, "12AU7", true, @@ -438,8 +438,8 @@ TEST(attributes, user_op) TEST(attributes, matmul_op) { FactoryRegistry::get().register_factory(); - auto A = make_shared(element::f32, Shape{0, 2}); - auto B = make_shared(element::f32, Shape{2, 0}); + auto A = make_shared(element::Type_t::f32, Shape{0, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 0}); bool transpose_a = true; bool transpose_b = true; @@ -492,7 +492,7 @@ TEST(attributes, partial_shape) TEST(attributes, max_pool_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{64, 3, 5}); + auto data = make_shared(element::Type_t::f32, Shape{64, 3, 5}); auto strides = Strides{2}; auto pads_begin = Shape{1}; @@ -517,8 +517,8 @@ TEST(attributes, max_pool_op) TEST(attributes, mod_op) { FactoryRegistry::get().register_factory(); - auto A = make_shared(element::f32, Shape{0, 2}); - auto B = make_shared(element::f32, Shape{2, 0}); + auto A = make_shared(element::Type_t::f32, Shape{0, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 0}); auto auto_broadcast = op::AutoBroadcastType::NUMPY; @@ -532,8 +532,8 @@ TEST(attributes, mod_op) TEST(attributes, non_max_suppression_op_custom_attributes) { FactoryRegistry::get().register_factory(); - auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - auto scores = make_shared(element::f32, Shape{1, 1, 1}); + auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); auto box_encoding = opset1::NonMaxSuppression::BoxEncodingType::CENTER; bool sort_result_descending = false; @@ -550,8 +550,8 @@ TEST(attributes, non_max_suppression_op_custom_attributes) TEST(attributes, non_max_suppression_op_default_attributes) { FactoryRegistry::get().register_factory(); - auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - auto scores = make_shared(element::f32, Shape{1, 1, 1}); + auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); auto nms = make_shared(boxes, scores); NodeBuilder builder(nms); @@ -564,12 +564,12 @@ TEST(attributes, non_max_suppression_op_default_attributes) TEST(attributes, non_max_suppression_v3_op_custom_attributes) { FactoryRegistry::get().register_factory(); - auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - auto scores = make_shared(element::f32, Shape{1, 1, 1}); + auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + auto scores = 
make_shared(element::Type_t::f32, Shape{1, 1, 1}); auto box_encoding = opset3::NonMaxSuppression::BoxEncodingType::CENTER; bool sort_result_descending = false; - element::Type output_type = element::i32; + element::Type output_type = element::Type_t::i32; auto nms = make_shared( boxes, scores, box_encoding, sort_result_descending, output_type); @@ -584,8 +584,8 @@ TEST(attributes, non_max_suppression_v3_op_custom_attributes) TEST(attributes, non_max_suppression_v3_op_default_attributes) { FactoryRegistry::get().register_factory(); - auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - auto scores = make_shared(element::f32, Shape{1, 1, 1}); + auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); auto nms = make_shared(boxes, scores); NodeBuilder builder(nms); @@ -599,8 +599,8 @@ TEST(attributes, non_max_suppression_v3_op_default_attributes) TEST(attributes, normalize_l2_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{1}); - const auto axes = make_shared(element::i32, Shape{}, vector{0}); + auto data = make_shared(element::Type_t::i32, Shape{1}); + const auto axes = make_shared(element::Type_t::i32, Shape{}, vector{0}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -616,10 +616,10 @@ TEST(attributes, normalize_l2_op) TEST(attributes, one_hot_op) { FactoryRegistry::get().register_factory(); - auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); - auto depth = op::Constant::create(element::i64, Shape{}, {4}); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = op::Constant::create(element::f32, Shape{}, {0.0f}); + auto indices = make_shared(element::Type_t::i64, Shape{1, 3, 2, 3}); + auto depth = op::Constant::create(element::Type_t::i64, Shape{}, {4}); + auto on_value = op::Constant::create(element::Type_t::f32, Shape{}, {1.0f}); + auto off_value = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); int64_t axis = 3; @@ -633,9 +633,9 @@ TEST(attributes, one_hot_op) TEST(attributes, pad_op) { FactoryRegistry::get().register_factory(); - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); auto pad_mode = op::PadMode::EDGE; @@ -649,8 +649,8 @@ TEST(attributes, pad_op) TEST(attributes, psroi_pooling_op) { FactoryRegistry::get().register_factory(); - auto input = make_shared(element::f32, Shape{1, 1024, 63, 38}); - auto coords = make_shared(element::f32, Shape{300, 5}); + auto input = make_shared(element::Type_t::f32, Shape{1, 1024, 63, 38}); + auto coords = make_shared(element::Type_t::f32, Shape{300, 5}); const int64_t output_dim = 882; const int64_t group_size = 3; @@ -676,8 +676,8 @@ TEST(attributes, reduce_logical_and_op) { // ReduceLogicalAnd derives visit_attributes from op::util::LogicalReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -692,8 +692,8 @@ TEST(attributes, reduce_logical_or_op) { // ReduceLogicalOr 
derives visit_attributes from op::util::LogicalReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -708,8 +708,8 @@ TEST(attributes, reduce_max_op) { // ReduceMax derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -724,8 +724,8 @@ TEST(attributes, reduce_mean_op) { // ReduceMean derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -740,8 +740,8 @@ TEST(attributes, reduce_min_op) { // ReduceMin derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -756,8 +756,8 @@ TEST(attributes, reduce_prod_op) { // ReduceProd derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -772,8 +772,8 @@ TEST(attributes, reduce_sum_op) { // ReduceSum derives visit_attributes from op::util::ArithmeticReductionKeepDims FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{3, 4, 5}); - auto reduction_axes = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto reduction_axes = make_shared(element::Type_t::i64, Shape{2}); bool keep_dims = true; @@ -787,7 +787,7 @@ TEST(attributes, reduce_sum_op) TEST(attributes, region_yolo_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{1, 255, 26, 26}); + auto data = make_shared(element::Type_t::f32, Shape{1, 255, 26, 26}); size_t num_coords = 4; size_t num_classes = 1; @@ -816,8 +816,8 @@ TEST(attributes, region_yolo_op) TEST(attributes, reshape_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 4}); - auto pattern = make_shared(element::i32, Shape{2}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4}); + auto pattern = make_shared(element::Type_t::i32, Shape{2}); bool special_zero = true; @@ -831,8 +831,8 @@ TEST(attributes, reshape_op) TEST(attributes, reverse_op_enum_mode) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); - auto 
reversed_axes = make_shared(element::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); + auto reversed_axes = make_shared(element::Type_t::i32, Shape{200}); auto reverse = make_shared(data, reversed_axes, opset1::Reverse::Mode::INDEX); NodeBuilder builder(reverse); @@ -844,8 +844,8 @@ TEST(attributes, reverse_op_enum_mode) TEST(attributes, reverse_op_string_mode) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); - auto reversed_axes = make_shared(element::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); + auto reversed_axes = make_shared(element::Type_t::i32, Shape{200}); std::string mode = "index"; @@ -859,8 +859,8 @@ TEST(attributes, reverse_op_string_mode) TEST(attributes, reverse_sequence_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 4, 2}); - auto seq_indices = make_shared(element::i32, Shape{4}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4, 2}); + auto seq_indices = make_shared(element::Type_t::i32, Shape{4}); auto batch_axis = 2; auto seq_axis = 1; @@ -879,10 +879,10 @@ TEST(attributes, reverse_sequence_op) TEST(attributes, rnn_cell_op_custom_attributes) { FactoryRegistry::get().register_factory(); - auto X = make_shared(element::f32, Shape{2, 3}); - auto H = make_shared(element::f32, Shape{2, 3}); - auto W = make_shared(element::f32, Shape{3, 3}); - auto R = make_shared(element::f32, Shape{3, 3}); + auto X = make_shared(element::Type_t::f32, Shape{2, 3}); + auto H = make_shared(element::Type_t::f32, Shape{2, 3}); + auto W = make_shared(element::Type_t::f32, Shape{3, 3}); + auto R = make_shared(element::Type_t::f32, Shape{3, 3}); const size_t hidden_size = 3; auto activations = std::vector{"sigmoid", "tanh"}; @@ -906,10 +906,10 @@ TEST(attributes, rnn_cell_op_custom_attributes) TEST(attributes, rnn_cell_op_default_attributes) { FactoryRegistry::get().register_factory(); - auto X = make_shared(element::f32, Shape{2, 3}); - auto H = make_shared(element::f32, Shape{2, 3}); - auto W = make_shared(element::f32, Shape{3, 3}); - auto R = make_shared(element::f32, Shape{3, 3}); + auto X = make_shared(element::Type_t::f32, Shape{2, 3}); + auto H = make_shared(element::Type_t::f32, Shape{2, 3}); + auto W = make_shared(element::Type_t::f32, Shape{3, 3}); + auto R = make_shared(element::Type_t::f32, Shape{3, 3}); const size_t hidden_size = 3; @@ -928,7 +928,7 @@ TEST(attributes, rnn_cell_op_default_attributes) TEST(attributes, elu_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{2, 4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4}); double alpha = 0.1; @@ -942,11 +942,11 @@ TEST(attributes, elu_op) TEST(attributes, fake_quantize_op) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto input_low = make_shared(element::f32, Shape{}); - const auto input_high = make_shared(element::f32, Shape{}); - const auto output_low = make_shared(element::f32, Shape{}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto input_low = make_shared(element::Type_t::f32, Shape{}); + const auto input_high = make_shared(element::Type_t::f32, Shape{}); + const auto output_low = make_shared(element::Type_t::f32, Shape{}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); auto levels = 5; auto 
auto_broadcast = op::AutoBroadcastType::NUMPY; @@ -963,8 +963,8 @@ TEST(attributes, fake_quantize_op) TEST(attributes, broadcast_v3) { FactoryRegistry::get().register_factory(); - const auto arg = make_shared(element::i64, Shape{1, 3, 1}); - const auto shape = make_shared(element::i64, Shape{3}); + const auto arg = make_shared(element::Type_t::i64, Shape{1, 3, 1}); + const auto shape = make_shared(element::Type_t::i64, Shape{3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); @@ -977,7 +977,7 @@ TEST(attributes, broadcast_v3) TEST(attributes, grn_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{2, 3, 4, 5}); + auto data = make_shared(element::Type_t::f32, Shape{2, 3, 4, 5}); float bias = 1.25f; @@ -991,8 +991,8 @@ TEST(attributes, grn_op) TEST(attributes, group_conv_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{1, 12, 224, 224}); - auto filters = make_shared(element::f32, Shape{4, 1, 3, 5, 5}); + auto data = make_shared(element::Type_t::f32, Shape{1, 12, 224, 224}); + auto filters = make_shared(element::Type_t::f32, Shape{4, 1, 3, 5, 5}); auto strides = Strides{1, 1}; auto pads_begin = CoordinateDiff{1, 2}; auto pads_end = CoordinateDiff{1, 2}; @@ -1011,9 +1011,10 @@ TEST(attributes, group_conv_op) TEST(attributes, group_conv_backprop_data_op) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::f32, Shape{1, 20, 224, 224}); - const auto filter = make_shared(element::f32, Shape{4, 5, 2, 3, 3}); - const auto output_shape = make_shared(element::f32, Shape{1, 8, 447, 447}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 20, 224, 224}); + const auto filter = make_shared(element::Type_t::f32, Shape{4, 5, 2, 3, 3}); + const auto output_shape = + make_shared(element::Type_t::f32, Shape{1, 8, 447, 447}); const auto strides = Strides{2, 1}; const auto pads_begin = CoordinateDiff{3, 4}; @@ -1045,8 +1046,8 @@ TEST(attributes, group_conv_backprop_data_op) TEST(attributes, lrn_op) { FactoryRegistry::get().register_factory(); - const auto arg = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto axes = make_shared(element::i32, Shape{2}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto axes = make_shared(element::Type_t::i32, Shape{2}); const double alpha = 1.1; const double beta = 2.2; @@ -1066,12 +1067,12 @@ TEST(attributes, lrn_op) TEST(attributes, lstm_cell_op) { FactoryRegistry::get().register_factory(); - auto X = make_shared(element::f32, Shape{2, 3}); - auto H = make_shared(element::f32, Shape{2, 3}); - auto W = make_shared(element::f32, Shape{12, 3}); - auto R = make_shared(element::f32, Shape{12, 3}); - const auto initial_hidden_state = make_shared(element::f32, Shape{2, 3}); - const auto initial_cell_state = make_shared(element::f32, Shape{2, 3}); + auto X = make_shared(element::Type_t::f32, Shape{2, 3}); + auto H = make_shared(element::Type_t::f32, Shape{2, 3}); + auto W = make_shared(element::Type_t::f32, Shape{12, 3}); + auto R = make_shared(element::Type_t::f32, Shape{12, 3}); + const auto initial_hidden_state = make_shared(element::Type_t::f32, Shape{2, 3}); + const auto initial_cell_state = make_shared(element::Type_t::f32, Shape{2, 3}); const auto hidden_size = 3; const std::vector activations = {"tanh", "sigmoid", "tanh"}; @@ -1109,17 +1110,19 @@ TEST(attributes, lstm_sequence_op) const size_t hidden_size = 64; const 
auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); - const auto initial_hidden_state = - make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto initial_cell_state = - make_shared(element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); - const auto W = make_shared(element::f32, + make_shared(element::Type_t::f32, Shape{batch_size, seq_length, input_size}); + const auto initial_hidden_state = make_shared( + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto initial_cell_state = make_shared( + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); + const auto W = make_shared(element::Type_t::f32, Shape{num_directions, 4 * hidden_size, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::Type_t::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{num_directions, 4 * hidden_size}); const auto lstm_direction = op::RecurrentSequenceDirection::BIDIRECTIONAL; const std::vector activations_alpha = {1, 2, 3}; @@ -1154,7 +1157,7 @@ TEST(attributes, lstm_sequence_op) TEST(attributes, shuffle_channels_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); auto axis = 0; auto groups = 2; auto shuffle_channels = make_shared(data, axis, groups); @@ -1168,7 +1171,7 @@ TEST(attributes, shuffle_channels_op) TEST(attributes, softmax_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); + auto data = make_shared(element::Type_t::i32, Shape{200}); auto axis = 0; auto softmax = make_shared(data, axis); NodeBuilder builder(softmax); @@ -1180,7 +1183,7 @@ TEST(attributes, softmax_op) TEST(attributes, space_to_depth_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 50, 50}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 50, 50}); auto block_size = 2; auto mode = opset1::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; auto space_to_depth = make_shared(data, mode, block_size); @@ -1194,8 +1197,8 @@ TEST(attributes, space_to_depth_op) TEST(attributes, split_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{200}); - auto axis = make_shared(element::i32, Shape{}); + auto data = make_shared(element::Type_t::i32, Shape{200}); + auto axis = make_shared(element::Type_t::i32, Shape{}); auto num_splits = 2; auto split = make_shared(data, axis, num_splits); NodeBuilder builder(split); @@ -1207,8 +1210,8 @@ TEST(attributes, split_op) TEST(attributes, squared_difference_op) { FactoryRegistry::get().register_factory(); - auto x1 = make_shared(element::i32, Shape{200}); - auto x2 = make_shared(element::i32, Shape{200}); + auto x1 = make_shared(element::Type_t::i32, Shape{200}); + auto x2 = make_shared(element::Type_t::i32, Shape{200}); auto auto_broadcast = op::AutoBroadcastType::NUMPY; auto squared_difference = make_shared(x1, x2, auto_broadcast); NodeBuilder builder(squared_difference); @@ -1220,10 +1223,10 @@ TEST(attributes, squared_difference_op) TEST(attributes, strided_slice_op) { 
FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 4, 5}); - auto begin = make_shared(element::i32, Shape{2}); - auto end = make_shared(element::i32, Shape{2}); - auto stride = make_shared(element::i32, Shape{2}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4, 5}); + auto begin = make_shared(element::Type_t::i32, Shape{2}); + auto end = make_shared(element::Type_t::i32, Shape{2}); + auto stride = make_shared(element::Type_t::i32, Shape{2}); auto begin_mask = std::vector{0, 0}; auto end_mask = std::vector{0, 0}; @@ -1253,8 +1256,8 @@ TEST(attributes, strided_slice_op) TEST(attributes, topk_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{2, 3, 4, 5}); - auto k = make_shared(element::i32, Shape{}); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4, 5}); + auto k = make_shared(element::Type_t::i32, Shape{}); auto axis = 0; auto mode = opset1::TopK::Mode::MAX; @@ -1272,8 +1275,8 @@ TEST(attributes, topk_op) TEST(attributes, logical_xor_op) { FactoryRegistry::get().register_factory(); - auto x1 = make_shared(element::boolean, Shape{200}); - auto x2 = make_shared(element::boolean, Shape{200}); + auto x1 = make_shared(element::Type_t::boolean, Shape{200}); + auto x2 = make_shared(element::Type_t::boolean, Shape{200}); auto auto_broadcast = op::AutoBroadcastType::NUMPY; @@ -1287,7 +1290,7 @@ TEST(attributes, logical_xor_op) TEST(attributes, extractimagepatches_op) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; @@ -1308,7 +1311,7 @@ TEST(attributes, extractimagepatches_op) TEST(attributes, mvn_op) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::i32, Shape{2, 3, 4, 5}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 3, 4, 5}); const auto axes = AxisSet{0, 1}; @@ -1326,7 +1329,7 @@ TEST(attributes, mvn_op) TEST(attributes, reorg_yolo_op_stride) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::i32, Shape{1, 64, 26, 26}); + const auto data = make_shared(element::Type_t::i32, Shape{1, 64, 26, 26}); const auto op = make_shared(data, 2); NodeBuilder builder(op); @@ -1338,7 +1341,7 @@ TEST(attributes, reorg_yolo_op_stride) TEST(attributes, reorg_yolo_op_strides) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::i32, Shape{1, 64, 26, 26}); + const auto data = make_shared(element::Type_t::i32, Shape{1, 64, 26, 26}); const auto op = make_shared(data, Strides{2}); NodeBuilder builder(op); @@ -1350,8 +1353,8 @@ TEST(attributes, reorg_yolo_op_strides) TEST(attributes, roi_pooling_op) { FactoryRegistry::get().register_factory(); - const auto data = make_shared(element::f32, Shape{2, 3, 4, 5}); - const auto coords = make_shared(element::f32, Shape{2, 5}); + const auto data = make_shared(element::Type_t::f32, Shape{2, 3, 4, 5}); + const auto coords = make_shared(element::Type_t::f32, Shape{2, 5}); const auto op = make_shared(data, coords, Shape{5, 5}, 0.123, "bilinear"); NodeBuilder builder(op); @@ -1365,7 +1368,7 @@ TEST(attributes, roi_pooling_op) TEST(attributes, constant_op) { vector data{5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f}; - auto k = make_shared(element::f32, Shape{2, 3}, data); + auto k = make_shared(element::Type_t::f32, Shape{2, 3}, data); NodeBuilder builder(k); auto g_k = 
as_type_ptr(builder.create()); g_k->validate_and_infer_types(); @@ -1379,8 +1382,8 @@ TEST(attributes, constant_op) TEST(attributes, bucketize_v3_op_default_attributes) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{2, 3, 4}); - auto buckets = make_shared(element::f32, Shape{5}); + auto data = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); auto bucketize = make_shared(data, buckets); NodeBuilder builder(bucketize); @@ -1393,9 +1396,9 @@ TEST(attributes, bucketize_v3_op_default_attributes) TEST(attributes, bucketize_v3_op_custom_attributes) { FactoryRegistry::get().register_factory(); - auto data = make_shared(element::f32, Shape{2, 3, 4}); - auto buckets = make_shared(element::f32, Shape{5}); - element::Type output_type = element::i32; + auto data = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); + element::Type output_type = element::Type_t::i32; bool with_right_bound = false; auto bucketize = make_shared(data, buckets, output_type, with_right_bound); @@ -1412,8 +1415,8 @@ TEST(attributes, cum_sum_op_default_attributes) FactoryRegistry::get().register_factory(); Shape shape{1, 4}; - auto A = make_shared(element::f32, shape); - auto axis = make_shared(element::i32, Shape{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axis = make_shared(element::Type_t::i32, Shape{1}); auto cs = make_shared(A, axis); NodeBuilder builder(cs); @@ -1428,8 +1431,8 @@ TEST(attributes, cum_sum_op_custom_attributes) FactoryRegistry::get().register_factory(); Shape shape{1, 4}; - auto A = make_shared(element::f32, shape); - auto axis = make_shared(element::i32, Shape{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axis = make_shared(element::Type_t::i32, Shape{1}); bool exclusive = true; bool reverse = true; auto cs = make_shared(A, axis, exclusive, reverse); @@ -1444,8 +1447,8 @@ TEST(attributes, cum_sum_op_custom_attributes) TEST(attributes, interpolate_op) { FactoryRegistry::get().register_factory(); - auto img = make_shared(element::f32, Shape{1, 3, 32, 32}); - auto out_shape = make_shared(element::i32, Shape{2}); + auto img = make_shared(element::Type_t::f32, Shape{1, 3, 32, 32}); + auto out_shape = make_shared(element::Type_t::i32, Shape{2}); op::v0::InterpolateAttrs interp_atrs; interp_atrs.axes = AxisSet{1, 2}; @@ -1473,11 +1476,11 @@ TEST(attributes, interpolate_op) TEST(attributes, detection_output_op) { FactoryRegistry::get().register_factory(); - const auto box_logits = make_shared(element::f32, Shape{1, 3, 32, 32}); - const auto class_preds = make_shared(element::f32, Shape{32}); - const auto proposals = make_shared(element::f32, Shape{128, 2}); - const auto aux_class_preds = make_shared(element::f32, Shape{16}); - const auto aux_box_pred = make_shared(element::f32, Shape{32, 2}); + const auto box_logits = make_shared(element::Type_t::f32, Shape{1, 3, 32, 32}); + const auto class_preds = make_shared(element::Type_t::f32, Shape{32}); + const auto proposals = make_shared(element::Type_t::f32, Shape{128, 2}); + const auto aux_class_preds = make_shared(element::Type_t::f32, Shape{16}); + const auto aux_box_pred = make_shared(element::Type_t::f32, Shape{32, 2}); op::DetectionOutputAttrs attrs; attrs.num_classes = 32; @@ -1526,8 +1529,8 @@ TEST(attributes, detection_output_op) TEST(attributes, prior_box_op) { FactoryRegistry::get().register_factory(); - const auto layer_shape = 
make_shared(element::i64, Shape{128, 128}); - const auto image_shape = make_shared(element::i64, Shape{32, 32}); + const auto layer_shape = make_shared(element::Type_t::i64, Shape{128, 128}); + const auto image_shape = make_shared(element::Type_t::i64, Shape{32, 32}); op::PriorBoxAttrs attrs; attrs.min_size = vector{16.f, 32.f}; @@ -1567,8 +1570,8 @@ TEST(attributes, prior_box_op) TEST(attributes, prior_box_clustered_op) { FactoryRegistry::get().register_factory(); - const auto layer_shape = make_shared(element::i64, Shape{128, 128}); - const auto image_shape = make_shared(element::i64, Shape{32, 32}); + const auto layer_shape = make_shared(element::Type_t::i64, Shape{128, 128}); + const auto image_shape = make_shared(element::Type_t::i64, Shape{32, 32}); op::PriorBoxClusteredAttrs attrs; attrs.widths = vector{128.f, 512.f, 4096.f}; @@ -1598,9 +1601,11 @@ TEST(attributes, prior_box_clustered_op) TEST(attributes, proposal_op) { FactoryRegistry::get().register_factory(); - const auto class_probs = make_shared(element::i64, Shape{1024, 3, 128, 128}); - const auto class_logits = make_shared(element::i64, Shape{1024, 3, 128, 128}); - const auto image_shape = make_shared(element::i64, Shape{4}); + const auto class_probs = + make_shared(element::Type_t::i64, Shape{1024, 3, 128, 128}); + const auto class_logits = + make_shared(element::Type_t::i64, Shape{1024, 3, 128, 128}); + const auto image_shape = make_shared(element::Type_t::i64, Shape{4}); op::ProposalAttrs attrs; attrs.base_size = 224; diff --git a/ngraph/test/backend/abc.in.cpp b/ngraph/test/backend/abc.in.cpp index 4457ebc647b..8ce73fe72a9 100644 --- a/ngraph/test/backend/abc.in.cpp +++ b/ngraph/test/backend/abc.in.cpp @@ -31,9 +31,9 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, abc) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared((A + B) * C, ParameterVector{A, B, C}); std::vector a{1, 2, 3, 4}; @@ -62,9 +62,9 @@ NGRAPH_TEST(${BACKEND_NAME}, abc) NGRAPH_TEST(${BACKEND_NAME}, abc_int64) { Shape shape{2, 2}; - auto A = make_shared(element::i64, shape); - auto B = make_shared(element::i64, shape); - auto C = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); + auto B = make_shared(element::Type_t::i64, shape); + auto C = make_shared(element::Type_t::i64, shape); auto f = make_shared((A + B) * C, ParameterVector{A, B, C}); std::vector a{1, 2, 3, 4}; diff --git a/ngraph/test/backend/abs.in.cpp b/ngraph/test/backend/abs.in.cpp index 9c2d62c090f..1ab328f996b 100644 --- a/ngraph/test/backend/abs.in.cpp +++ b/ngraph/test/backend/abs.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, abs) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/acos.in.cpp b/ngraph/test/backend/acos.in.cpp index 893322c3d72..530ce69b7ff 100644 --- a/ngraph/test/backend/acos.in.cpp +++ b/ngraph/test/backend/acos.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, acos) { Shape shape{11}; - 
auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/acosh.in.cpp b/ngraph/test/backend/acosh.in.cpp index bcaf7b23aa6..1bfb63fc4d1 100644 --- a/ngraph/test/backend/acosh.in.cpp +++ b/ngraph/test/backend/acosh.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, acosh) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); vector input{0.f, 1.f, -1.f, 2.f, -2.f, 3.f, -3.f, 4.f, 5.f, 10.f, 100.f}; diff --git a/ngraph/test/backend/add.in.cpp b/ngraph/test/backend/add.in.cpp index 93e9f0b5916..e069038c609 100644 --- a/ngraph/test/backend/add.in.cpp +++ b/ngraph/test/backend/add.in.cpp @@ -48,8 +48,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); vector a{1, 2, 3, 4}; @@ -64,8 +64,8 @@ NGRAPH_TEST(${BACKEND_NAME}, add) NGRAPH_TEST(${BACKEND_NAME}, add_overload) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A + B, ParameterVector{A, B}); vector a{1, 2, 3, 4}; @@ -80,8 +80,8 @@ NGRAPH_TEST(${BACKEND_NAME}, add_overload) NGRAPH_TEST(${BACKEND_NAME}, add_in_place) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto T = A + B; auto T2 = T + T; auto T3 = T2 + T2; diff --git a/ngraph/test/backend/aliased_output.in.cpp b/ngraph/test/backend/aliased_output.in.cpp index 8409779339e..42baf1aef64 100644 --- a/ngraph/test/backend/aliased_output.in.cpp +++ b/ngraph/test/backend/aliased_output.in.cpp @@ -31,11 +31,11 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, aliased_output) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto C = A + B; auto D = A * B; - auto E = op::Constant::create(element::f32, shape, {1, 2, 3, 4}); + auto E = op::Constant::create(element::Type_t::f32, shape, {1, 2, 3, 4}); auto f = make_shared(NodeVector{C, C, D, D, C, E, E}, ParameterVector{A, B}); vector a{0, 1, 2, 3}; diff --git a/ngraph/test/backend/api.in.cpp b/ngraph/test/backend/api.in.cpp index 295ff6dfe7f..fae7559f737 100644 --- a/ngraph/test/backend/api.in.cpp +++ b/ngraph/test/backend/api.in.cpp @@ -35,8 +35,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, create_tensor_1) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto 
backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -44,12 +44,12 @@ NGRAPH_TEST(${BACKEND_NAME}, create_tensor_1) // Create some tensors for input/output vector av = {1, 2, 3, 4}; vector bv = {5, 6, 7, 8}; - shared_ptr a = backend->create_tensor(element::f32, shape); - shared_ptr b = backend->create_tensor(element::f32, shape); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, av); copy_data(b, bv); - shared_ptr result = backend->create_tensor(element::f32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -60,18 +60,18 @@ NGRAPH_TEST(${BACKEND_NAME}, create_tensor_1) NGRAPH_TEST(${BACKEND_NAME}, get_parameters_and_results) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared((A + B) * C, ParameterVector{A, B, C}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape); - shared_ptr b = backend->create_tensor(element::f32, shape); - shared_ptr c = backend->create_tensor(element::f32, shape); - shared_ptr result = backend->create_tensor(element::f32, shape); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr c = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{1, 2}, {3, 4}}).get_vector()); copy_data(b, test::NDArray({{5, 6}, {7, 8}}).get_vector()); diff --git a/ngraph/test/backend/asin.in.cpp b/ngraph/test/backend/asin.in.cpp index 5b6084e3040..95ecbcc2668 100644 --- a/ngraph/test/backend/asin.in.cpp +++ b/ngraph/test/backend/asin.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, asin) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/asinh.in.cpp b/ngraph/test/backend/asinh.in.cpp index 6dd0abe9568..b716fce2874 100644 --- a/ngraph/test/backend/asinh.in.cpp +++ b/ngraph/test/backend/asinh.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, asinh) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); vector input{0.f, 1.f, -1.f, 2.f, -2.f, 3.f, -3.f, 4.f, 5.f, 10.f, 100.f}; diff --git a/ngraph/test/backend/atan.in.cpp b/ngraph/test/backend/atan.in.cpp index e2f0c04b27f..adb9bd107dc 100644 --- a/ngraph/test/backend/atan.in.cpp +++ b/ngraph/test/backend/atan.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, atan) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); 
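
Every one of these backend-test hunks applies the same substitution, so a minimal standalone sketch of the pattern may help (assuming the nGraph headers and the INTERPRETER backend are available; the `op::Parameter` and `Function` template arguments, elided by the flattened diff above, are restored here for illustration):

```C++
#include <memory>

#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/backend.hpp"

using namespace ngraph;

// Minimal sketch of the substitution these hunks apply. The template
// arguments (op::Parameter, Function, op::Atan) are restored for
// illustration, and the INTERPRETER backend is assumed to be registered.
void element_type_migration_sketch()
{
    // Old: auto A = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
    // New: pass the enum; element::Type is constructed implicitly from Type_t.
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 2});
    auto f = std::make_shared<Function>(std::make_shared<op::Atan>(A),
                                        ParameterVector{A});

    // Backend tensors accept the enum the same way.
    auto backend = runtime::Backend::create("INTERPRETER");
    auto t = backend->create_tensor(element::Type_t::f32, Shape{2, 2});
}
```

A related change shows up in the batch-norm tests further down: `auto& et = element::f64;` becomes the by-value `element::Type et = element::Type_t::f64;`, since the constant the reference bound to is being retired and an `element::Type` is instead constructed from the enum.
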
auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/atanh.in.cpp b/ngraph/test/backend/atanh.in.cpp index ce7b5a82b64..99e6ab8ce25 100644 --- a/ngraph/test/backend/atanh.in.cpp +++ b/ngraph/test/backend/atanh.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, atanh) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); vector input{0.f, 1.f, -1.f, 2.f, -2.f, 3.f, -3.f, 4.f, 5.f, 10.f, 100.f}; diff --git a/ngraph/test/backend/auto_broadcast.in.cpp b/ngraph/test/backend/auto_broadcast.in.cpp index 928218ccbf9..723dd467dcd 100644 --- a/ngraph/test/backend/auto_broadcast.in.cpp +++ b/ngraph/test/backend/auto_broadcast.in.cpp @@ -71,11 +71,11 @@ void check_auto_bcast( if (std::is_same::value) { - iet = element::boolean; + iet = element::Type_t::boolean; } if (std::is_same::value) { - oet = element::boolean; + oet = element::Type_t::boolean; } auto A = make_shared(iet, Shape{2, 3}); auto B = make_shared(iet, Shape{3}); @@ -110,17 +110,17 @@ NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise_pdpd_dynamic) { auto pshape_a = PartialShape::dynamic(); auto pshape_b = PartialShape::dynamic(); - auto a = make_shared(element::f32, pshape_a); - auto b = make_shared(element::f32, pshape_b); + auto a = make_shared(element::Type_t::f32, pshape_a); + auto b = make_shared(element::Type_t::f32, pshape_b); op::AutoBroadcastSpec autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, -1); auto f = make_shared(make_shared(a, b, autob), ParameterVector{a, b}); auto backend = runtime::Backend::create("${BACKEND_NAME}", true); auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); - auto t_a = backend->create_tensor(element::f32, Shape{2, 3}); - auto t_b = backend->create_tensor(element::f32, Shape{3}); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); + auto t_a = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); + auto t_b = backend->create_tensor(element::Type_t::f32, Shape{3}); copy_data(t_a, vector{1, 2, 3, 4, 5, 6}); copy_data(t_b, vector{5, 6, 7}); ex->call_with_validate({t_r}, {t_a, t_b}); @@ -134,18 +134,18 @@ NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise_pdpd_dynamic) autob = op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1); f = make_shared(make_shared(a, b, autob), ParameterVector{a, b}); ex = backend->compile(f); - t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); - t_a = backend->create_tensor(element::f32, Shape{2, 3, 4, 5}); - t_b = backend->create_tensor(element::f32, Shape{3, 4}); + t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); + t_a = backend->create_tensor(element::Type_t::f32, Shape{2, 3, 4, 5}); + t_b = backend->create_tensor(element::Type_t::f32, Shape{3, 4}); copy_data(t_a, vector(2 * 3 * 4 * 5, 1)); copy_data(t_b, vector(3 * 4, 1)); ex->call_with_validate({t_r}, {t_a, t_b}); ASSERT_EQ(t_r->get_shape(), (Shape{2, 3, 4, 5})); // a shape {2, 3, 4, 5}, b shape {3, 1} axis = 1 - t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); - t_a = backend->create_tensor(element::f32, Shape{2, 3, 4, 5}); - t_b = backend->create_tensor(element::f32, Shape{3, 1}); + t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); + t_a = 
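// create_tensor and create_dynamic_tensor take const element::Type&,
// so the element::Type_t value converts implicitly here.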
backend->create_tensor(element::Type_t::f32, Shape{2, 3, 4, 5}); + t_b = backend->create_tensor(element::Type_t::f32, Shape{3, 1}); copy_data(t_a, vector(2 * 3 * 4 * 5, 1)); copy_data(t_b, vector(3, 1)); ex->call_with_validate({t_r}, {t_a, t_b}); @@ -154,8 +154,8 @@ NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_binary_elementwise_pdpd_dynamic) NGRAPH_TEST(${BACKEND_NAME}, auto_bcast_string_cast) { - auto a = make_shared(element::f32, Shape{1}); - auto b = make_shared(element::f32, Shape{1}); + auto a = make_shared(element::Type_t::f32, Shape{1}); + auto b = make_shared(element::Type_t::f32, Shape{1}); auto add = make_shared(a, b, "NUMPY"); ASSERT_EQ(add->get_autob(), op::AutoBroadcastType::NUMPY); diff --git a/ngraph/test/backend/batch_norm.in.cpp b/ngraph/test/backend/batch_norm.in.cpp index d4b501c8c9d..ee81eb2c48c 100644 --- a/ngraph/test/backend/batch_norm.in.cpp +++ b/ngraph/test/backend/batch_norm.in.cpp @@ -161,7 +161,7 @@ public: NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_0eps_f64) { using T = double; - auto& et = element::f64; + element::Type et = element::Type_t::f64; auto backend = runtime::Backend::create("${BACKEND_NAME}"); BatchNormInferenceTesterZeroEpsilon bnt(backend, et); EXPECT_TRUE(bnt.test_gamma()) << "Gamma test"; @@ -173,7 +173,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_0eps_f64) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_0eps_f32) { using T = float; - auto& et = element::f32; + element::Type et = element::Type_t::f32; auto backend = runtime::Backend::create("${BACKEND_NAME}"); BatchNormInferenceTesterZeroEpsilon bnt(backend, et); EXPECT_TRUE(bnt.test_gamma()) << "Gamma test"; @@ -255,7 +255,7 @@ public: NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_f64) { using T = double; - auto& et = element::f64; + element::Type et = element::Type_t::f64; auto backend = runtime::Backend::create("${BACKEND_NAME}"); BatchNormInferenceTesterNonZeroEpsilon bnt(backend, et); EXPECT_TRUE(bnt.test_gamma()) << "Gamma test"; @@ -267,7 +267,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_f64) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_f32) { using T = float; - auto& et = element::f32; + element::Type et = element::Type_t::f32; auto backend = runtime::Backend::create("${BACKEND_NAME}"); BatchNormInferenceTesterNonZeroEpsilon bnt(backend, et); EXPECT_TRUE(bnt.test_gamma()) << "Gamma test"; @@ -279,10 +279,10 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_f32) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication) { auto input_shape = Shape{2, 2, 2, 1}; - auto input = make_shared(element::f32, input_shape); + auto input = make_shared(element::Type_t::f32, input_shape); auto mvgb_shape = Shape{2}; - auto mvgb = make_shared(element::f32, mvgb_shape); + auto mvgb = make_shared(element::Type_t::f32, mvgb_shape); double eps = 0.001; auto shape_r = Shape{2, 2, 2, 1}; @@ -291,7 +291,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication) auto f = make_shared(bn, ParameterVector{input, mvgb, mvgb, mvgb, mvgb}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto _input = backend->create_tensor(element::f32, input_shape); + auto _input = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(_input, vector{0.54881352f, 0.71518934f, @@ -302,9 +302,9 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication) 0.4375872f, 0.89177299f}); - auto _mvgb = backend->create_tensor(element::f32, mvgb_shape); + auto _mvgb = 
backend->create_tensor(element::Type_t::f32, mvgb_shape); copy_data(_mvgb, vector{1.0f, 1.0f}); - auto bn_output = backend->create_tensor(element::f32, shape_r); + auto bn_output = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{0.54903894f, 0.71533161f, @@ -324,10 +324,10 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5) { auto input_shape = Shape{2, 2, 2, 1}; - auto input = make_shared(element::f32, input_shape); + auto input = make_shared(element::Type_t::f32, input_shape); auto mvgb_shape = Shape{2}; - auto mvgb = make_shared(element::f32, mvgb_shape); + auto mvgb = make_shared(element::Type_t::f32, mvgb_shape); double eps = 0.001; auto shape_r = Shape{2, 2, 2, 1}; @@ -336,7 +336,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5) auto f = make_shared(bn, ParameterVector{input, mvgb, mvgb, mvgb, mvgb}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto _input = backend->create_tensor(element::f32, input_shape); + auto _input = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(_input, vector{0.54881352f, 0.71518934f, @@ -347,9 +347,9 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5) 0.4375872f, 0.89177299f}); - auto _mvgb = backend->create_tensor(element::f32, mvgb_shape); + auto _mvgb = backend->create_tensor(element::Type_t::f32, mvgb_shape); copy_data(_mvgb, vector{1.0f, 1.0f}); - auto bn_output = backend->create_tensor(element::f32, shape_r); + auto bn_output = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{0.54903894f, 0.71533161f, @@ -369,15 +369,15 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_inference_parameters_duplication_v5) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1) { auto input_shape = Shape{2, 2, 2, 1}; - auto input = make_shared(element::f32, input_shape); + auto input = make_shared(element::Type_t::f32, input_shape); auto gamma_shape = Shape{2}; - auto gamma = make_shared(element::f32, gamma_shape); + auto gamma = make_shared(element::Type_t::f32, gamma_shape); auto beta_shape = Shape{2}; - auto beta = make_shared(element::f32, beta_shape); + auto beta = make_shared(element::Type_t::f32, beta_shape); auto mean_shape = Shape{2}; - auto mean = make_shared(element::f32, mean_shape); + auto mean = make_shared(element::Type_t::f32, mean_shape); auto var_shape = Shape{2}; - auto var = make_shared(element::f32, var_shape); + auto var = make_shared(element::Type_t::f32, var_shape); double eps = 0.001; auto shape_r = Shape{2, 2, 2, 1}; auto bn = make_shared(input, gamma, beta, mean, var, eps); @@ -385,7 +385,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1) auto f = make_shared(bn, ParameterVector{input, gamma, beta, mean, var}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto _input = backend->create_tensor(element::f32, input_shape); + auto _input = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(_input, vector{0.54881352f, 0.71518934f, @@ -396,15 +396,15 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1) 0.4375872f, 0.89177299f}); - auto _gamma = backend->create_tensor(element::f32, gamma_shape); + auto _gamma = backend->create_tensor(element::Type_t::f32, gamma_shape); copy_data(_gamma, vector{1.0f, 1.0f}); - auto _beta = 
backend->create_tensor(element::f32, beta_shape); + auto _beta = backend->create_tensor(element::Type_t::f32, beta_shape); copy_data(_beta, vector{0.0f, 0.0f}); - auto _mean = backend->create_tensor(element::f32, mean_shape); + auto _mean = backend->create_tensor(element::Type_t::f32, mean_shape); copy_data(_mean, vector{0.583388f, 0.619252f}); - auto _var = backend->create_tensor(element::f32, var_shape); + auto _var = backend->create_tensor(element::Type_t::f32, var_shape); copy_data(_var, vector{0.0119972f, 0.0282681f}); - auto bn_output = backend->create_tensor(element::f32, shape_r); + auto bn_output = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{ -0.30327f, 1.1561f, -0.0963782f, -0.434702f, -1.4011f, 0.548275f, -1.06187f, 1.59295f}; @@ -418,15 +418,15 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1) NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1_v5) { auto input_shape = Shape{2, 2, 2, 1}; - auto input = make_shared(element::f32, input_shape); + auto input = make_shared(element::Type_t::f32, input_shape); auto gamma_shape = Shape{2}; - auto gamma = make_shared(element::f32, gamma_shape); + auto gamma = make_shared(element::Type_t::f32, gamma_shape); auto beta_shape = Shape{2}; - auto beta = make_shared(element::f32, beta_shape); + auto beta = make_shared(element::Type_t::f32, beta_shape); auto mean_shape = Shape{2}; - auto mean = make_shared(element::f32, mean_shape); + auto mean = make_shared(element::Type_t::f32, mean_shape); auto var_shape = Shape{2}; - auto var = make_shared(element::f32, var_shape); + auto var = make_shared(element::Type_t::f32, var_shape); double eps = 0.001; auto shape_r = Shape{2, 2, 2, 1}; auto bn = make_shared(input, gamma, beta, mean, var, eps); @@ -434,7 +434,7 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1_v5) auto f = make_shared(bn, ParameterVector{input, gamma, beta, mean, var}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto _input = backend->create_tensor(element::f32, input_shape); + auto _input = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(_input, vector{0.54881352f, 0.71518934f, @@ -445,15 +445,15 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_fprop_inference_b2c2h2w1_v5) 0.4375872f, 0.89177299f}); - auto _gamma = backend->create_tensor(element::f32, gamma_shape); + auto _gamma = backend->create_tensor(element::Type_t::f32, gamma_shape); copy_data(_gamma, vector{1.0f, 1.0f}); - auto _beta = backend->create_tensor(element::f32, beta_shape); + auto _beta = backend->create_tensor(element::Type_t::f32, beta_shape); copy_data(_beta, vector{0.0f, 0.0f}); - auto _mean = backend->create_tensor(element::f32, mean_shape); + auto _mean = backend->create_tensor(element::Type_t::f32, mean_shape); copy_data(_mean, vector{0.583388f, 0.619252f}); - auto _var = backend->create_tensor(element::f32, var_shape); + auto _var = backend->create_tensor(element::Type_t::f32, var_shape); copy_data(_var, vector{0.0119972f, 0.0282681f}); - auto bn_output = backend->create_tensor(element::f32, shape_r); + auto bn_output = backend->create_tensor(element::Type_t::f32, shape_r); vector expected_result{ -0.30327f, 1.1561f, -0.0963782f, -0.434702f, -1.4011f, 0.548275f, -1.06187f, 1.59295f}; diff --git a/ngraph/test/backend/broadcast.in.cpp b/ngraph/test/backend/broadcast.in.cpp index 25b5ac6976b..6c203a8adb9 100644 --- a/ngraph/test/backend/broadcast.in.cpp +++ b/ngraph/test/backend/broadcast.in.cpp @@ -43,19 
+43,19 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_vector) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{4}; auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r)), + A, op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -66,19 +66,19 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_vector) NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_matrix) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2}; auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r)), + A, op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -89,19 +89,19 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_matrix) NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_tensor) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r)), + A, op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -113,18 +113,18 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_tensor) NGRAPH_TEST(${BACKEND_NAME}, broadcast_trivial) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape.size()}, shape)), + A, op::Constant::create(element::Type_t::u64, Shape{shape.size()}, shape)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 6, 8, 16, 32, 64, 128}); - auto result = 
backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -136,21 +136,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_trivial) NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_colwise) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 4}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {0})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {0})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -162,21 +162,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_colwise) NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise) { Shape shape_a{4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 4}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -189,22 +189,24 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise) NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_reversed) { Shape shape_a{4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 4}; auto broadcast = make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})); - auto reverse = make_shared( - broadcast, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})); + auto reverse = + make_shared(broadcast, + op::Constant::create(element::Type_t::i64, {1}, {1}), + op::v1::Reverse::Mode::INDEX); auto f = make_shared(reverse, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = 
backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -216,21 +218,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_reversed) NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_int64) { Shape shape_a{4}; - auto A = make_shared(element::i64, shape_a); + auto A = make_shared(element::Type_t::i64, shape_a); Shape shape_r{3, 4}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape_a); + auto a = backend->create_tensor(element::Type_t::i64, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i64, shape_r); + auto result = backend->create_tensor(element::Type_t::i64, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -240,21 +242,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_vector_rowwise_int64) NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int64) { Shape shape_a{1}; - auto A = make_shared(element::i64, shape_a); + auto A = make_shared(element::Type_t::i64, shape_a); Shape shape_r{3, 1}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape_a); + auto a = backend->create_tensor(element::Type_t::i64, shape_a); copy_data(a, vector{4}); - auto result = backend->create_tensor(element::i64, shape_r); + auto result = backend->create_tensor(element::Type_t::i64, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -264,21 +266,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int64) NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int32) { Shape shape_a{1}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3, 1}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{1}, {1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{1}, {1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{4}); - auto result = backend->create_tensor(element::i32, shape_r); + auto result = backend->create_tensor(element::Type_t::i32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -287,15 +289,16 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_scalar_to_matrix_int32) static void broadcast_test_helper(const Shape& shape_a, const Shape& shape_r, const AxisSet& axes) { - 
auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); vector inp_data(shape_size(shape_a)); iota(inp_data.begin(), inp_data.end(), 1.f); - auto shape_const = op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r); + auto shape_const = op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r); std::shared_ptr broadcast; if (axes.size() > 0) { - auto axes_const = op::Constant::create(element::i64, Shape{axes.size()}, axes.to_vector()); + auto axes_const = + op::Constant::create(element::Type_t::i64, Shape{axes.size()}, axes.to_vector()); broadcast = make_shared(A, shape_const, axes_const); } else @@ -307,14 +310,14 @@ static void broadcast_test_helper(const Shape& shape_a, const Shape& shape_r, co auto ref_backend = runtime::Backend::create("INTERPRETER"); auto wrk_backend = runtime::Backend::create("${BACKEND_NAME}"); - auto wrk_a = wrk_backend->create_tensor(element::f32, shape_a); + auto wrk_a = wrk_backend->create_tensor(element::Type_t::f32, shape_a); copy_data(wrk_a, inp_data); - auto ref_a = ref_backend->create_tensor(element::f32, shape_a); + auto ref_a = ref_backend->create_tensor(element::Type_t::f32, shape_a); copy_data(ref_a, inp_data); - auto wrk_result = wrk_backend->create_tensor(element::f32, shape_r); - auto ref_result = ref_backend->create_tensor(element::f32, shape_r); + auto wrk_result = wrk_backend->create_tensor(element::Type_t::f32, shape_r); + auto ref_result = ref_backend->create_tensor(element::Type_t::f32, shape_r); auto wrk_handle = wrk_backend->compile(f); auto ref_handle = ref_backend->compile(f); @@ -446,19 +449,19 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_algo_3d_stride_2) NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_0) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto f = make_shared( make_shared( - A, op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r)), + A, op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r)), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -470,21 +473,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_0) NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_1) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{2}, {0, 2})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{2}, {0, 2})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = 
backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -496,21 +499,21 @@ NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_1) NGRAPH_TEST(${BACKEND_NAME}, broadcast_matrix_2) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto f = make_shared( make_shared( A, - op::Constant::create(element::u64, Shape{shape_r.size()}, shape_r), - op::Constant::create(element::i64, Shape{2}, {0, 1})), + op::Constant::create(element::Type_t::u64, Shape{shape_r.size()}, shape_r), + op::Constant::create(element::Type_t::i64, Shape{2}, {0, 1})), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/builder_reduce_ops_opset1.in.cpp b/ngraph/test/backend/builder_reduce_ops_opset1.in.cpp index 1a322bb09bf..d5128334ef2 100644 --- a/ngraph/test/backend/builder_reduce_ops_opset1.in.cpp +++ b/ngraph/test/backend/builder_reduce_ops_opset1.in.cpp @@ -36,7 +36,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_mean) { const Shape input_shape{4, 3, 2}; const AxisSet axes{1, 2}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto mean_builder = builder::opset1::mean(input, axes); auto function = make_shared(mean_builder, ParameterVector{input}); @@ -53,7 +53,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_mean_dynamic) { const Shape input_shape{2, 4, 5}; const AxisSet axes{0, 1}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto mean_builder = builder::opset1::mean(input, axes); auto function = make_shared(mean_builder, ParameterVector{input}); @@ -71,7 +71,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_mean_dynamic_2) { const Shape input_shape{2, 1, 3}; const AxisSet axes{1, 2}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto mean_builder = builder::opset1::mean(input, axes); auto function = make_shared(mean_builder, ParameterVector{input}); @@ -91,7 +91,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_5d_to_3d) const auto elems_in_tensor = shape_size(shape_input); - const auto A = make_shared(element::f32, shape_input); + const auto A = make_shared(element::Type_t::f32, shape_input); const auto builder_collapse = builder::opset1::collapse(A, 1, shape_input.size() - 2); const auto f = make_shared(builder_collapse, ParameterVector{A}); @@ -112,7 +112,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_all_dims) const auto elems_in_tensor = shape_size(shape_input); - const auto A = make_shared(element::f32, shape_input); + const auto A = make_shared(element::Type_t::f32, shape_input); const auto builder_collapse = builder::opset1::collapse(A, 0, shape_input.size() - 1); const auto f = make_shared(builder_collapse, ParameterVector{A}); @@ -132,7 +132,7 @@ NGRAPH_TEST(${BACKEND_NAME}, 
builder_opset1_collapse_none) const auto elems_in_tensor = shape_size(shape_input); - const auto A = make_shared(element::f32, shape_input); + const auto A = make_shared(element::Type_t::f32, shape_input); const auto builder_collapse = builder::opset1::collapse(A, 2, shape_input.size() - 4); const auto f = make_shared(builder_collapse, ParameterVector{A}); @@ -151,7 +151,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_opset1_collapse_dyn_shape) PartialShape pshape_input{1, 2, 3, 4, 5, Dimension()}; PartialShape pshape_output{1, 24, 5, Dimension()}; - const auto A = make_shared(element::f32, pshape_input); + const auto A = make_shared(element::Type_t::f32, pshape_input); EXPECT_TRUE(A->get_output_partial_shape(0).same_scheme( PartialShape{1, 2, 3, 4, 5, Dimension::dynamic()})); const auto builder_collapse = builder::opset1::collapse(A, 1, 3); diff --git a/ngraph/test/backend/ceiling.in.cpp b/ngraph/test/backend/ceiling.in.cpp index e237bfa9c29..ca97bd85eb7 100644 --- a/ngraph/test/backend/ceiling.in.cpp +++ b/ngraph/test/backend/ceiling.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, ceiling) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); @@ -59,7 +59,7 @@ NGRAPH_TEST(${BACKEND_NAME}, ceiling_int64) { // This tests large numbers that will not fit in a double Shape shape{3}; - auto A = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); vector expected{0, 1, 0x4000000000000001}; diff --git a/ngraph/test/backend/comparison.in.cpp b/ngraph/test/backend/comparison.in.cpp index 0f9651e3c1e..98a078a1048 100644 --- a/ngraph/test/backend/comparison.in.cpp +++ b/ngraph/test/backend/comparison.in.cpp @@ -43,18 +43,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, equal) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 8, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -64,18 +64,18 @@ NGRAPH_TEST(${BACKEND_NAME}, equal) NGRAPH_TEST(${BACKEND_NAME}, notequal) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = 
backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 1, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 8, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -85,18 +85,18 @@ NGRAPH_TEST(${BACKEND_NAME}, notequal) NGRAPH_TEST(${BACKEND_NAME}, greater) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0.5, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -106,18 +106,18 @@ NGRAPH_TEST(${BACKEND_NAME}, greater) NGRAPH_TEST(${BACKEND_NAME}, greater_int64) { Shape shape{2, 2, 2}; - auto A = make_shared(element::i64, shape); - auto B = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); + auto B = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape); + auto a = backend->create_tensor(element::Type_t::i64, shape); copy_data(a, vector{0x4000000000000002, 0x4000000000000006, -8, 17, -5, 5, 2, 1}); - auto b = backend->create_tensor(element::i64, shape); + auto b = backend->create_tensor(element::Type_t::i64, shape); copy_data(b, vector{0x4000000000000001, 0x4000000000000002, 4, 8, 0, 0, 1, 2}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -127,18 +127,18 @@ NGRAPH_TEST(${BACKEND_NAME}, greater_int64) NGRAPH_TEST(${BACKEND_NAME}, greatereq) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, -8, 8, 0, 0, 0.5, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = 
backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -148,18 +148,18 @@ NGRAPH_TEST(${BACKEND_NAME}, greatereq) NGRAPH_TEST(${BACKEND_NAME}, less) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0.5, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -169,18 +169,18 @@ NGRAPH_TEST(${BACKEND_NAME}, less) NGRAPH_TEST(${BACKEND_NAME}, lesseq) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, -8, 8, 0, 0, 0.5, 1.5}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -190,18 +190,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lesseq) NGRAPH_TEST(${BACKEND_NAME}, lesseq_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{0x40000170, 0x40000005, 0x40000005, -5}); - auto b = backend->create_tensor(element::i32, shape); + auto b = backend->create_tensor(element::Type_t::i32, shape); copy_data(b, vector{0x40000140, 0x40000001, 0x40000005, 0}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -211,18 +211,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lesseq_int32) NGRAPH_TEST(${BACKEND_NAME}, lesseq_bool) { Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::boolean, shape); + auto A = 
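// element::boolean follows the same pattern: the inputs and the boolean
// result tensor are created from element::Type_t::boolean.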
make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::boolean, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); + auto a = backend->create_tensor(element::Type_t::boolean, shape); copy_data(a, vector{1, 1, 1, 1, 1, 1, 1, 1}); - auto b = backend->create_tensor(element::boolean, shape); + auto b = backend->create_tensor(element::Type_t::boolean, shape); copy_data(b, vector{0, 0, 0, 0, 0, 0, 0, 0}); - auto result = backend->create_tensor(element::boolean, shape); + auto result = backend->create_tensor(element::Type_t::boolean, shape); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. diff --git a/ngraph/test/backend/concat.in.cpp b/ngraph/test/backend/concat.in.cpp index 0dc8b899efe..db8e68275b6 100644 --- a/ngraph/test/backend/concat.in.cpp +++ b/ngraph/test/backend/concat.in.cpp @@ -34,11 +34,11 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis) { auto pshape_a = PartialShape::dynamic(); - auto A = make_shared(element::f32, pshape_a); + auto A = make_shared(element::Type_t::f32, pshape_a); auto pshape_b = PartialShape::dynamic(); - auto B = make_shared(element::f32, pshape_b); + auto B = make_shared(element::Type_t::f32, pshape_b); auto pshape_c = PartialShape::dynamic(); - auto C = make_shared(element::f32, pshape_c); + auto C = make_shared(element::Type_t::f32, pshape_c); auto pshape_r = PartialShape::dynamic(); auto f = make_shared(make_shared(NodeVector{A, B, C}, -1), ParameterVector{A, B, C}); @@ -46,13 +46,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis) auto backend = runtime::Backend::create("${BACKEND_NAME}", true); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, Shape{2, 2}); + auto a = backend->create_tensor(element::Type_t::f32, Shape{2, 2}); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, Shape{2, 3}); + auto b = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); copy_data(b, vector{1, 2, 4, 8, 16, 32}); - auto c = backend->create_tensor(element::f32, Shape{2, 3}); + auto c = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); copy_data(c, vector{2, 3, 5, 7, 11, 13}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); ASSERT_EQ(result->get_shape(), (Shape{2, 8})); @@ -63,11 +63,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis) NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_colwise) { Shape shape_a{2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{2, 3}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{2, 3}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{2, 8}; auto f = make_shared(make_shared(NodeVector{A, B, C}, 1), ParameterVector{A, B, C}); @@ -75,13 +75,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_colwise) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
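The comparison-op hunks above are purely mechanical: only the element-type argument changes, and every test keeps the same structure. A condensed sketch of that structure (a hypothetical standalone helper, not code from the suite; `op::v1::Greater` stands in for whichever comparison each test builds):

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Function> make_greater_function()
{
    Shape shape{2, 2, 2};
    // element::Type_t enum values convert implicitly to element::Type,
    // which is what makes this mechanical substitution possible.
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    auto B = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    auto cmp = std::make_shared<op::v1::Greater>(A, B);
    // Comparison ops produce boolean elements whatever the input type,
    // hence every result tensor above is created as Type_t::boolean.
    NGRAPH_CHECK(cmp->get_element_type() == element::Type_t::boolean);
    return std::make_shared<Function>(cmp, ParameterVector{A, B});
}
```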
diff --git a/ngraph/test/backend/concat.in.cpp b/ngraph/test/backend/concat.in.cpp
index 0dc8b899efe..db8e68275b6 100644
--- a/ngraph/test/backend/concat.in.cpp
+++ b/ngraph/test/backend/concat.in.cpp
@@ -34,11 +34,11 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis)
 {
     auto pshape_a = PartialShape::dynamic();
-    auto A = make_shared<op::Parameter>(element::f32, pshape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, pshape_a);
     auto pshape_b = PartialShape::dynamic();
-    auto B = make_shared<op::Parameter>(element::f32, pshape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, pshape_b);
     auto pshape_c = PartialShape::dynamic();
-    auto C = make_shared<op::Parameter>(element::f32, pshape_c);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, pshape_c);
     auto pshape_r = PartialShape::dynamic();
     auto f = make_shared<Function>(make_shared<op::Concat>(NodeVector{A, B, C}, -1),
                                    ParameterVector{A, B, C});
@@ -46,13 +46,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis)
     auto backend = runtime::Backend::create("${BACKEND_NAME}", true);

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, Shape{2, 2});
+    auto a = backend->create_tensor(element::Type_t::f32, Shape{2, 2});
     copy_data(a, vector<float>{2, 4, 8, 16});
-    auto b = backend->create_tensor(element::f32, Shape{2, 3});
+    auto b = backend->create_tensor(element::Type_t::f32, Shape{2, 3});
     copy_data(b, vector<float>{1, 2, 4, 8, 16, 32});
-    auto c = backend->create_tensor(element::f32, Shape{2, 3});
+    auto c = backend->create_tensor(element::Type_t::f32, Shape{2, 3});
     copy_data(c, vector<float>{2, 3, 5, 7, 11, 13});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
     ASSERT_EQ(result->get_shape(), (Shape{2, 8}));
@@ -63,11 +63,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_negative_axis)
 NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_colwise)
 {
     Shape shape_a{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{2, 3};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_c{2, 3};
-    auto C = make_shared<op::Parameter>(element::f32, shape_c);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape_c);
     Shape shape_r{2, 8};
     auto f = make_shared<Function>(make_shared<op::Concat>(NodeVector{A, B, C}, 1),
                                    ParameterVector{A, B, C});
@@ -75,13 +75,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_colwise)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{2, 4, 8, 16});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector<float>{1, 2, 4, 8, 16, 32});
-    auto c = backend->create_tensor(element::f32, shape_c);
+    auto c = backend->create_tensor(element::Type_t::f32, shape_c);
     copy_data(c, vector<float>{2, 3, 5, 7, 11, 13});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
@@ -94,11 +94,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_colwise)
 NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_rowwise)
 {
     Shape shape_a{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{3, 2};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_c{3, 2};
-    auto C = make_shared<op::Parameter>(element::f32, shape_c);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape_c);
     Shape shape_r{8, 2};
     auto f = make_shared<Function>(make_shared<op::Concat>(NodeVector{A, B, C}, 0),
                                    ParameterVector{A, B, C});
@@ -106,13 +106,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_rowwise)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{2, 4, 8, 16});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector<float>{1, 2, 4, 8, 16, 32});
-    auto c = backend->create_tensor(element::f32, shape_c);
+    auto c = backend->create_tensor(element::Type_t::f32, shape_c);
     copy_data(c, vector<float>{2, 3, 5, 7, 11, 13});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
@@ -125,11 +125,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_rowwise)
 NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_int64)
 {
     Shape shape_a{2, 2};
-    auto A = make_shared<op::Parameter>(element::i64, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::i64, shape_a);
     Shape shape_b{3, 2};
-    auto B = make_shared<op::Parameter>(element::i64, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::i64, shape_b);
     Shape shape_c{3, 2};
-    auto C = make_shared<op::Parameter>(element::i64, shape_c);
+    auto C = make_shared<op::Parameter>(element::Type_t::i64, shape_c);
     Shape shape_r{8, 2};
     auto f = make_shared<Function>(make_shared<op::Concat>(NodeVector{A, B, C}, 0),
                                    ParameterVector{A, B, C});
@@ -137,13 +137,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_matrix_int64)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i64, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i64, shape_a);
     copy_data(a, vector<int64_t>{2, 4, 8, 16});
-    auto b = backend->create_tensor(element::i64, shape_b);
+    auto b = backend->create_tensor(element::Type_t::i64, shape_b);
     copy_data(b, vector<int64_t>{1, 2, 4, 8, 16, 32});
-    auto c = backend->create_tensor(element::i64, shape_c);
+    auto c = backend->create_tensor(element::Type_t::i64, shape_c);
     copy_data(c, vector<int64_t>{2, 3, 5, 7, 11, 13});
-    auto result = backend->create_tensor(element::i64, shape_r);
+    auto result = backend->create_tensor(element::Type_t::i64, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
@@ -166,7 +166,7 @@ NGRAPH_TEST_P(${BACKEND_NAME}, concat_vector_params, concat_vector_large)
     ParameterVector inputs_param;
     for (uint32_t i = 0; i < num_inputs; i++)
     {
-        auto A = make_shared<op::Parameter>(element::f32, shape_a);
+        auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
         inputs_param.push_back(A);
         inputs.push_back(A);
     }
@@ -180,12 +180,12 @@ NGRAPH_TEST_P(${BACKEND_NAME}, concat_vector_params, concat_vector_large)
     std::vector<float> ref_result;
     for (uint32_t i = 0; i < num_inputs; i++)
     {
-        auto a = backend->create_tensor(element::f32, shape_a);
+        auto a = backend->create_tensor(element::Type_t::f32, shape_a);
         copy_data(a, vector<float>{static_cast<float>(i)});
         ref_result.push_back(static_cast<float>(i));
         inputs_value.push_back(a);
     }
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, inputs_value);
@@ -205,11 +205,11 @@ NGRAPH_INSTANTIATE_TEST_CASE_P(${BACKEND_NAME},
 NGRAPH_TEST(${BACKEND_NAME}, concat_vector)
 {
     Shape shape_a{4};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{6};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_c{2};
-    auto C = make_shared<op::Parameter>(element::f32, shape_c);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape_c);
     Shape shape_r{12};
     auto f = make_shared<Function>(make_shared<op::Concat>(NodeVector{A, B, C}, 0),
                                    ParameterVector{A, B, C});
@@ -217,13 +217,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_vector)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{2, 4, 8, 16});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector<float>{1, 2, 4, 8, 16, 32});
-    auto c = backend->create_tensor(element::f32, shape_c);
+    auto c = backend->create_tensor(element::Type_t::f32, shape_c);
     copy_data(c, vector<float>{18, 19});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
@@ -235,9 +235,9 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_vector)
 NGRAPH_TEST(${BACKEND_NAME}, concat_4d_tensor)
 {
     Shape shape{1, 1, 1, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
-    auto C = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape);
     Shape shape_r{3, 1, 1, 1};
     auto f = make_shared<Function>(make_shared<op::Concat>(NodeVector{A, B, C}, 0),
                                    ParameterVector{A, B, C});
@@ -245,13 +245,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_4d_tensor)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{2});
-    auto c = backend->create_tensor(element::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(c, vector<float>{3});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
@@ -262,9 +262,9 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_4d_tensor)
 NGRAPH_TEST(${BACKEND_NAME}, concat_2d_tensor)
 {
     Shape shape{1, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
-    auto C = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape);
     Shape shape_r{3, 1};
     auto f = make_shared<Function>(make_shared<op::Concat>(NodeVector{A, B, C}, 0),
                                    ParameterVector{A, B, C});
@@ -272,13 +272,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_2d_tensor)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{2});
-    auto c = backend->create_tensor(element::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(c, vector<float>{3});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
@@ -289,11 +289,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_2d_tensor)
 {
     Shape shape{1, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto add1 = make_shared<op::v1::Add>(A, B);
-    auto C = make_shared<op::Parameter>(element::f32, shape);
-    auto D = make_shared<op::Parameter>(element::f32, shape);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto D = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto add2 = make_shared<op::v1::Add>(C, D);
     auto subtract = make_shared<op::v1::Subtract>(C, A);
     Shape shape_r{3, 1};
@@ -303,15 +303,15 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_2d_tensor)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{2});
-    auto c = backend->create_tensor(element::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(c, vector<float>{3});
-    auto d = backend->create_tensor(element::f32, shape);
+    auto d = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(d, vector<float>{4});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c, d});
@@ -322,11 +322,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_propagate_2d_tensor)
 {
     Shape shape{1, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto add1 = make_shared<op::v1::Add>(A, B);
-    auto C = make_shared<op::Parameter>(element::f32, shape);
-    auto D = make_shared<op::Parameter>(element::f32, shape);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto D = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto add2 = make_shared<op::v1::Add>(C, D);
     auto concat1 = make_shared<op::Concat>(NodeVector{add1, add2}, 0);
     auto subtract = make_shared<op::v1::Subtract>(C, A);
@@ -337,15 +337,15 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_propagate_2d_tensor)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{2});
-    auto c = backend->create_tensor(element::f32, shape);
+    auto c = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(c, vector<float>{3});
-    auto d = backend->create_tensor(element::f32, shape);
+    auto d = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(d, vector<float>{4});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c, d});
@@ -357,20 +357,20 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_1)
 {
     Shape shape{1, 2, 2};
     Shape shape_r{1, 4, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto add1 = make_shared<op::v1::Add>(A, B);
     auto add2 = make_shared<op::v1::Add>(A, B);
     auto concat = make_shared<op::Concat>(NodeVector{add1, add2}, 1);
     auto f = make_shared<Function>(make_shared<op::v1::Add>(concat, concat), ParameterVector{A, B});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1, 1, 1, 1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{1, 1, 1, 1});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
     vector<float> expected;
@@ -383,8 +383,8 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_2)
 {
     Shape shape{1, 2, 2};
     Shape shape_r{1, 8, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto add1 = make_shared<op::v1::Add>(A, B);
     auto add2 = make_shared<op::v1::Add>(A, B);
     auto concat1 = make_shared<op::Concat>(NodeVector{add1, add2}, 1);
@@ -394,11 +394,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_2)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1, 1, 1, 1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{1, 1, 1, 1});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
     vector<float> expected;
@@ -411,8 +411,8 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_3)
 {
     Shape shape{1, 2, 2};
     Shape shape_r{1, 16, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto concat1 = make_shared<op::Concat>(NodeVector{A, B}, 1);
     auto concat2 = make_shared<op::Concat>(NodeVector{A, B}, 1);
     auto concat3 = make_shared<op::Concat>(NodeVector{A, B}, 1);
@@ -423,11 +423,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_tree_3)
     auto f = make_shared<Function>(make_shared<op::v1::Add>(concat14, concat14), ParameterVector{A, B});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1, 1, 1, 1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{1, 1, 1, 1});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
     vector<float> expected;
@@ -440,8 +440,8 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_add_concat)
 {
     Shape shape{2, 2};
     Shape shape_r{4, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto add1 = make_shared<op::v1::Add>(A, B);
     auto add2 = make_shared<op::v1::Add>(add1, add1);
     auto concat = make_shared<op::Concat>(NodeVector{add1, add2}, 0);
@@ -449,11 +449,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_add_concat)
     auto f = make_shared<Function>(add3, ParameterVector{A, B});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1, 1, 1, 1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{1, 1, 1, 1});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
     vector<float> expected = {4, 4, 4, 4, 8, 8, 8, 8};
@@ -464,8 +464,8 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_add_concat_2)
 {
     Shape shape{1, 2, 2};
     Shape shape_r{1, 6, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto add1 = make_shared<op::v1::Add>(A, B);
     auto add2 = make_shared<op::v1::Add>(A, B);
     auto add3 = make_shared<op::v1::Add>(A, B);
@@ -480,11 +480,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_in_place_add_concat_2)
     auto f = make_shared<Function>(add6, ParameterVector{A, B});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1, 1, 1, 1});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{1, 1, 1, 1});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
     vector<float> expected = {4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4};
@@ -556,11 +556,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_5d)
     }

     Shape shape_a{2, 3, 4, 3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{2, 3, 3, 3, 2};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_c{2, 3, 2, 3, 2};
-    auto C = make_shared<op::Parameter>(element::f32, shape_c);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape_c);
     Shape shape_r{2, 3, 9, 3, 2};

     auto r = make_shared<op::Concat>(NodeVector{A, B, C}, 2);
@@ -569,14 +569,14 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_5d)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, a_data);
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, b_data);
-    auto c = backend->create_tensor(element::f32, shape_c);
+    auto c = backend->create_tensor(element::Type_t::f32, shape_c);
     copy_data(c, c_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
@@ -616,9 +616,9 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_5d)
 NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_last)
 {
     Shape shape_a{4};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{0};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_r{4};

     auto r = make_shared<op::Concat>(NodeVector{A, B}, 0);
@@ -630,12 +630,12 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_last)
     vector<float> a_data{1, 2, 3, 4};
     vector<float> b_data(0);

-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, a_data);
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, b_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -646,11 +646,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_last)
 NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_middle)
 {
     Shape shape_a{4};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{0};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_c{4};
-    auto C = make_shared<op::Parameter>(element::f32, shape_c);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape_c);
     Shape shape_r{8};

     auto r = make_shared<op::Concat>(NodeVector{A, B, C}, 0);
@@ -663,14 +663,14 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_middle)
     vector<float> b_data(0);
     vector<float> c_data{5, 6, 7, 8};

-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, a_data);
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, b_data);
-    auto c = backend->create_tensor(element::f32, shape_c);
+    auto c = backend->create_tensor(element::Type_t::f32, shape_c);
     copy_data(c, c_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
@@ -682,13 +682,13 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_1d_middle)
 NGRAPH_TEST(${BACKEND_NAME}, concat_zero_zero)
 {
     Shape shape{0};
-    auto constant_1 = op::Constant::create(element::f32, shape, {1});
+    auto constant_1 = op::Constant::create(element::Type_t::f32, shape, {1});
     auto concat_1 = make_shared<op::Concat>(NodeVector{constant_1, constant_1}, 0);
     auto f = make_shared<Function>(concat_1, ParameterVector{});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {});
@@ -700,11 +700,11 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_zero)
 NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_4d_middle)
 {
     Shape shape_a{2, 2, 1, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{2, 2, 0, 1};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_c{2, 2, 1, 1};
-    auto C = make_shared<op::Parameter>(element::f32, shape_c);
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, shape_c);
     Shape shape_r{2, 2, 2, 1};

     auto r = make_shared<op::Concat>(NodeVector{A, B, C}, 2);
@@ -717,14 +717,14 @@ NGRAPH_TEST(${BACKEND_NAME}, concat_zero_length_4d_middle)
     vector<float> b_data(0);
     vector<float> c_data{5, 6, 7, 8};

-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, a_data);
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, b_data);
-    auto c = backend->create_tensor(element::f32, shape_c);
+    auto c = backend->create_tensor(element::Type_t::f32, shape_c);
     copy_data(c, c_data);
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b, c});
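All of the concat hunks exercise the same shape rule: inputs must match on every axis except the concatenation axis, where the extents add up (zero-length extents included). A minimal sketch of that rule (a hypothetical helper, assuming the standard nGraph headers):

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Node> concat_rowwise_example()
{
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 2});
    auto B = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 2});
    auto C = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 2});
    // Concatenate along axis 0: 2 + 3 + 3 rows, columns unchanged.
    auto concat = std::make_shared<op::Concat>(NodeVector{A, B, C}, 0);
    NGRAPH_CHECK(concat->get_shape() == (Shape{8, 2}));
    return concat;
}
```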
diff --git a/ngraph/test/backend/constant.in.cpp b/ngraph/test/backend/constant.in.cpp
index 813037b0d00..e5d872e50ad 100644
--- a/ngraph/test/backend/constant.in.cpp
+++ b/ngraph/test/backend/constant.in.cpp
@@ -34,13 +34,13 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, tensor_constant)
 {
     Shape shape{2, 2, 2};
-    auto A = op::Constant::create(element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
+    auto A = op::Constant::create(element::Type_t::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto f = make_shared<Function>(A, ParameterVector{});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {});
@@ -52,14 +52,14 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant)
 NGRAPH_TEST(${BACKEND_NAME}, tensor_2constant)
 {
     Shape shape{2, 2, 2};
-    auto A = op::Constant::create(element::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
+    auto A = op::Constant::create(element::Type_t::f32, shape, {1, 2, 3, 4, 5, 6, 7, 8});
     auto f = make_shared<Function>(NodeVector{A, A}, ParameterVector{});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto result0 = backend->create_tensor(element::f32, shape);
-    auto result1 = backend->create_tensor(element::f32, shape);
+    auto result0 = backend->create_tensor(element::Type_t::f32, shape);
+    auto result1 = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result0, result1}, {});
@@ -74,13 +74,13 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_2constant)
 NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_with_op)
 {
     Shape shape{2, 2, 2};
-    auto A = op::Constant::create(element::f32, shape, {-1, 2, 3, -4, 5, -6, -7, 8});
+    auto A = op::Constant::create(element::Type_t::f32, shape, {-1, 2, 3, -4, 5, -6, -7, 8});
     auto f = make_shared<Function>(make_shared<op::Abs>(A), ParameterVector{});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {});
@@ -91,29 +91,30 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_with_op)
 NGRAPH_TEST(${BACKEND_NAME}, constant_multi_use)
 {
-    auto A = make_shared<op::Constant>(element::i32, Shape{}, std::vector<std::string>{"388"});
+    auto A =
+        make_shared<op::Constant>(element::Type_t::i32, Shape{}, std::vector<std::string>{"388"});
     auto f = make_shared<Function>(A, ParameterVector{});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
-    std::shared_ptr<runtime::Tensor> r1 = backend->create_tensor(element::i32, Shape{});
+    std::shared_ptr<runtime::Tensor> r1 = backend->create_tensor(element::Type_t::i32, Shape{});
     auto handle = backend->compile(f);
     handle->call_with_validate({r1}, std::vector<std::shared_ptr<runtime::Tensor>>{});
     EXPECT_EQ(read_vector<int>(r1), std::vector<int>{388});

-    std::shared_ptr<runtime::Tensor> r2 = backend->create_tensor(element::i32, Shape{});
+    std::shared_ptr<runtime::Tensor> r2 = backend->create_tensor(element::Type_t::i32, Shape{});
     handle->call_with_validate({r2}, std::vector<std::shared_ptr<runtime::Tensor>>{});
     EXPECT_EQ(read_vector<int>(r2), std::vector<int>{388});
 }

 NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_float32)
 {
-    auto r = op::Constant::create(element::f32, Shape{}, {4.75});
+    auto r = op::Constant::create(element::Type_t::f32, Shape{}, {4.75});
     auto f = make_shared<Function>(r, ParameterVector{});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto result = backend->create_tensor(element::f32, Shape{});
+    auto result = backend->create_tensor(element::Type_t::f32, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {});
@@ -123,13 +124,13 @@ NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_float32)
 NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_int64)
 {
-    auto r = op::Constant::create(element::i64, Shape{}, {0x4000000000000001});
+    auto r = op::Constant::create(element::Type_t::i64, Shape{}, {0x4000000000000001});
     auto f = make_shared<Function>(r, ParameterVector{});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto result = backend->create_tensor(element::i64, Shape{});
+    auto result = backend->create_tensor(element::Type_t::i64, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {});
@@ -139,13 +140,13 @@ NGRAPH_TEST(${BACKEND_NAME}, scalar_constant_int64)
 NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32)
 {
     Shape shape{2, 2};
-    auto r = op::Constant::create(element::f32, shape, {4.75, 4.5, -5.25, 0.0});
+    auto r = op::Constant::create(element::Type_t::f32, shape, {4.75, 4.5, -5.25, 0.0});
     auto f = make_shared<Function>(r, ParameterVector{});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {});
@@ -157,11 +158,12 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_float32)
 NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_int64)
 {
     Shape shape{2};
-    auto r = op::Constant::create(element::i64, shape, {0x4000000000000001, 0x4000000000000002});
+    auto r =
+        op::Constant::create(element::Type_t::i64, shape, {0x4000000000000001, 0x4000000000000002});
     auto f = make_shared<Function>(r, ParameterVector{});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
     // Create some tensors for input/output
-    auto result = backend->create_tensor(element::i64, shape);
+    auto result = backend->create_tensor(element::Type_t::i64, shape);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {});
     EXPECT_EQ((vector<int64_t>{0x4000000000000001, 0x4000000000000002}),
@@ -171,18 +173,18 @@ NGRAPH_TEST(${BACKEND_NAME}, tensor_constant_int64)
 NGRAPH_TEST(${BACKEND_NAME}, constant_equality_bool)
 {
     Shape shape{4};
-    // auto A = make_shared<op::Parameter>(element::boolean, shape);
-    // auto B = make_shared<op::Parameter>(element::boolean, shape);
+    // auto A = make_shared<op::Parameter>(element::Type_t::boolean, shape);
+    // auto B = make_shared<op::Parameter>(element::Type_t::boolean, shape);
     // auto f = make_shared<Function>(make_shared<op::v1::Equal>(A, B), ParameterVector{A, B});

-    auto A = op::Constant::create(element::boolean, shape, {true, false, true, false});
-    auto B = op::Constant::create(element::boolean, shape, {true, true, true, true});
+    auto A = op::Constant::create(element::Type_t::boolean, shape, {true, false, true, false});
+    auto B = op::Constant::create(element::Type_t::boolean, shape, {true, true, true, true});
     auto f = make_shared<Function>(make_shared<op::v1::Equal>(A, B), ParameterVector{});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto result = backend->create_tensor(element::boolean, shape);
+    auto result = backend->create_tensor(element::Type_t::boolean, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {});
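constant.in.cpp covers both ways of building a Constant: directly from values, and from decimal strings (constant_multi_use). A hedged standalone sketch of the two styles with the enum-based element type:

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

void constant_styles()
{
    // Style 1: values supplied directly to the factory function.
    auto c1 = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {4.75, 4.5, -5.25, 0.0});
    NGRAPH_CHECK(c1->get_vector<float>().size() == 4);

    // Style 2: values parsed from strings, as constant_multi_use does.
    auto c2 = std::make_shared<op::Constant>(
        element::Type_t::i32, Shape{}, std::vector<std::string>{"388"});
    NGRAPH_CHECK(c2->cast_vector<int32_t>()[0] == 388);
}
```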
diff --git a/ngraph/test/backend/convert.in.cpp b/ngraph/test/backend/convert.in.cpp
index 17cc8d13ff0..0a0b780ea76 100644
--- a/ngraph/test/backend/convert.in.cpp
+++ b/ngraph/test/backend/convert.in.cpp
@@ -32,15 +32,16 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, convert_int32_float32)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto f = make_shared<Function>(make_shared<op::Convert>(A, element::f32), ParameterVector{A});
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape);
+    auto f = make_shared<Function>(make_shared<op::Convert>(A, element::Type_t::f32),
+                                   ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(a, vector<int32_t>{281, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -50,15 +51,16 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_int32_float32)
 NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::u16, shape);
-    auto f = make_shared<Function>(make_shared<op::Convert>(A, element::f32), ParameterVector{A});
+    auto A = make_shared<op::Parameter>(element::Type_t::u16, shape);
+    auto f = make_shared<Function>(make_shared<op::Convert>(A, element::Type_t::f32),
+                                   ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::u16, shape);
+    auto a = backend->create_tensor(element::Type_t::u16, shape);
     copy_data(a, vector<uint16_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -69,9 +71,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32)
 NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool)
 {
     Shape shape{2, 3};
-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto f =
-        make_shared<Function>(make_shared<op::Convert>(A, element::boolean), ParameterVector{A});
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape);
+    auto f = make_shared<Function>(make_shared<op::Convert>(A, element::Type_t::boolean),
+                                   ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

@@ -79,9 +81,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool)
     int32_t max = std::numeric_limits<int32_t>::max();

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(a, vector<int32_t>{0, 12, 23, 0, lowest, max});
-    auto result = backend->create_tensor(element::boolean, shape);
+    auto result = backend->create_tensor(element::Type_t::boolean, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -91,9 +93,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool)
 NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bool)
 {
     Shape shape{3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto f =
-        make_shared<Function>(make_shared<op::Convert>(A, element::boolean), ParameterVector{A});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto f = make_shared<Function>(make_shared<op::Convert>(A, element::Type_t::boolean),
+                                   ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

@@ -104,9 +106,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bool)
     float neg_inf = -std::numeric_limits<float>::infinity();

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf});
-    auto result = backend->create_tensor(element::boolean, shape);
+    auto result = backend->create_tensor(element::Type_t::boolean, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -121,14 +123,14 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bf16)
     vector<float> a_data = {
         0.5f, 1.5f, 0.5f, 2.5f, 1.5f, 0.5f, 3.5f, 2.5f, 0.5f, 0.5f, 2.5f, 0.5f, 0.5f, 0.5f, 1.5f};

-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto convert = make_shared<op::Convert>(A, element::bf16);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto convert = make_shared<op::Convert>(A, element::Type_t::bf16);
     auto f = make_shared<Function>(NodeVector{convert}, ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, a_data);
-    auto result = backend->create_tensor(element::bf16, shape_a);
+    auto result = backend->create_tensor(element::Type_t::bf16, shape_a);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
     EXPECT_EQ((vector<bfloat16>{
@@ -144,14 +146,14 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_bf16_float32)
     vector<bfloat16> a_data = {
         0.5, 1.5, 0.5, 2.5, 1.5, 0.5, 3.5, 2.5, 0.5, 0.5, 2.5, 0.5, 0.5, 0.5, 1.5};

-    auto A = make_shared<op::Parameter>(element::bf16, shape_a);
-    auto convert = make_shared<op::Convert>(A, element::f32);
+    auto A = make_shared<op::Parameter>(element::Type_t::bf16, shape_a);
+    auto convert = make_shared<op::Convert>(A, element::Type_t::f32);
     auto f = make_shared<Function>(NodeVector{convert}, ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::bf16, shape_a);
+    auto a = backend->create_tensor(element::Type_t::bf16, shape_a);
     copy_data(a, a_data);
-    auto result = backend->create_tensor(element::f32, shape_a);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_a);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
     EXPECT_EQ((vector<float>{0.5f,
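Note that Convert takes the destination element type as a constructor argument, so these hunks change it in two places: the Parameter's type and the conversion target. A brief sketch (hypothetical helper):

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Function> make_convert_function()
{
    auto A = std::make_shared<op::Parameter>(element::Type_t::i32, Shape{2, 2});
    // The destination-type argument accepts the Type_t enum as well.
    auto convert = std::make_shared<op::Convert>(A, element::Type_t::f32);
    NGRAPH_CHECK(convert->get_element_type() == element::Type_t::f32);
    return std::make_shared<Function>(NodeVector{convert}, ParameterVector{A});
}
```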
diff --git a/ngraph/test/backend/convolution.in.cpp b/ngraph/test/backend/convolution.in.cpp
index ab2b5939b79..1b4d7ef2dcf 100644
--- a/ngraph/test/backend/convolution.in.cpp
+++ b/ngraph/test/backend/convolution.in.cpp
@@ -34,9 +34,9 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining)
 {
     Shape shape_a{1, 2, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{2, 2, 1, 1};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_r{1, 2, 2, 2};
     auto conv1 = make_shared<op::v1::Convolution>(A,
                                                   B,
@@ -57,11 +57,11 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector<float>{1.0f, 1.0f, 1.0f, 1.0f});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     vector<float> expected_result{4.0f, 4.0f, 4.0f, 4.0f, 4.0f, 4.0f, 4.0f, 4.0f};

@@ -73,9 +73,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_outlining)
 NGRAPH_TEST(${BACKEND_NAME}, convolution_simple)
 {
     Shape shape_a{1, 2, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{2, 2, 1, 1};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_r{1, 2, 2, 2};
     auto conv1 = make_shared<op::v1::Convolution>(A,
                                                   B,
@@ -90,11 +90,11 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector<float>{3.0f, 3.0f, 3.0f, 3.0f});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     vector<float> expected_result{18.0f, 24.0f, 30.0f, 36.0f, 18.0f, 24.0f, 30.0f, 36.0f};

@@ -106,9 +106,9 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple)
 NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding)
 {
     Shape shape_a{1, 1, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_b{1, 1, 1, 1};
-    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape_b);
     Shape shape_r{1, 1, 5, 5};
     auto conv1 = make_shared<op::v1::Convolution>(A,
                                                   B,
@@ -123,11 +123,11 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding)
     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1.0f, 2.0f, 3.0f, 4.0f});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector<float>{2.0f});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);

     // clang-format off
     vector<float> expected_result{0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
                                   0.0f, 2.0f, 4.0f, 0.0f, 0.0f,
@@ -145,12 +145,12 @@ NGRAPH_TEST(${BACKEND_NAME}, convolution_simple_padding)
 NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)
 {
     Shape shape_filter{6, 3, 3, 3};
-    auto filters = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
+    auto filters = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
     Shape shape_delta{2, 6, 3, 3};
-    auto deltas = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
+    auto deltas = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
     Shape shape_data_batch_shape{2, 3, 5, 5};
     auto data_batch_shape =
-        make_shared<op::Parameter>(element::i64, PartialShape{Dimension::dynamic()});
+        make_shared<op::Parameter>(element::Type_t::i64, PartialShape{Dimension::dynamic()});
     auto strides = Strides{1, 1};
     auto dilations = Strides{1, 1};
     auto padding_begin = CoordinateDiff{0, 0};
@@ -165,7 +165,7 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)

     auto handle = backend->compile(f);

-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     vector<float> filter, delta, expected_result;

@@ -181,11 +181,12 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_convolution_backprop_data)
     vector<int64_t> shapes = {5, 5};

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_delta);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_delta);
     copy_data(a, delta);
-    auto b = backend->create_tensor(element::f32, shape_filter);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_filter);
     copy_data(b, filter);
-    auto c = backend->create_tensor(element::i64, Shape{shapes.size()}); // dynamic data batch shape
+    auto c = backend->create_tensor(element::Type_t::i64,
+                                    Shape{shapes.size()}); // dynamic data batch shape
     copy_data(c, shapes);
     handle->call_with_validate({result}, {a, b, c});
     EXPECT_FALSE(test::all_close_f(vector<float>{expected_result}, read_vector<float>(result)));
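The convolution hunks elide the stride and padding arguments, so the exact values are not recoverable here. As a hedged illustration, the following assumed pads reproduce convolution_simple_padding's 1x1 kernel and {1, 1, 5, 5} output; the test's real pads may differ:

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Node> padded_convolution_sketch()
{
    auto data = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 1, 2, 2});
    auto filter = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 1, 1, 1});
    auto conv = std::make_shared<op::v1::Convolution>(data,
                                                      filter,
                                                      Strides{1, 1},        // strides
                                                      CoordinateDiff{1, 1}, // pads_begin (assumed)
                                                      CoordinateDiff{2, 2}, // pads_end (assumed)
                                                      Strides{1, 1});       // dilations
    // out = (in + pads_begin + pads_end - kernel) / stride + 1 = (2 + 1 + 2 - 1) / 1 + 1 = 5
    NGRAPH_CHECK(conv->get_shape() == (Shape{1, 1, 5, 5}));
    return conv;
}
```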
diff --git a/ngraph/test/backend/cos.in.cpp b/ngraph/test/backend/cos.in.cpp
index 9e29f11199f..87f9b81192b 100644
--- a/ngraph/test/backend/cos.in.cpp
+++ b/ngraph/test/backend/cos.in.cpp
@@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, cos)
 {
     Shape shape{11};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto f = make_shared<Function>(make_shared<op::Cos>(A), ParameterVector{A});

     auto test_case = test::TestCase<TestEngine>(f);
diff --git a/ngraph/test/backend/cosh.in.cpp b/ngraph/test/backend/cosh.in.cpp
index 461c609fb2d..126ee0e3f5f 100644
--- a/ngraph/test/backend/cosh.in.cpp
+++ b/ngraph/test/backend/cosh.in.cpp
@@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, cosh)
 {
     Shape shape{6};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto f = make_shared<Function>(make_shared<op::Cosh>(A), ParameterVector{A});

     vector<float> input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f};
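The trig tests use the newer TestCase engine rather than hand-built tensors, which is why their hunks are so small. A sketch of that pattern inside the same test scaffolding (the input/expected values here are illustrative, computed as std::cos of the inputs; TestEngine is the file-local alias declared at the top of cos.in.cpp):

```C++
Shape shape{6};
auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
auto f = make_shared<Function>(make_shared<op::Cos>(A), ParameterVector{A});

auto test_case = test::TestCase<TestEngine>(f);
test_case.add_input<float>({0.0f, 0.5f, 1.0f, 2.0f, 4.0f, 8.0f});
test_case.add_expected_output<float>(
    shape, {1.0f, 0.87758f, 0.54030f, -0.41615f, -0.65364f, -0.14550f});
test_case.run();
```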
diff --git a/ngraph/test/backend/ctc_greedy_decoder.in.cpp b/ngraph/test/backend/ctc_greedy_decoder.in.cpp
index ae516d7ef2e..57fb6675a52 100644
--- a/ngraph/test/backend/ctc_greedy_decoder.in.cpp
+++ b/ngraph/test/backend/ctc_greedy_decoder.in.cpp
@@ -53,8 +53,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder)
     const auto data_shape = Shape{T, N, C};
     const auto masks_shape = Shape{T, N};

-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    auto masks = make_shared<op::Parameter>(element::f32, masks_shape);
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, data_shape);
+    auto masks = make_shared<op::Parameter>(element::Type_t::f32, masks_shape);
     auto decoder = make_shared<op::CTCGreedyDecoder>(data, masks, false);
     auto function = make_shared<Function>(decoder, ParameterVector{data, masks});
     auto test_case = test::TestCase<TestEngine>(function);
@@ -74,8 +74,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_f16)
     const auto data_shape = Shape{T, N, C};
     const auto masks_shape = Shape{T, N};

-    auto data = make_shared<op::Parameter>(element::f16, data_shape);
-    auto masks = make_shared<op::Parameter>(element::f16, masks_shape);
+    auto data = make_shared<op::Parameter>(element::Type_t::f16, data_shape);
+    auto masks = make_shared<op::Parameter>(element::Type_t::f16, masks_shape);
     auto decoder = make_shared<op::CTCGreedyDecoder>(data, masks, false);
     auto function = make_shared<Function>(decoder, ParameterVector{data, masks});
     auto test_case = test::TestCase<TestEngine>(function);
@@ -95,8 +95,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_multiple_batches)
     const auto data_shape = Shape{T, N, C};
     const auto masks_shape = Shape{T, N};

-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    auto masks = make_shared<op::Parameter>(element::f32, masks_shape);
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, data_shape);
+    auto masks = make_shared<op::Parameter>(element::Type_t::f32, masks_shape);
     auto decoder = make_shared<op::CTCGreedyDecoder>(data, masks, false);
     auto function = make_shared<Function>(decoder, ParameterVector{data, masks});
     auto test_case = test::TestCase<TestEngine>(function);
@@ -136,8 +136,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_single_batch_short_sequence)
     const auto data_shape = Shape{T, N, C};
     const auto masks_shape = Shape{T, N};

-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    auto masks = make_shared<op::Parameter>(element::f32, masks_shape);
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, data_shape);
+    auto masks = make_shared<op::Parameter>(element::Type_t::f32, masks_shape);
     auto decoder = make_shared<op::CTCGreedyDecoder>(data, masks, false);
     auto function = make_shared<Function>(decoder, ParameterVector{data, masks});
     auto test_case = test::TestCase<TestEngine>(function);
@@ -157,8 +157,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_merge)
     const auto data_shape = Shape{T, N, C};
     const auto masks_shape = Shape{T, N};

-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    auto masks = make_shared<op::Parameter>(element::f32, masks_shape);
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, data_shape);
+    auto masks = make_shared<op::Parameter>(element::Type_t::f32, masks_shape);
     auto decoder = make_shared<op::CTCGreedyDecoder>(data, masks, true);
     auto function = make_shared<Function>(decoder, ParameterVector{data, masks});
     auto test_case = test::TestCase<TestEngine>(function);
@@ -178,8 +178,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_single_no_merge)
     const auto data_shape = Shape{T, N, C};
     const auto masks_shape = Shape{T, N};

-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    auto masks = make_shared<op::Parameter>(element::f32, masks_shape);
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, data_shape);
+    auto masks = make_shared<op::Parameter>(element::Type_t::f32, masks_shape);
     auto decoder = make_shared<op::CTCGreedyDecoder>(data, masks, false);
     auto function = make_shared<Function>(decoder, ParameterVector{data, masks});
     auto test_case = test::TestCase<TestEngine>(function);
@@ -199,8 +199,8 @@ NGRAPH_TEST(${BACKEND_NAME}, ctc_greedy_decoder_multiple_sequences)
     const auto data_shape = Shape{T, N, C};
     const auto masks_shape = Shape{T, N};

-    auto data = make_shared<op::Parameter>(element::f32, data_shape);
-    auto masks = make_shared<op::Parameter>(element::f32, masks_shape);
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, data_shape);
+    auto masks = make_shared<op::Parameter>(element::Type_t::f32, masks_shape);
     auto decoder = make_shared<op::CTCGreedyDecoder>(data, masks, false);
     auto function = make_shared<Function>(decoder, ParameterVector{data, masks});
     auto test_case = test::TestCase<TestEngine>(function);
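For the decoder tests, the shape contract is: data {T, N, C}, masks {T, N}, decoded output {N, T, 1, 1}; the boolean flag toggles merging of repeated symbols. A small sketch of that contract (hypothetical helper):

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Node> ctc_decoder_sketch()
{
    const size_t T = 3, N = 1, C = 3; // time steps, batch, classes
    auto data = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{T, N, C});
    auto masks = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{T, N});
    auto decoder = std::make_shared<op::CTCGreedyDecoder>(data, masks, false);
    NGRAPH_CHECK(decoder->get_shape() == (Shape{N, T, 1, 1}));
    return decoder;
}
```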
diff --git a/ngraph/test/backend/cum_sum.in.cpp b/ngraph/test/backend/cum_sum.in.cpp
index 7e6f143562e..e70d0258066 100644
--- a/ngraph/test/backend/cum_sum.in.cpp
+++ b/ngraph/test/backend/cum_sum.in.cpp
@@ -42,18 +42,18 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, cum_sum_default)
 {
     Shape shape{1, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto axis = make_shared<op::Parameter>(element::Type_t::i32, Shape{1});
     auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1, 2, 3, 4});
     auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
     copy_data(axis_tensor, vector<int32_t>{1});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, axis_tensor});
@@ -63,18 +63,18 @@ NGRAPH_TEST(${BACKEND_NAME}, cum_sum_default)
 NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim)
 {
     Shape shape{2, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto axis = make_shared<op::Parameter>(element::Type_t::i64, Shape{1});
     auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
     auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
     copy_data(axis_tensor, vector<int64_t>{0});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, axis_tensor});
@@ -85,15 +85,15 @@ NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim)
 NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_default_axis)
 {
     Shape shape{2, 4};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto f = make_shared<Function>(make_shared<op::CumSum>(A), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -105,19 +105,19 @@ NGRAPH_TEST(${BACKEND_NAME}, cum_sum_3d)
 {
     auto test_cumsum_3d = [](const int32_t axis_val) -> void {
         Shape shape{3, 2, 4};
-        auto A = make_shared<op::Parameter>(element::f32, shape);
-        auto axis = make_shared<op::Parameter>(element::i32, Shape{1});
+        auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+        auto axis = make_shared<op::Parameter>(element::Type_t::i32, Shape{1});
         auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis), ParameterVector{A, axis});

         auto backend = runtime::Backend::create("${BACKEND_NAME}");

         // Create some tensors for input/output
-        auto a = backend->create_tensor(element::f32, shape);
+        auto a = backend->create_tensor(element::Type_t::f32, shape);
         copy_data(a, vector<float>{0,  1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
                                    12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
         auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
         copy_data(axis_tensor, vector<int32_t>{axis_val});
-        auto result = backend->create_tensor(element::f32, shape);
+        auto result = backend->create_tensor(element::Type_t::f32, shape);

         auto handle = backend->compile(f);
         handle->call_with_validate({result}, {a, axis_tensor});
@@ -153,19 +153,19 @@ NGRAPH_TEST(${BACKEND_NAME}, cum_sum_2dim_allmodes)
 {
     auto test_cum_sum_allmodes = [](const int64_t axis_val, int exclusive, int reverse) {
         Shape shape{2, 4};
-        auto A = make_shared<op::Parameter>(element::f32, shape);
-        auto axis = make_shared<op::Parameter>(element::i64, Shape{1});
+        auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+        auto axis = make_shared<op::Parameter>(element::Type_t::i64, Shape{1});
         auto f = make_shared<Function>(make_shared<op::CumSum>(A, axis, exclusive, reverse),
                                        ParameterVector{A, axis});

         auto backend = runtime::Backend::create("${BACKEND_NAME}");

         // Create some tensors for input/output
-        auto a = backend->create_tensor(element::f32, shape);
+        auto a = backend->create_tensor(element::Type_t::f32, shape);
         copy_data(a, vector<float>{0, 1, 2, 3, 4, 5, 6, 7});
         auto axis_tensor = backend->create_tensor(axis->get_element_type(), axis->get_shape());
         copy_data(axis_tensor, vector<int64_t>{axis_val});
-        auto result = backend->create_tensor(element::f32, shape);
+        auto result = backend->create_tensor(element::Type_t::f32, shape);

         auto handle = backend->compile(f);
         handle->call_with_validate({result}, {a, axis_tensor});
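The exclusive/reverse flags toggled in cum_sum_2dim_allmodes compose as follows; a plain-C++ reference of the 1-D semantics the op applies per slice along the chosen axis:

```C++
#include <algorithm>
#include <cassert>
#include <numeric>
#include <vector>

std::vector<float> cum_sum_1d(std::vector<float> v, bool exclusive, bool reverse)
{
    if (reverse)
        std::reverse(v.begin(), v.end());
    if (exclusive)
    {
        // Shift right by one: each output excludes its own input element.
        v.insert(v.begin(), 0.0f);
        v.pop_back();
    }
    std::partial_sum(v.begin(), v.end(), v.begin());
    if (reverse)
        std::reverse(v.begin(), v.end());
    return v;
}

int main()
{
    assert((cum_sum_1d({1, 2, 3, 4}, false, false) == std::vector<float>{1, 3, 6, 10}));
    assert((cum_sum_1d({1, 2, 3, 4}, true, false) == std::vector<float>{0, 1, 3, 6}));
    assert((cum_sum_1d({1, 2, 3, 4}, false, true) == std::vector<float>{10, 9, 7, 4}));
}
```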
diff --git a/ngraph/test/backend/divide.in.cpp b/ngraph/test/backend/divide.in.cpp
index 8ad877117e0..46d4faa9321 100644
--- a/ngraph/test/backend/divide.in.cpp
+++ b/ngraph/test/backend/divide.in.cpp
@@ -52,18 +52,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide)
 {
     Shape shape{2, 2};

-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto f = make_shared<Function>(make_shared<op::v1::Divide>(A, B), ParameterVector{A, B});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{2, 4, 8, 16});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{1, 2, 4, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -74,18 +74,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_int32)
 {
     Shape shape{2, 2};

-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto B = make_shared<op::Parameter>(element::i32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::i32, shape);
     auto f = make_shared<Function>(make_shared<op::v1::Divide>(A, B), ParameterVector{A, B});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(a, vector<int32_t>{0x40000140, 0x40000001, 8, 16});
-    auto b = backend->create_tensor(element::i32, shape);
+    auto b = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(b, vector<int32_t>{2, 5, 4, 8});
-    auto result = backend->create_tensor(element::i32, shape);
+    auto result = backend->create_tensor(element::Type_t::i32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -96,18 +96,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_cpp_rounding_int32)
 {
     Shape shape{2, 2};

-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto B = make_shared<op::Parameter>(element::i32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::i32, shape);
     auto f = make_shared<Function>(make_shared<op::v1::Divide>(A, B, false), ParameterVector{A, B});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(a, vector<int32_t>{-10, -10, 10, 10});
-    auto b = backend->create_tensor(element::i32, shape);
+    auto b = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(b, vector<int32_t>{-3, 3, -3, 3});
-    auto result = backend->create_tensor(element::i32, shape);
+    auto result = backend->create_tensor(element::Type_t::i32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -118,18 +118,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_python_rounding_int32)
 {
     Shape shape{2, 2};

-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto B = make_shared<op::Parameter>(element::i32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::i32, shape);
     auto f = make_shared<Function>(make_shared<op::v1::Divide>(A, B), ParameterVector{A, B});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(a, vector<int32_t>{-10, -10, 10, 10});
-    auto b = backend->create_tensor(element::i32, shape);
+    auto b = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(b, vector<int32_t>{-3, 3, -3, 3});
-    auto result = backend->create_tensor(element::i32, shape);
+    auto result = backend->create_tensor(element::Type_t::i32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -140,18 +140,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_overload)
 {
     Shape shape{2, 2};

-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto f = make_shared<Function>(A / B, ParameterVector{A, B});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{2, 4, 8, 16});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{1, 2, 4, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -162,18 +162,18 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_by_zero_float32)
 {
     Shape shape{2, 2};

-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto f = make_shared<Function>(make_shared<op::v1::Divide>(A, B), ParameterVector{A, B});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{2, 4, 8, 16});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector<float>{0, 0, 0, 0});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
-        backend->create_dynamic_tensor(element::i64, PartialShape::dynamic());
+    auto output = backend->create_dynamic_tensor(element::Type_t::i64, PartialShape::dynamic());

     ex->call_with_validate({output}, {arg_tensor, pattern_tensor});

-    ASSERT_EQ(output->get_element_type(), element::i64);
+    ASSERT_EQ(output->get_element_type(), element::Type_t::i64);
     EXPECT_EQ(read_vector<int64_t>(output),
               vector<int64_t>({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}));
 }
diff --git a/ngraph/test/backend/dynamic.in.cpp b/ngraph/test/backend/dynamic.in.cpp
index a7b21469063..911d9acf649 100644
--- a/ngraph/test/backend/dynamic.in.cpp
+++ b/ngraph/test/backend/dynamic.in.cpp
@@ -39,7 +39,8 @@ NGRAPH_TEST(${BACKEND_NAME}, create_dynamic_backend)
 NGRAPH_TEST(${BACKEND_NAME}, create_dynamic_tensor)
 {
     auto backend = runtime::Backend::create("${BACKEND_NAME}", true);
-    auto t = backend->create_dynamic_tensor(element::f32, PartialShape{2, Dimension::dynamic(), 3});
+    auto t = backend->create_dynamic_tensor(element::Type_t::f32,
+                                            PartialShape{2, Dimension::dynamic(), 3});
     ASSERT_TRUE(t->get_partial_shape().same_scheme(PartialShape{2, Dimension::dynamic(), 3}));
 }
@@ -48,9 +49,12 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_abc)
     //
     // Create a graph for f(a,b,c) = (a+b)*c, where a, b, c all have shape {2,?,3}.
     //
-    auto a = make_shared<op::Parameter>(element::f32, PartialShape{2, Dimension::dynamic(), 3});
-    auto b = make_shared<op::Parameter>(element::f32, PartialShape{2, Dimension::dynamic(), 3});
-    auto c = make_shared<op::Parameter>(element::f32, PartialShape{2, Dimension::dynamic(), 3});
+    auto a =
+        make_shared<op::Parameter>(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3});
+    auto b =
+        make_shared<op::Parameter>(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3});
+    auto c =
+        make_shared<op::Parameter>(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3});

     auto a_plus_b_times_c = (a + b) * c;

@@ -66,8 +70,8 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_abc)
     //
     // Create a dynamic output tensor with shape {2,?,3}.
     //
-    auto t_r =
-        backend->create_dynamic_tensor(element::f32, PartialShape{2, Dimension::dynamic(), 3});
+    auto t_r = backend->create_dynamic_tensor(element::Type_t::f32,
+                                              PartialShape{2, Dimension::dynamic(), 3});

     //
     // For each of n=[0,...,5), run the compiled executable against a test vector of shape
@@ -83,9 +87,9 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_abc)
         }

         // Create static tensors for the inputs and copy data.
- auto t_a = backend->create_tensor(element::f32, Shape{2, middle_dim, 3}); - auto t_b = backend->create_tensor(element::f32, Shape{2, middle_dim, 3}); - auto t_c = backend->create_tensor(element::f32, Shape{2, middle_dim, 3}); + auto t_a = backend->create_tensor(element::Type_t::f32, Shape{2, middle_dim, 3}); + auto t_b = backend->create_tensor(element::Type_t::f32, Shape{2, middle_dim, 3}); + auto t_c = backend->create_tensor(element::Type_t::f32, Shape{2, middle_dim, 3}); copy_data(t_a, inputs); copy_data(t_b, inputs); @@ -112,9 +116,9 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_abc) static void axpy_test(const PartialShape& input_pshape, const std::vector& input_shapes) { - auto a = make_shared(element::f32, input_pshape); - auto x = make_shared(element::f32, input_pshape); - auto y = make_shared(element::f32, input_pshape); + auto a = make_shared(element::Type_t::f32, input_pshape); + auto x = make_shared(element::Type_t::f32, input_pshape); + auto y = make_shared(element::Type_t::f32, input_pshape); auto axpy = a * x + y; @@ -122,7 +126,7 @@ static void axpy_test(const PartialShape& input_pshape, const std::vector auto backend = runtime::Backend::create("${BACKEND_NAME}", true); auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, input_pshape); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, input_pshape); for (auto& shape : input_shapes) { @@ -132,9 +136,9 @@ static void axpy_test(const PartialShape& input_pshape, const std::vector inputs[i] = i; } - auto t_a = backend->create_tensor(element::f32, shape); - auto t_x = backend->create_tensor(element::f32, shape); - auto t_y = backend->create_tensor(element::f32, shape); + auto t_a = backend->create_tensor(element::Type_t::f32, shape); + auto t_x = backend->create_tensor(element::Type_t::f32, shape); + auto t_y = backend->create_tensor(element::Type_t::f32, shape); copy_data(t_a, inputs); copy_data(t_x, inputs); @@ -179,13 +183,13 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_axpy) static void to_vector_test(const PartialShape& input_pshape, const std::vector& input_shapes) { - auto x = make_shared(element::f32, input_pshape); + auto x = make_shared(element::Type_t::f32, input_pshape); shared_ptr x_new_shape = make_shared(x); - auto axes = op::Constant::create(element::i64, {}, {0}); + auto axes = op::Constant::create(element::Type_t::i64, {}, {0}); x_new_shape = make_shared(x_new_shape, axes); x_new_shape = make_shared( - x_new_shape, op::Constant::create(element::u64, {1}, Shape{1}), false); + x_new_shape, op::Constant::create(element::Type_t::u64, {1}, Shape{1}), false); auto x_reshaped = make_shared(x, x_new_shape, true); @@ -193,7 +197,7 @@ static void to_vector_test(const PartialShape& input_pshape, const std::vectorcompile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic(1)); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic(1)); for (auto& shape : input_shapes) { @@ -203,7 +207,7 @@ static void to_vector_test(const PartialShape& input_pshape, const std::vectorcreate_tensor(element::f32, shape); + auto t_x = backend->create_tensor(element::Type_t::f32, shape); copy_data(t_x, inputs); @@ -241,11 +245,12 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_to_vector) static void reverse_shape_test(const PartialShape& input_pshape, const std::vector& input_shapes) { - auto x = make_shared(element::f32, input_pshape); + auto x = make_shared(element::Type_t::f32, input_pshape); shared_ptr x_new_shape = make_shared(x); - 
x_new_shape = make_shared( - x_new_shape, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + x_new_shape = make_shared(x_new_shape, + op::Constant::create(element::Type_t::i64, {1}, {0}), + op::v1::Reverse::Mode::INDEX); auto x_reshaped = make_shared(x, x_new_shape, true); @@ -253,7 +258,7 @@ static void reverse_shape_test(const PartialShape& input_pshape, auto backend = runtime::Backend::create("${BACKEND_NAME}", true); auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); for (auto& shape : input_shapes) { @@ -263,7 +268,7 @@ static void reverse_shape_test(const PartialShape& input_pshape, inputs[i] = i; } - auto t_x = backend->create_tensor(element::f32, shape); + auto t_x = backend->create_tensor(element::Type_t::f32, shape); copy_data(t_x, inputs); @@ -302,8 +307,8 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_reverse_shape) NGRAPH_TEST(${BACKEND_NAME}, dynamic_transpose) { - auto arg = std::make_shared(element::i32, PartialShape::dynamic()); - auto input_order = make_shared(element::i32, PartialShape::dynamic()); + auto arg = std::make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto input_order = make_shared(element::Type_t::i32, PartialShape::dynamic()); auto transpose = std::make_shared(arg, input_order); auto f = std::make_shared(NodeVector{transpose}, ParameterVector{arg, input_order}); @@ -314,15 +319,16 @@ NGRAPH_TEST(${BACKEND_NAME}, dynamic_transpose) auto arg_data = vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}; auto input_order_data = vector{2, 0, 1}; - auto arg_tensor = backend->create_tensor(element::i32, Shape{2, 2, 3}); - auto input_order_tensor = backend->create_tensor(element::i32, Shape{input_order_data.size()}); + auto arg_tensor = backend->create_tensor(element::Type_t::i32, Shape{2, 2, 3}); + auto input_order_tensor = + backend->create_tensor(element::Type_t::i32, Shape{input_order_data.size()}); copy_data(arg_tensor, arg_data); copy_data(input_order_tensor, input_order_data); - auto output = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic()); + auto output = backend->create_dynamic_tensor(element::Type_t::i32, PartialShape::dynamic()); ex->call_with_validate({output}, {arg_tensor, input_order_tensor}); - ASSERT_EQ(output->get_element_type(), element::i32); + ASSERT_EQ(output->get_element_type(), element::Type_t::i32); EXPECT_EQ(read_vector(output), vector({1, 4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 12})); } diff --git a/ngraph/test/backend/erf.in.cpp b/ngraph/test/backend/erf.in.cpp index 1cbe2260567..baac0c7861e 100644 --- a/ngraph/test/backend/erf.in.cpp +++ b/ngraph/test/backend/erf.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, erf) { Shape shape{8}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/exp.in.cpp b/ngraph/test/backend/exp.in.cpp index f4d3ae2a1c5..52369462c7c 100644 --- a/ngraph/test/backend/exp.in.cpp +++ b/ngraph/test/backend/exp.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, exp) { Shape shape{8}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), 
ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/floor.in.cpp b/ngraph/test/backend/floor.in.cpp index 03d919b1aa5..bb8675c92e9 100644 --- a/ngraph/test/backend/floor.in.cpp +++ b/ngraph/test/backend/floor.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, floor) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); @@ -58,7 +58,7 @@ NGRAPH_TEST(${BACKEND_NAME}, floor) NGRAPH_TEST(${BACKEND_NAME}, floor_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); @@ -71,7 +71,7 @@ NGRAPH_TEST(${BACKEND_NAME}, floor_int64) { // This tests large numbers that will not fit in a double Shape shape{3}; - auto A = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/function_name.in.cpp b/ngraph/test/backend/function_name.in.cpp index c8f99e5d179..559d4ce901e 100644 --- a/ngraph/test/backend/function_name.in.cpp +++ b/ngraph/test/backend/function_name.in.cpp @@ -33,8 +33,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, function_name) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A + B, ParameterVector{A, B}, "funky func name"); auto backend = runtime::Backend::create("${BACKEND_NAME}"); diff --git a/ngraph/test/backend/fused_op.in.cpp b/ngraph/test/backend/fused_op.in.cpp index 38d451aa564..155a11f7f02 100644 --- a/ngraph/test/backend/fused_op.in.cpp +++ b/ngraph/test/backend/fused_op.in.cpp @@ -57,7 +57,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, elu) { - auto A = make_shared(element::f32, Shape{3, 2}); + auto A = make_shared(element::Type_t::f32, Shape{3, 2}); auto elu = make_shared(A, 0.5f); auto function = make_shared(NodeVector{elu}, ParameterVector{A}); @@ -70,7 +70,7 @@ NGRAPH_TEST(${BACKEND_NAME}, elu) NGRAPH_TEST(${BACKEND_NAME}, elu_negative_alpha) { - auto A = make_shared(element::f32, Shape{3, 2}); + auto A = make_shared(element::Type_t::f32, Shape{3, 2}); auto elu = make_shared(A, -1.f); auto function = make_shared(NodeVector{elu}, ParameterVector{A}); @@ -85,8 +85,8 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu) { Shape shape{3, 2}; Shape rshape{3}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, rshape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, rshape); auto prelu = make_shared(A, B); auto f = make_shared(NodeVector{prelu}, ParameterVector{A, B}); std::vector a{-2, 3, -2, 1, -1, 0}; @@ -103,7 +103,7 @@ NGRAPH_TEST(${BACKEND_NAME}, hardsigmoid) const Shape shape{2, 7}; const float alpha_f = 0.125f; const float beta_f = 0.642f; - const auto A = make_shared(element::f32, shape); + const auto A = make_shared(element::Type_t::f32, shape); const auto alpha = op::Constant::create(A->get_element_type(), Shape{}, {alpha_f}); const auto beta = 
op::Constant::create(A->get_element_type(), Shape{}, {beta_f}); auto hardsigmoid = make_shared(A, alpha, beta); @@ -138,8 +138,8 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu_shared_slope) { Shape shape{3, 2}; Shape rshape{}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, rshape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, rshape); auto prelu = make_shared(A, B); auto f = make_shared(NodeVector{prelu}, ParameterVector{A, B}); std::vector a{-2, 3, -2, 1, -1, 0}; @@ -155,8 +155,8 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_slope) { Shape shape{3, 2}; Shape rshape{}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, rshape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, rshape); auto prelu = make_shared(A, B); auto f = make_shared(NodeVector{prelu}, ParameterVector{A, B}); std::vector a{-2, 3, -2, 1, -1, 0}; @@ -170,8 +170,8 @@ NGRAPH_TEST(${BACKEND_NAME}, prelu_negative_slope) NGRAPH_TEST(${BACKEND_NAME}, group_conv) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -193,8 +193,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv) NGRAPH_TEST(${BACKEND_NAME}, group_conv_striding) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{2, 2}, @@ -215,8 +215,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_striding) NGRAPH_TEST(${BACKEND_NAME}, group_conv_window_dilation) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -238,8 +238,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_window_dilation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_data_dilation) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -262,8 +262,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_data_dilation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -286,8 +286,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding) NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding_and_window_dilation) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto 
filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -310,8 +310,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_padding_and_window_dilation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_shape_variation) { - auto data = make_shared(element::f32, Shape{1, 4, 4, 1}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 4, 1}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -334,8 +334,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_shape_variation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_data_variation) { - auto data = make_shared(element::f32, Shape{1, 4, 3, 3}); - auto filters = make_shared(element::f32, Shape{2, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 3, 3}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -360,8 +360,8 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_input_data_variation) NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape) { - auto data = make_shared(element::f32, Shape{1, 4, 2, 2}); - auto filters = make_shared(element::f32, Shape{2, 1, 2, 1, 1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 4, 2, 2}); + auto filters = make_shared(element::Type_t::f32, Shape{2, 1, 2, 1, 1}); auto group_conv = make_shared(data, filters, Strides{1, 1}, @@ -382,7 +382,7 @@ NGRAPH_TEST(${BACKEND_NAME}, group_conv_groups_included_in_shape) NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first) { - auto A = make_shared(element::f32, Shape{1, 2, 4, 4}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 4, 4}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; auto space_to_depth = make_shared(A, mode, 2); auto function = make_shared(NodeVector{space_to_depth}, ParameterVector{A}); @@ -403,7 +403,7 @@ NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_block_first) NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_depth_first) { - auto A = make_shared(element::f32, Shape{1, 2, 4, 4}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 4, 4}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; auto space_to_depth = make_shared(A, mode, 2); auto function = make_shared(NodeVector{space_to_depth}, ParameterVector{A}); @@ -421,7 +421,7 @@ NGRAPH_TEST(${BACKEND_NAME}, space_to_depth_depth_first) NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_block_first) { - auto A = make_shared(element::f32, Shape{1, 8, 2, 2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 8, 2, 2}); auto depth_to_space = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); auto function = make_shared(NodeVector{depth_to_space}, ParameterVector{A}); @@ -440,7 +440,7 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_block_first) NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_depth_first) { - auto A = make_shared(element::f32, Shape{1, 8, 2, 2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 8, 2, 2}); auto depth_to_space = make_shared(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2); auto function = make_shared(NodeVector{depth_to_space}, ParameterVector{A}); @@ -460,8 +460,9 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_depth_first) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto 
axes = make_shared(element::i64, Shape{3}, vector{1, 2, 3}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -487,8 +488,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{0}, vector{}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -515,8 +516,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_empty_axes_input) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_h_4d) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{1}, vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{1}, vector{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -541,8 +542,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_h_4d) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_1axis_5d) { Shape data_shape{1, 2, 2, 2, 3}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{1}, vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{1}, vector{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -567,8 +568,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_1axis_5d) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_123axes_5d) { Shape data_shape{1, 2, 2, 2, 3}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{3}, vector{1, 2, 3}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -593,8 +595,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_123axes_5d) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x2_shape) { Shape data_shape{2, 2}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{}, vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{}, vector{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -617,8 +619,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x2_shape) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x4_shape) { Shape data_shape{2, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{}, vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = make_shared(element::Type_t::i64, Shape{}, vector{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -648,8 +650,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_across_c_2x4_shape) NGRAPH_TEST(${BACKEND_NAME}, normalize_across_chw_4d_max_bias) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{3}, vector{1, 2, 3}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{3}, 
vector{1, 2, 3}); float eps{5000}; auto eps_mode = op::EpsMode::MAX; @@ -696,7 +699,7 @@ namespace NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_double) { - auto type = element::f64; + auto type = element::Type_t::f64; typedef double ctype; auto sshape = Shape{5, 2}; @@ -782,7 +785,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_double) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_float) { - auto type = element::f32; + auto type = element::Type_t::f32; typedef float ctype; auto sshape = Shape{5, 2}; @@ -868,7 +871,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_float) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int8) { - auto type = element::i8; + auto type = element::Type_t::i8; typedef int8_t ctype; auto sshape = Shape{4, 2}; @@ -897,7 +900,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int8) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int16) { - auto type = element::i16; + auto type = element::Type_t::i16; typedef int16_t ctype; auto sshape = Shape{4, 2}; @@ -926,7 +929,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int16) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int32) { - auto type = element::i32; + auto type = element::Type_t::i32; typedef int32_t ctype; auto sshape = Shape{4, 2}; @@ -955,7 +958,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int32) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int64) { - auto type = element::i64; + auto type = element::Type_t::i64; typedef int64_t ctype; auto sshape = Shape{4, 2}; @@ -984,7 +987,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_int64) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint8) { - auto type = element::u8; + auto type = element::Type_t::u8; typedef uint8_t ctype; auto sshape = Shape{4, 2}; @@ -1016,7 +1019,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint8) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint16) { - auto type = element::u16; + auto type = element::Type_t::u16; typedef uint16_t ctype; auto sshape = Shape{4, 2}; @@ -1048,7 +1051,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint16) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint32) { - auto type = element::u32; + auto type = element::Type_t::u32; typedef uint32_t ctype; auto sshape = Shape{4, 2}; @@ -1080,7 +1083,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint32) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint64) { - auto type = element::u64; + auto type = element::Type_t::u64; typedef uint64_t ctype; auto sshape = Shape{4, 2}; @@ -1112,7 +1115,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_uint64) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_float16) { - auto type = element::f16; + auto type = element::Type_t::f16; typedef float16 ctype; auto sshape = Shape{5, 2}; @@ -1198,7 +1201,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_float16) NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_bfloat16) { - auto type = element::bf16; + auto type = element::Type_t::bf16; typedef bfloat16 ctype; auto sshape = Shape{5, 2}; @@ -1285,7 +1288,7 @@ NGRAPH_TEST(${BACKEND_NAME}, fused_clamp_bfloat16) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization) { Shape data_shape{1, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, true, false); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1305,7 +1308,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization_split_channels) { Shape data_shape{1, 2, 5, 1}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, 
false, false); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1325,7 +1328,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_normalization_split_channels) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization) { Shape data_shape{1, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1354,7 +1357,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_split_channels) { Shape data_shape{1, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, false); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1383,7 +1386,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_split_channels) NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_shared_across_channel_batch_size_2) { Shape data_shape{2, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, true); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1406,7 +1409,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_shared_across_chann NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_not_shared_across_channel_batch_size_2) { Shape data_shape{2, 2, 5}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto mvn_func = make_shared(data, false); auto function = make_shared(NodeVector{mvn_func}, ParameterVector{data}); @@ -1429,7 +1432,7 @@ NGRAPH_TEST(${BACKEND_NAME}, mvn_mean_variance_normalization_not_shared_across_c NGRAPH_TEST(${BACKEND_NAME}, grn_4d) { const Shape data_shape{1, 2, 3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); float bias{1e-6f}; const auto grn = make_shared(data, bias); @@ -1453,7 +1456,7 @@ NGRAPH_TEST(${BACKEND_NAME}, grn_4d) NGRAPH_TEST(${BACKEND_NAME}, grn_2d_with_bias) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); float bias{2.25f}; const auto grn = make_shared(data, bias); @@ -1484,9 +1487,9 @@ NGRAPH_TEST(${BACKEND_NAME}, grn_2d_with_bias) NGRAPH_TEST(${BACKEND_NAME}, unsqueeze) { - auto data_node = make_shared(element::f32, Shape{4, 2}); + auto data_node = make_shared(element::Type_t::f32, Shape{4, 2}); auto axes_node = - make_shared(element::i64, Shape{2}, vector{1, 2}); + make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto squeeze = make_shared(data_node, axes_node); auto function = make_shared(NodeVector{squeeze}, ParameterVector{data_node}); @@ -1499,7 +1502,7 @@ NGRAPH_TEST(${BACKEND_NAME}, unsqueeze) NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_simple) { - const auto data = make_shared(element::i32, Shape{1, 15, 2, 2}); + const auto data = make_shared(element::Type_t::i32, Shape{1, 15, 2, 2}); auto tested_op = make_shared(data, 1, 5); auto function = make_shared(tested_op, ParameterVector{data}); @@ -1523,7 +1526,7 @@ NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_negative_axis) // in this test the output is the same as in shuffle_channels_simple but // the axis value 
is negative and the C(channels) value is in a different dimension(0) of the // shape - const auto data = make_shared(element::i32, Shape{15, 2, 1, 2}); + const auto data = make_shared(element::Type_t::i32, Shape{15, 2, 1, 2}); auto tested_op = make_shared(data, -4, 5); auto function = make_shared(tested_op, ParameterVector{data}); @@ -1544,7 +1547,7 @@ NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_negative_axis) NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_float) { - const auto data = make_shared(element::f32, Shape{6, 1, 1, 1}); + const auto data = make_shared(element::Type_t::f32, Shape{6, 1, 1, 1}); auto tested_op = make_shared(data, 0, 2); auto function = make_shared(tested_op, ParameterVector{data}); @@ -1559,9 +1562,9 @@ NGRAPH_TEST(${BACKEND_NAME}, shuffle_channels_float) NGRAPH_TEST(${BACKEND_NAME}, squeeze) { - const auto data_node = make_shared(element::f32, Shape{1, 4, 1, 1, 2}); + const auto data_node = make_shared(element::Type_t::f32, Shape{1, 4, 1, 1, 2}); const auto axes_node = - make_shared(element::i64, Shape{2}, vector{0, 2}); + make_shared(element::Type_t::i64, Shape{2}, vector{0, 2}); const auto squeeze = make_shared(data_node, axes_node); const auto function = make_shared(NodeVector{squeeze}, ParameterVector{data_node}); @@ -1575,9 +1578,9 @@ NGRAPH_TEST(${BACKEND_NAME}, squeeze) NGRAPH_TEST(${BACKEND_NAME}, squeeze_default_axes) { - const auto data_node = make_shared(element::f32, Shape{1, 4, 1, 1, 2}); + const auto data_node = make_shared(element::Type_t::f32, Shape{1, 4, 1, 1, 2}); const auto axes_node = - make_shared(element::i64, Shape{0}, vector{}); + make_shared(element::Type_t::i64, Shape{0}, vector{}); const auto squeeze = make_shared(data_node, axes_node); const auto function = make_shared(NodeVector{squeeze}, ParameterVector{data_node}); @@ -1591,15 +1594,15 @@ NGRAPH_TEST(${BACKEND_NAME}, squeeze_default_axes) NGRAPH_TEST(${BACKEND_NAME}, squeeze_dynamic) { - const auto data_param = make_shared(element::f32, Shape{1, 4, 1, 1, 2}); - const auto axes_param = make_shared(element::i64, Shape{2}); + const auto data_param = make_shared(element::Type_t::f32, Shape{1, 4, 1, 1, 2}); + const auto axes_param = make_shared(element::Type_t::i64, Shape{2}); EXPECT_THROW(make_shared(data_param, axes_param), CheckFailure); } NGRAPH_TEST(${BACKEND_NAME}, squared_difference) { - const auto x1 = make_shared(element::f32, Shape{2, 2}); - const auto x2 = make_shared(element::f32, Shape{2, 2}); + const auto x1 = make_shared(element::Type_t::f32, Shape{2, 2}); + const auto x2 = make_shared(element::Type_t::f32, Shape{2, 2}); auto tested_op = make_shared(x1, x2); auto function = make_shared(tested_op, ParameterVector{x1, x2}); @@ -1614,8 +1617,8 @@ NGRAPH_TEST(${BACKEND_NAME}, squared_difference) NGRAPH_TEST(${BACKEND_NAME}, squared_difference_broadcast) { - const auto x1 = make_shared(element::i32, Shape{2, 2}); - const auto x2 = make_shared(element::i32, Shape{}); + const auto x1 = make_shared(element::Type_t::i32, Shape{2, 2}); + const auto x2 = make_shared(element::Type_t::i32, Shape{}); auto tested_op = make_shared(x1, x2); auto function = make_shared(tested_op, ParameterVector{x1, x2}); @@ -1635,15 +1638,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_zero_bias_peepholes) const size_t hidden_size = 3; const size_t gates_count = 4; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, 
hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto P = make_shared(element::f32, Shape{3 * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); + const auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); const auto lstm_cell = make_shared( X, @@ -1710,15 +1716,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_bias_peepholes) const size_t hidden_size = 3; const size_t gates_count = 4; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto P = make_shared(element::f32, Shape{3 * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); + const auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, B, hidden_size); @@ -1799,15 +1808,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_bias_peepholes_clip_input_forget) const float clip_threshold = 3.5f; bool input_forget = true; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto P = make_shared(element::f32, Shape{3 * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, 
hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); + const auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); const auto lstm_cell = make_shared(X, H_t, @@ -1900,15 +1912,18 @@ NGRAPH_TEST(${BACKEND_NAME}, lstm_cell_activaction_functions) vector activation_alpha{0.f, 0.f, 1.8345f}; vector activation_beta{0.f, 0.f, 3.05f}; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); - const auto P = make_shared(element::f32, Shape{3 * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); + const auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); const auto lstm_cell = make_shared(X, H_t, @@ -1993,11 +2008,11 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize) { const Shape data_shape{1, 2, 3, 4}; const size_t levels = 4; - const auto data = make_shared(element::f32, data_shape); - const auto input_low = make_shared(element::f32, Shape{}); - const auto input_high = make_shared(element::f32, Shape{}); - const auto output_low = make_shared(element::f32, Shape{}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto input_low = make_shared(element::Type_t::f32, Shape{}); + const auto input_high = make_shared(element::Type_t::f32, Shape{}); + const auto output_low = make_shared(element::Type_t::f32, Shape{}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); const auto quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); @@ -2036,11 +2051,11 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize_with_clip) { const Shape data_shape{1, 2, 3, 4}; const size_t levels = 5; - const auto data = make_shared(element::f32, data_shape); - const auto input_low = make_shared(element::f32, Shape{}); - const auto input_high = make_shared(element::f32, Shape{}); - const auto output_low = make_shared(element::f32, Shape{}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto input_low = make_shared(element::Type_t::f32, Shape{}); + const auto input_high = make_shared(element::Type_t::f32, Shape{}); + const auto output_low = make_shared(element::Type_t::f32, Shape{}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); const auto quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); @@ -2076,11 +2091,11 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize_with_clip_across_channels) { Shape data_shape{1, 2, 5, 5}; size_t levels = 5; - auto data = 
make_shared(element::f32, data_shape); - auto input_low = make_shared(element::f32, Shape{2, 1, 1}); - auto input_high = make_shared(element::f32, Shape{2, 1, 1}); - auto output_low = make_shared(element::f32, Shape{2, 1, 1}); - auto output_high = make_shared(element::f32, Shape{2, 1, 1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto input_low = make_shared(element::Type_t::f32, Shape{2, 1, 1}); + auto input_high = make_shared(element::Type_t::f32, Shape{2, 1, 1}); + auto output_low = make_shared(element::Type_t::f32, Shape{2, 1, 1}); + auto output_high = make_shared(element::Type_t::f32, Shape{2, 1, 1}); auto quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); @@ -2119,11 +2134,11 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize_pdpd) { Shape data_shape{1, 2, 5, 5}; size_t levels = 5; - auto data = make_shared(element::f32, data_shape); - auto input_low = make_shared(element::f32, Shape{2}); - auto input_high = make_shared(element::f32, Shape{2}); - auto output_low = make_shared(element::f32, Shape{2}); - auto output_high = make_shared(element::f32, Shape{2}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto input_low = make_shared(element::Type_t::f32, Shape{2}); + auto input_high = make_shared(element::Type_t::f32, Shape{2}); + auto output_low = make_shared(element::Type_t::f32, Shape{2}); + auto output_high = make_shared(element::Type_t::f32, Shape{2}); auto quantize = make_shared(data, @@ -2170,10 +2185,12 @@ NGRAPH_TEST(${BACKEND_NAME}, rnn_cell_no_bias) const size_t input_size = 3; const size_t hidden_size = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); auto function = make_shared(rnn_cell, ParameterVector{X, H_t, W, R}); @@ -2220,11 +2237,13 @@ NGRAPH_TEST(${BACKEND_NAME}, rnn_cell_bias_clip) const size_t hidden_size = 3; float clip = 2.88f; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, Shape{hidden_size}); const auto rnn_cell = make_shared(X, H_t, @@ -2282,11 +2301,13 @@ NGRAPH_TEST(${BACKEND_NAME}, rnn_cell_activation_function) const size_t hidden_size = 3; float clip = 2.88f; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); 
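The recurrent-cell tests updated here all follow one shape convention: `W` is `{hidden_size, input_size}`, `R` is `{hidden_size, hidden_size}`, and `B` is `{hidden_size}` (or a `gates_count` multiple of it for LSTM/GRU). A condensed sketch of the setup these hunks rewrite, with the scoped enumerators; the helper name and constant values are illustrative only:

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Parameter shapes used by the RNNCell tests in this patch:
//   X: {batch, input}, H_t: {batch, hidden}, W: {hidden, input}, R: {hidden, hidden}
std::shared_ptr<Function> make_rnn_cell_function()
{
    const size_t batch_size = 2, input_size = 3, hidden_size = 3;
    const auto X = std::make_shared<op::Parameter>(element::Type_t::f32,
                                                   Shape{batch_size, input_size});
    const auto H_t = std::make_shared<op::Parameter>(element::Type_t::f32,
                                                     Shape{batch_size, hidden_size});
    const auto W = std::make_shared<op::Parameter>(element::Type_t::f32,
                                                   Shape{hidden_size, input_size});
    const auto R = std::make_shared<op::Parameter>(element::Type_t::f32,
                                                   Shape{hidden_size, hidden_size});
    const auto rnn_cell = std::make_shared<op::v0::RNNCell>(X, H_t, W, R, hidden_size);
    return std::make_shared<Function>(rnn_cell, ParameterVector{X, H_t, W, R});
}
```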
- const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, Shape{hidden_size}); const auto rnn_cell = make_shared(X, H_t, @@ -2346,13 +2367,15 @@ NGRAPH_TEST(${BACKEND_NAME}, gru_cell_bias_clip) float clip = 2.88f; bool linear_before_reset = false; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{gates_count * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{gates_count * hidden_size}); const auto gru_cell = make_shared(X, H_t, @@ -2419,13 +2442,15 @@ NGRAPH_TEST(${BACKEND_NAME}, gru_cell_linear_before_reset) float clip = 2.88f; bool linear_before_reset = true; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{(gates_count + 1) * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{(gates_count + 1) * hidden_size}); const auto gru_cell = make_shared(X, H_t, @@ -2491,13 +2516,15 @@ NGRAPH_TEST(${BACKEND_NAME}, gru_cell_activation_function) float clip = 2.88f; bool linear_before_reset = true; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{(gates_count + 1) * hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, 
+ Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{(gates_count + 1) * hidden_size}); const auto gru_cell = make_shared(X, H_t, @@ -2561,31 +2588,31 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_space_to_depth_block_first) Shape dts_input_shape{2, 32, 2, 4, 2, 4}; size_t block_size = 2; - auto dts_input = make_shared(element::f32, dts_input_shape); + auto dts_input = make_shared(element::Type_t::f32, dts_input_shape); auto depth_to_space = make_shared( dts_input, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, block_size); auto dts_func = make_shared(NodeVector{depth_to_space}, ParameterVector{dts_input}); - auto dts_input_tensor = backend->create_tensor(element::f32, dts_input_shape); + auto dts_input_tensor = backend->create_tensor(element::Type_t::f32, dts_input_shape); const auto data_size = shape_size(dts_input_shape); vector data(data_size); std::iota(data.begin(), data.end(), 0); copy_data(dts_input_tensor, data); const auto dts_output_shape = depth_to_space->get_output_shape(0); - auto dts_output_tensor = backend->create_tensor(element::f32, dts_output_shape); + auto dts_output_tensor = backend->create_tensor(element::Type_t::f32, dts_output_shape); auto handle = backend->compile(dts_func); handle->call_with_validate({dts_output_tensor}, {dts_input_tensor}); auto dts_result = read_vector(dts_output_tensor); // use depth_to_space output as space_to_depth input - auto std_input = make_shared(element::f32, dts_output_shape); + auto std_input = make_shared(element::Type_t::f32, dts_output_shape); auto space_to_depth = make_shared( std_input, op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST, block_size); auto std_func = make_shared(NodeVector{space_to_depth}, ParameterVector{std_input}); - auto std_input_tensor = backend->create_tensor(element::f32, dts_output_shape); + auto std_input_tensor = backend->create_tensor(element::Type_t::f32, dts_output_shape); copy_data(std_input_tensor, dts_result); - auto std_output_tensor = backend->create_tensor(element::f32, dts_input_shape); + auto std_output_tensor = backend->create_tensor(element::Type_t::f32, dts_input_shape); handle = backend->compile(std_func); handle->call_with_validate({std_output_tensor}, {std_input_tensor}); auto std_result = read_vector(std_output_tensor); @@ -2601,31 +2628,31 @@ NGRAPH_TEST(${BACKEND_NAME}, depth_to_space_space_to_depth_depth_first) Shape dts_input_shape{2, 32, 2, 4, 2, 4}; size_t block_size = 2; - auto dts_input = make_shared(element::f32, dts_input_shape); + auto dts_input = make_shared(element::Type_t::f32, dts_input_shape); auto depth_to_space = make_shared( dts_input, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, block_size); auto dts_func = make_shared(NodeVector{depth_to_space}, ParameterVector{dts_input}); - auto dts_input_tensor = backend->create_tensor(element::f32, dts_input_shape); + auto dts_input_tensor = backend->create_tensor(element::Type_t::f32, dts_input_shape); const auto data_size = shape_size(dts_input_shape); vector data(data_size); std::iota(data.begin(), data.end(), 0); copy_data(dts_input_tensor, data); const auto dts_output_shape = depth_to_space->get_output_shape(0); - auto dts_output_tensor = backend->create_tensor(element::f32, dts_output_shape); + auto dts_output_tensor = backend->create_tensor(element::Type_t::f32, 
         dts_output_shape);
     auto handle = backend->compile(dts_func);
     handle->call_with_validate({dts_output_tensor}, {dts_input_tensor});
     auto dts_result = read_vector(dts_output_tensor);
 
     // use depth_to_space output as space_to_depth input
-    auto std_input = make_shared(element::f32, dts_output_shape);
+    auto std_input = make_shared(element::Type_t::f32, dts_output_shape);
     auto space_to_depth = make_shared(
         std_input, op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST, block_size);
     auto std_func = make_shared(NodeVector{space_to_depth}, ParameterVector{std_input});
-    auto std_input_tensor = backend->create_tensor(element::f32, dts_output_shape);
+    auto std_input_tensor = backend->create_tensor(element::Type_t::f32, dts_output_shape);
     copy_data(std_input_tensor, dts_result);
-    auto std_output_tensor = backend->create_tensor(element::f32, dts_input_shape);
+    auto std_output_tensor = backend->create_tensor(element::Type_t::f32, dts_input_shape);
     handle = backend->compile(std_func);
     handle->call_with_validate({std_output_tensor}, {std_input_tensor});
     auto std_result = read_vector(std_output_tensor);
diff --git a/ngraph/test/backend/gather.in.cpp b/ngraph/test/backend/gather.in.cpp
index 0f60ed9c1f7..44471965222 100644
--- a/ngraph/test/backend/gather.in.cpp
+++ b/ngraph/test/backend/gather.in.cpp
@@ -40,9 +40,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_4d_indices_axis_0_uint8)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2, 3, 4};
     Shape out_shape{2, 2, 3, 4, 2};
-    auto P = make_shared(element::u8, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::u8, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -65,9 +65,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_4d_indices_axis_0_2d_input)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2, 3, 4};
     Shape out_shape{2, 2, 3, 4, 2};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -93,9 +93,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_3d_indices_axis_0_2d_input)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 3, 4};
     Shape out_shape{2, 3, 4, 2};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -116,9 +116,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_2d_indices_axis_0_2d_input)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -135,9 +135,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_2d_negative_and_positive_indices_axis_0_2d_i
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -154,9 +154,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_1d_indices_axis_0_1d_input)
     Shape data_shape{3};
     Shape indices_shape{2};
     Shape out_shape{2};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -172,9 +172,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_scalar_indices_axis_0_2d_input)
     Shape data_shape{3, 2};
     Shape indices_shape{};
     Shape out_shape{2};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -190,9 +190,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_2d_indices_axis_1_2d_input)
     Shape data_shape{3, 3};
     Shape indices_shape{1, 2};
     Shape out_shape{3, 1, 2};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {1});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {1});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -208,9 +208,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_1d_indices_axis_2_4d_input)
     Shape data_shape{2, 2, 3, 3};
     Shape indices_shape{2};
     Shape out_shape{2, 2, 2, 3};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {2});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {2});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -231,9 +231,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_scalar_indices_axis_1_2d_input)
     Shape data_shape{3, 3};
     Shape indices_shape{};
     Shape out_shape{3};
-    auto P = make_shared(element::f32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {1});
+    auto P = make_shared(element::Type_t::f32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {1});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -249,9 +249,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_int8)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::i8, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::i8, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -267,9 +267,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_int16)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::i16, data_shape);
-    auto I = make_shared(element::i64, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::i16, data_shape);
+    auto I = make_shared(element::Type_t::i64, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -285,9 +285,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_int32)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::i32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::i32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -303,9 +303,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_int64)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::i64, data_shape);
-    auto I = make_shared(element::i64, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::i64, data_shape);
+    auto I = make_shared(element::Type_t::i64, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -321,9 +321,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_uint8)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::u8, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::u8, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -339,9 +339,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_uint16)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::u16, data_shape);
-    auto I = make_shared(element::i64, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::u16, data_shape);
+    auto I = make_shared(element::Type_t::i64, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -357,9 +357,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_uint32)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::u32, data_shape);
-    auto I = make_shared(element::i32, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::u32, data_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -375,9 +375,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_uint64)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::u64, data_shape);
-    auto I = make_shared(element::i64, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::u64, data_shape);
+    auto I = make_shared(element::Type_t::i64, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
@@ -393,9 +393,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_axis_0_bool)
     Shape data_shape{3, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::boolean, data_shape);
-    auto I = make_shared(element::i64, indices_shape);
-    auto A = op::Constant::create(element::i64, Shape{}, {0});
+    auto P = make_shared(element::Type_t::boolean, data_shape);
+    auto I = make_shared(element::Type_t::i64, indices_shape);
+    auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto G = make_shared(P, I, A);
     auto f = make_shared(G, ParameterVector{P, I});
 
diff --git a/ngraph/test/backend/gather_nd.in.cpp b/ngraph/test/backend/gather_nd.in.cpp
index 5fe3578a4d6..10a8d5dda6e 100644
--- a/ngraph/test/backend/gather_nd.in.cpp
+++ b/ngraph/test/backend/gather_nd.in.cpp
@@ -45,19 +45,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_single_indices)
     Shape params_shape{3, 3};
     Shape indices_shape{2};
     Shape out_shape{};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{1, 2});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -77,19 +77,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_scalar_from_2d)
     Shape params_shape{2, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{0, 0, 1, 1});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -109,19 +109,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_1d_from_2d)
     Shape params_shape{2, 2};
     Shape indices_shape{2, 1};
     Shape out_shape{2, 2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{1, 0});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -143,19 +143,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_scalar_from_3d)
     Shape params_shape{2, 2, 2};
     Shape indices_shape{2, 3};
     Shape out_shape{2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{0, 0, 1, 1, 0, 1});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -175,19 +175,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_1d_from_3d)
     Shape params_shape{2, 2, 2};
     Shape indices_shape{2, 2};
     Shape out_shape{2, 2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{0, 1, 1, 0});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -209,19 +209,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_2d_from_3d)
     Shape params_shape{2, 2, 2};
     Shape indices_shape{1, 1};
     Shape out_shape{1, 2, 2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{1});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -243,19 +243,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_scalar_from_2d)
     Shape params_shape{2, 2};
     Shape indices_shape{2, 1, 2};
     Shape out_shape{2, 1};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{0, 0, 0, 1});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -275,19 +275,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_2d)
     Shape params_shape{2, 2};
     Shape indices_shape{2, 1, 1};
     Shape out_shape{2, 1, 2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{1, 0});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -309,19 +309,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_scalar_from_3d)
     Shape params_shape{2, 2, 2};
     Shape indices_shape{2, 2, 3};
     Shape out_shape{2, 2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -343,19 +343,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_3d)
     Shape params_shape{2, 2, 2};
     Shape indices_shape{2, 2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{0, 1, 1, 0, 0, 0, 1, 1});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -377,19 +377,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_1d_from_3d_negative)
     Shape params_shape{2, 2, 2};
     Shape indices_shape{2, 2, 2};
     Shape out_shape{2, 2, 2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
    auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{0, -1, -1, 0, 0, 0, 1, 1});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -403,19 +403,19 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_2d_from_3d)
     Shape params_shape{2, 2, 2};
     Shape indices_shape{2, 1, 1};
     Shape out_shape{2, 1, 2, 2};
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1.0f, 1.1f, 1.2f, 1.3f, 2.0f, 2.1f, 2.2f, 2.3f});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{1, 0});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -438,20 +438,20 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims1)
     Shape indices_shape{2, 1};
     Shape out_shape{2, 4};
     int batch_dims = 1;
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I, batch_dims);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{1, 0});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -466,22 +466,22 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims2)
     Shape indices_shape{2, 3, 3, 2};
     Shape out_shape{6, 3};
     int batch_dims = 2;
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I, batch_dims);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16,
                         17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
                         33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{1, 0, 3, 1, 2, 1, 0, 1, 1, 1, 2, 0, 3, 0, 3, 1, 2, 1,
                         2, 0, 1, 1, 3, 1, 1, 1, 2, 0, 2, 0, 0, 0, 3, 1, 3, 1});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
@@ -497,20 +497,20 @@ NGRAPH_TEST(${BACKEND_NAME}, gather_nd_batch_dims2_lead_dims)
     Shape indices_shape{2, 3, 1, 1};
     Shape out_shape{6, 1};
     int batch_dims = 2;
-    auto P = make_shared(element::f32, params_shape);
-    auto I = make_shared(element::i32, indices_shape);
+    auto P = make_shared(element::Type_t::f32, params_shape);
+    auto I = make_shared(element::Type_t::i32, indices_shape);
     auto G = make_shared(P, I, batch_dims);
     auto f = make_shared(G, ParameterVector{P, I});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto p = backend->create_tensor(element::f32, params_shape);
+    auto p = backend->create_tensor(element::Type_t::f32, params_shape);
     copy_data(p, vector{1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12,
                         13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24});
-    auto i = backend->create_tensor(element::i32, indices_shape);
+    auto i = backend->create_tensor(element::Type_t::i32, indices_shape);
     copy_data(i, vector{1, 0, 2, 0, 2, 2});
-    auto result = backend->create_tensor(element::f32, out_shape);
+    auto result = backend->create_tensor(element::Type_t::f32, out_shape);
 
     auto c = backend->compile(f);
     c->call_with_validate({result}, {p, i});
diff --git a/ngraph/test/backend/gelu.in.cpp b/ngraph/test/backend/gelu.in.cpp
index 5e99792b678..426f92c74ec 100644
--- a/ngraph/test/backend/gelu.in.cpp
+++ b/ngraph/test/backend/gelu.in.cpp
@@ -50,7 +50,7 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, gelu_f32)
 {
     Shape shape{100000};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A), ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
@@ -66,9 +66,9 @@ NGRAPH_TEST(${BACKEND_NAME}, gelu_f32)
     }
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, args[0]);
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::transform(args[0].begin(), args[0].end(), args[0].begin(), [](float x) -> float {
         return 0.5f * x * (1.0f + erf(x / sqrt(2.0f)));
@@ -82,16 +82,16 @@ NGRAPH_TEST(${BACKEND_NAME}, gelu_f32)
 NGRAPH_TEST(${BACKEND_NAME}, gelu_f64)
 {
     Shape shape{8};
-    auto A = make_shared(element::f64, shape);
+    auto A = make_shared(element::Type_t::f64, shape);
     auto f = make_shared(make_shared(A), ParameterVector{A});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f64, shape);
+    auto a = backend->create_tensor(element::Type_t::f64, shape);
     vector input{-4.0, -3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0};
     copy_data(a, input);
-    auto result = backend->create_tensor(element::f64, shape);
+    auto result = backend->create_tensor(element::Type_t::f64, shape);
 
     std::transform(input.begin(), input.end(), input.begin(), [](double x) -> double {
         return 0.5 * x * (1.0 + erf(x / sqrt(2.0)));
diff --git a/ngraph/test/backend/group_convolution.in.cpp b/ngraph/test/backend/group_convolution.in.cpp
index 8db4e90d6a7..762884564f6 100644
--- a/ngraph/test/backend/group_convolution.in.cpp
+++ b/ngraph/test/backend/group_convolution.in.cpp
@@ -38,11 +38,11 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data)
 {
     Shape shape_filter{6, 1, 3, 3};
-    auto filters = make_shared(element::f32, PartialShape::dynamic());
+    auto filters = make_shared(element::Type_t::f32, PartialShape::dynamic());
     Shape shape_delta{2, 6, 3, 3};
-    auto deltas = make_shared(element::f32, PartialShape::dynamic());
+    auto deltas = make_shared(element::Type_t::f32, PartialShape::dynamic());
     Shape shape_data_batch{2, 3, 5, 5};
-    auto data_batch = make_shared(element::f32, PartialShape::dynamic());
+    auto data_batch = make_shared(element::Type_t::f32, PartialShape::dynamic());
     auto strides = Strides{1, 1};
     auto dilations = Strides{1, 1};
     auto padding_begin = CoordinateDiff{0, 0};
@@ -58,7 +58,7 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data)
 
     auto handle = backend->compile(f);
 
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());
 
     vector filter, delta, data, expected_result;
 
@@ -74,11 +74,11 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data)
     for (int i = 0; i < 2 * 3 * 5 * 5; i++)
         expected_result.emplace_back(i);
 
-    auto a = backend->create_tensor(element::f32, shape_data_batch);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_data_batch);
     copy_data(a, data);
-    auto b = backend->create_tensor(element::f32, shape_filter);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_filter);
     copy_data(b, filter);
-    auto c = backend->create_tensor(element::f32, shape_delta);
+    auto c = backend->create_tensor(element::Type_t::f32, shape_delta);
     copy_data(c, delta);
     handle->call_with_validate({result}, {a, b, c});
     EXPECT_FALSE(test::all_close_f(vector{expected_result}, read_vector(result)));
@@ -93,8 +93,8 @@ NGRAPH_TEST(${BACKEND_NAME}, v1_group_conv_backprop_data)
     Strides dilations{1, 1};
     const op::PadType auto_pad{op::PadType::EXPLICIT};
 
-    auto data = make_shared(element::f32, Shape{1, 1, 3, 3});
-    auto filters = make_shared(element::f32, Shape{1, 1, 1, 3, 3});
+    auto data = make_shared(element::Type_t::f32, Shape{1, 1, 3, 3});
+    auto filters = make_shared(element::Type_t::f32, Shape{1, 1, 1, 3, 3});
     auto gcbd = make_shared(
         data, filters, strides, pads_begin, pads_end, dilations, auto_pad, output_padding);
 
@@ -139,9 +139,9 @@ NGRAPH_TEST(${BACKEND_NAME}, v1_group_conv_backprop_data_output_shape)
     Strides strides{1, 1};
     Strides dilations{1, 1};
 
-    auto data = make_shared(element::f32, Shape{1, 1, 1, 10});
-    auto filters = make_shared(element::f32, Shape{1, 1, 1, 1, 5});
-    auto output_shape = op::Constant::create(element::i64, Shape{2}, {1, 14});
+    auto data = make_shared(element::Type_t::f32, Shape{1, 1, 1, 10});
+    auto filters = make_shared(element::Type_t::f32, Shape{1, 1, 1, 1, 5});
+    auto output_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 14});
     auto gcbd = make_shared(
         data, filters, output_shape, strides, dilations, op::PadType::SAME_UPPER);
 
diff --git a/ngraph/test/backend/hard_sigmoid.in.cpp b/ngraph/test/backend/hard_sigmoid.in.cpp
index b8379c06950..08798a191de 100644
--- a/ngraph/test/backend/hard_sigmoid.in.cpp
+++ b/ngraph/test/backend/hard_sigmoid.in.cpp
@@ -33,10 +33,10 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_1d)
 {
     const Shape a_shape{3};
-    const auto A = make_shared(element::f32, a_shape);
+    const auto A = make_shared(element::Type_t::f32, a_shape);
 
-    const auto alpha = op::Constant::create(element::f32, Shape{}, {0.5f});
-    const auto beta = op::Constant::create(element::f32, Shape{}, {0.6f});
+    const auto alpha = op::Constant::create(element::Type_t::f32, Shape{}, {0.5f});
+    const auto beta = op::Constant::create(element::Type_t::f32, Shape{}, {0.6f});
 
     const auto R = make_shared(A, alpha, beta);
     const auto f = make_shared(R, ParameterVector{A});
@@ -55,10 +55,10 @@ NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_1d)
 NGRAPH_TEST(${BACKEND_NAME}, hard_sigmoid_2d)
 {
     const Shape a_shape{2, 5};
-    const auto A = make_shared(element::f32, a_shape);
+    const auto A = make_shared(element::Type_t::f32, a_shape);
 
-    const auto alpha = op::Constant::create(element::f32, Shape{}, {0.2f});
-    const auto beta = op::Constant::create(element::f32, Shape{}, {0.5f});
+    const auto alpha = op::Constant::create(element::Type_t::f32, Shape{}, {0.2f});
+    const auto beta = op::Constant::create(element::Type_t::f32, Shape{}, {0.5f});
 
     const auto R = make_shared(A, alpha, beta);
     const auto f = make_shared(R, ParameterVector{A});
diff --git a/ngraph/test/backend/interpolate.in.cpp b/ngraph/test/backend/interpolate.in.cpp
index 9fcc1e1a324..6911c81bb8b 100644
--- a/ngraph/test/backend/interpolate.in.cpp
+++ b/ngraph/test/backend/interpolate.in.cpp
@@ -42,16 +42,17 @@ NGRAPH_TEST(${BACKEND_NAME}, interpolate_down_scales_const_linear)
     attrs.axes = AxisSet{0, 1, 2, 3};
     attrs.mode = "linear";
     attrs.align_corners = false;
-    const auto input = make_shared(element::f32, input_shape);
-    const auto output_shape_input = op::v0::Constant::create(element::i64, {4}, {1, 1, 1, 2});
+    const auto input = make_shared(element::Type_t::f32, input_shape);
+    const auto output_shape_input =
+        op::v0::Constant::create(element::Type_t::i64, {4}, {1, 1, 1, 2});
     std::vector intput_data{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0};
     auto interpolate = make_shared(input, output_shape_input, attrs);
     auto f = make_shared(interpolate, ParameterVector{input});
 
     auto backend = runtime::Backend::create("IE_CPU");
-    auto input_tensor = backend->create_tensor(element::f32, input_shape);
-    auto result_tensor = backend->create_tensor(element::f32, output_shape);
+    auto input_tensor = backend->create_tensor(element::Type_t::f32, input_shape);
+    auto result_tensor = backend->create_tensor(element::Type_t::f32, output_shape);
 
     auto handle = backend->compile(f);
     copy_data(input_tensor, intput_data);
diff --git a/ngraph/test/backend/layer_norm.in.cpp b/ngraph/test/backend/layer_norm.in.cpp
index ebb0feb4f65..9fa0c3267ff 100644
--- a/ngraph/test/backend/layer_norm.in.cpp
+++ b/ngraph/test/backend/layer_norm.in.cpp
@@ -48,18 +48,18 @@ static string s_manifest = "${MANIFEST}";
 
 NGRAPH_TEST(${BACKEND_NAME}, layer_norm_affine_stats)
 {
-    auto p_data = make_shared(element::f32, Shape{2, 4});
-    auto p_scale = make_shared(element::f32, Shape{4});
-    auto p_bias = make_shared(element::f32, Shape{4});
+    auto p_data = make_shared(element::Type_t::f32, Shape{2, 4});
+    auto p_scale = make_shared(element::Type_t::f32, Shape{4});
+    auto p_bias = make_shared(element::Type_t::f32, Shape{4});
     auto ln = make_shared(p_data, p_scale, p_bias);
     auto f = make_shared(ln->outputs(), ParameterVector{p_data, p_scale, p_bias});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create tensors for input
-    auto data = backend->create_tensor(element::f32, Shape{2, 4});
-    auto scale = backend->create_tensor(element::f32, Shape{4});
-    auto bias = backend->create_tensor(element::f32, Shape{4});
+    auto data = backend->create_tensor(element::Type_t::f32, Shape{2, 4});
+    auto scale = backend->create_tensor(element::Type_t::f32, Shape{4});
+    auto bias = backend->create_tensor(element::Type_t::f32, Shape{4});
     // Fill in input tensors
     vector d_input{-4.0f, -3.0f, -2.0f, -1.0f, 0.0f, 1.0f, 2.0f, 3.0f};
     copy_data(data, d_input);
@@ -68,9 +68,9 @@ NGRAPH_TEST(${BACKEND_NAME}, layer_norm_affine_stats)
     vector b_input{-4.0f, -3.0f, -2.0f, -1.0f};
     copy_data(bias, b_input);
     // Create tensors for output
-    auto norm = backend->create_tensor(element::f32, Shape{2, 4});
-    auto mean = backend->create_tensor(element::f32, Shape{2});
-    auto var = backend->create_tensor(element::f32, Shape{2});
+    auto norm = backend->create_tensor(element::Type_t::f32, Shape{2, 4});
+    auto mean = backend->create_tensor(element::Type_t::f32, Shape{2});
+    auto var = backend->create_tensor(element::Type_t::f32, Shape{2});
     // Expected results (Manually computed)
     vector exp_norm{-2.658364534378051758f,
diff --git a/ngraph/test/backend/log.in.cpp b/ngraph/test/backend/log.in.cpp
index f3558820d39..5b45b868728 100644
--- a/ngraph/test/backend/log.in.cpp
+++ b/ngraph/test/backend/log.in.cpp
@@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, log)
 {
     Shape shape{2, 2, 2};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
     auto f = make_shared(make_shared(A), ParameterVector{A});
 
     std::vector a{0.125f, 0.25f, 0.5f, 1.f, 2.f, 4.f, 8.f, 16.f};
diff --git a/ngraph/test/backend/log_softmax.in.cpp b/ngraph/test/backend/log_softmax.in.cpp
index 1304e815632..f9a24a83b24 100644
--- a/ngraph/test/backend/log_softmax.in.cpp
+++ b/ngraph/test/backend/log_softmax.in.cpp
@@ -45,13 +45,13 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_1d_single_value)
 {
     Shape shape{1};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{1});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{0};
 
@@ -64,13 +64,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_1d_single_value)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis0)
 {
     Shape shape{2, 4};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.};
 
@@ -83,13 +83,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis0)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis1)
 {
     Shape shape{2, 4};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-3.4401896,
                                 -2.4401896,
@@ -109,13 +109,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis1)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg1)
 {
     Shape shape{2, 4};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-3.4401896,
                                 -2.4401896,
@@ -135,13 +135,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg1)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg2)
 {
     Shape shape{2, 4};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{0, 1, 2, 3, 10000, 10001, 10002, 10003});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-10000., -10000., -10000., -10000., 0., 0., 0., 0.};
 
@@ -154,13 +154,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_2d_axis_neg2)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_0)
 {
     Shape shape{3, 2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-12.0024818,
                                 -12.0024818,
@@ -190,13 +190,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_0)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_1)
 {
     Shape shape{3, 2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-3.04858735,
                                 -3.04858735,
@@ -226,13 +226,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_1)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_2)
 {
     Shape shape{3, 2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-2.40760596,
                                 -1.40760596,
@@ -262,13 +262,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_2)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg1)
 {
     Shape shape{3, 2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-2.40760596,
                                 -1.40760596,
@@ -298,13 +298,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg1)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg2)
 {
     Shape shape{3, 2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-3.04858735,
                                 -3.04858735,
@@ -334,13 +334,13 @@ NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg2)
 NGRAPH_TEST(${BACKEND_NAME}, log_softmax_3d_axis_neg3)
 {
     Shape shape{3, 2, 3};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{-9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8});
-    auto result = backend->create_tensor(element::f32, shape);
+    auto result = backend->create_tensor(element::Type_t::f32, shape);
 
     std::vector expected_result{-12.0024818,
                                 -12.0024818,
diff --git a/ngraph/test/backend/logical_and.in.cpp b/ngraph/test/backend/logical_and.in.cpp
index 680f9444a70..e39d68971a2 100644
--- a/ngraph/test/backend/logical_and.in.cpp
+++ b/ngraph/test/backend/logical_and.in.cpp
@@ -31,8 +31,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, logical_and)
 {
     Shape shape{3, 4};
-    auto A = make_shared(element::boolean, shape);
-    auto B = make_shared(element::boolean, shape);
+    auto A = make_shared(element::Type_t::boolean, shape);
+    auto B = make_shared(element::Type_t::boolean, shape);
     auto f =
         make_shared(std::make_shared(A, B), ParameterVector{A, B});
 
diff --git a/ngraph/test/backend/logical_not.in.cpp b/ngraph/test/backend/logical_not.in.cpp
index c59654b0482..a33db690a3c 100644
--- a/ngraph/test/backend/logical_not.in.cpp
+++ b/ngraph/test/backend/logical_not.in.cpp
@@ -48,7 +48,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, not)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::boolean, shape);
+    auto A = make_shared(element::Type_t::boolean, shape);
     auto f = make_shared(make_shared(A), ParameterVector{A});
 
     std::vector a{1, 0, 1, 0};
@@ -62,7 +62,7 @@ NGRAPH_TEST(${BACKEND_NAME}, not)
 NGRAPH_TEST(${BACKEND_NAME}, not_i32)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::i32, shape);
+    auto A = make_shared(element::Type_t::i32, shape);
     auto f = make_shared(make_shared(A), ParameterVector{A});
 
     std::vector a{1, 0, 2, 0};
diff --git a/ngraph/test/backend/logical_or.in.cpp b/ngraph/test/backend/logical_or.in.cpp
index bfe148a8665..40e23624f8d 100644
--- a/ngraph/test/backend/logical_or.in.cpp
+++ b/ngraph/test/backend/logical_or.in.cpp
@@ -31,8 +31,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, logical_or)
 {
     Shape shape{2, 2, 2};
-    auto A = make_shared(element::boolean, shape);
-    auto B = make_shared(element::boolean, shape);
+    auto A = make_shared(element::Type_t::boolean, shape);
+    auto B = make_shared(element::Type_t::boolean, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
 
     std::vector a{1, 0, 1, 1, 1, 0, 1, 0};
diff --git a/ngraph/test/backend/logical_xor.in.cpp b/ngraph/test/backend/logical_xor.in.cpp
index f71a3f8aa5f..c4ee11b8ec8 100644
--- a/ngraph/test/backend/logical_xor.in.cpp
+++ b/ngraph/test/backend/logical_xor.in.cpp
@@ -29,8 +29,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, logical_xor)
 {
     Shape shape{2, 2, 2};
-    auto A = make_shared(element::boolean, shape);
-    auto B = make_shared(element::boolean, shape);
+    auto A = make_shared(element::Type_t::boolean, shape);
+    auto B = make_shared(element::Type_t::boolean, shape);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
 
     std::vector a{1, 0, 1, 1, 1, 0, 1, 0};
diff --git a/ngraph/test/backend/lrn.in.cpp b/ngraph/test/backend/lrn.in.cpp
index 3c568e76d04..7aebbef155d 100644
--- a/ngraph/test/backend/lrn.in.cpp
+++ b/ngraph/test/backend/lrn.in.cpp
@@ -41,7 +41,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, lrn_across_channel)
 {
     Shape shape{2, 3, 2, 1};
-    auto A = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
     double alpha = 3;
     double beta = 0.5;
     double bias = 1;
@@ -73,8 +73,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_channel)
 NGRAPH_TEST(${BACKEND_NAME}, lrn_across_h)
 {
     Shape shape{2, 3, 2, 1};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i64, Shape{1}, vector{2});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i64, Shape{1}, vector{2});
     double alpha = 3;
     double beta = 0.5;
     double bias = 1;
@@ -106,8 +106,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_h)
 NGRAPH_TEST(${BACKEND_NAME}, lrn_across_hw)
 {
     Shape shape{2, 3, 2, 1};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i64, Shape{2}, vector{2, 3});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3});
     double alpha = 3;
     double beta = 0.5;
     double bias = 1;
@@ -139,8 +139,9 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_hw)
 NGRAPH_TEST(${BACKEND_NAME}, lrn_across_all_dims)
 {
     Shape shape{2, 3, 2, 1};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i64, Shape{4}, vector{0, 1, 2, 3});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes =
+        make_shared(element::Type_t::i64, Shape{4}, vector{0, 1, 2, 3});
     double alpha = 3;
     double beta = 0.5;
     double bias = 1;
@@ -172,8 +173,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_all_dims)
 NGRAPH_TEST(${BACKEND_NAME}, lrn_across_nw)
 {
     Shape shape{2, 3, 2, 1};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i64, Shape{2}, vector{0, 3});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{0, 3});
     double alpha = 3;
     double beta = 0.5;
     double bias = 1;
@@ -205,8 +206,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_nw)
 NGRAPH_TEST(${BACKEND_NAME}, lrn_across_empty)
 {
     Shape shape{2, 3, 2, 1};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i64, Shape{0}, vector{});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{});
     double alpha = 3;
     double beta = 0.5;
     double bias = 1;
@@ -238,8 +239,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_across_empty)
 NGRAPH_TEST(${BACKEND_NAME}, lrn_6D_across_2_axes)
 {
     Shape shape{2, 3, 2, 2, 1, 1};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i64, Shape{2}, vector{2, 3});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3});
     double alpha = 3;
     double beta = 0.5;
     double bias = 1;
@@ -264,8 +265,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_6D_across_2_axes)
 NGRAPH_TEST(${BACKEND_NAME}, lrn_2d_across_empty)
 {
     Shape shape{12};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i64, Shape{0}, vector{});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{});
     double alpha = 3;
     double beta = 0.5;
     double bias = 1;
@@ -296,8 +297,8 @@ NGRAPH_TEST(${BACKEND_NAME}, lrn_2d_across_empty)
 NGRAPH_TEST(${BACKEND_NAME}, lrn_2d_across_outermost_axis)
 {
     Shape shape{6, 2};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i64, Shape{1}, vector{0});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i64, Shape{1}, vector{0});
     double alpha = 0.0002;
     double beta = 0.5;
     double bias = 2.0;
diff --git a/ngraph/test/backend/matmul.in.cpp b/ngraph/test/backend/matmul.in.cpp
index a134de115e8..827208e3aa3 100644
--- a/ngraph/test/backend/matmul.in.cpp
+++ b/ngraph/test/backend/matmul.in.cpp
@@ -46,18 +46,18 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x0_0x2)
     Shape shape_b{0, 2};
     Shape shape_r{2, 2};
 
-    auto A = make_shared(element::f32, shape_a);
-    auto B = make_shared(element::f32, shape_b);
+    auto A = make_shared(element::Type_t::f32, shape_a);
+    auto B = make_shared(element::Type_t::f32, shape_b);
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector{});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
 
     // Overwrite the initial result vector to make sure we're not just coincidentally getting the
     // right value.
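Every hunk in these test files applies the same mechanical substitution: the older implicit `element::Type` constants (`element::f32`, `element::i64`, `element::boolean`, ...) are replaced by the corresponding `element::Type_t` enum values, with all other arguments and semantics unchanged. As a minimal sketch of the pattern in the `// Old` / `// New` style used earlier in this changeset (the names `A`, `shape`, and `backend` here are illustrative, not taken from any one hunk):

```C++
// Old (implicit element::Type constants)
auto A = make_shared(element::f32, shape);
auto result = backend->create_tensor(element::f32, shape);

// New (element::Type_t enum values)
auto A = make_shared(element::Type_t::f32, shape);
auto result = backend->create_tensor(element::Type_t::f32, shape);
```

Only the element-type token changes at each call site; where the longer spelling overflows the line-length limit, the statement is rewrapped, which accounts for the occasional one-line drift in the hunk headers (e.g. `@@ -172,8 +173,8 @@`).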
@@ -72,20 +72,20 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_0x2_2x0)
 {
     Shape shape_a{0, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_b{2, 0};
-    auto B = make_shared(element::f32, shape_b);
+    auto B = make_shared(element::Type_t::f32, shape_b);
     Shape shape_r{0, 0};
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector{});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
 
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -96,20 +96,20 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3x2_2x0)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_b{2, 0};
-    auto B = make_shared(element::f32, shape_b);
+    auto B = make_shared(element::Type_t::f32, shape_b);
     Shape shape_r{3, 0};
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto b = backend->create_tensor(element::f32, shape_b);
+    auto b = backend->create_tensor(element::Type_t::f32, shape_b);
     copy_data(b, vector{});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
 
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -119,19 +119,19 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x2_2x2)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto B = make_shared(element::f32, shape);
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto B = make_shared(element::Type_t::f32, shape);
     Shape shape_r{2, 2};
     auto f = make_shared(make_shared(A, B), ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto b = backend->create_tensor(element::f32, shape);
+    auto b = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(b, vector{5, 6, 7, 8});
-    auto result = backend->create_tensor(element::f32, shape_r);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_r);
 
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a, b});
@@ -143,17 +143,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x3_3x3)
     Shape shape_in1{2, 3};
     Shape shape_in2{3, 3};
     Shape shape_out{2, 3};
-    auto A = make_shared(element::f32, shape_in1);
-    auto B = make_shared(element::f32, shape_in2);
+    auto A = make_shared(element::Type_t::f32, shape_in1);
+    auto B = make_shared(element::Type_t::f32, shape_in2);
     auto matmul = make_shared(A, B, false, false);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    shared_ptr a = backend->create_tensor(element::f32, shape_in1);
-    shared_ptr b = backend->create_tensor(element::f32, shape_in2);
-    shared_ptr result = backend->create_tensor(element::f32, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out);
 
     copy_data(a, vector{1.f, 2.f, 3.f, 4.f, 5.f, 6.f});
     copy_data(b, vector{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f});
@@ -170,17 +170,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x3_3x3_int64)
     Shape shape_in1{2, 3};
     Shape shape_in2{3, 3};
     Shape shape_out{2, 3};
-    auto A = make_shared(element::i64, shape_in1);
-    auto B = make_shared(element::i64, shape_in2);
+    auto A = make_shared(element::Type_t::i64, shape_in1);
+    auto B = make_shared(element::Type_t::i64, shape_in2);
     auto matmul = make_shared(A, B, false, false);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    shared_ptr a = backend->create_tensor(element::i64, shape_in1);
-    shared_ptr b = backend->create_tensor(element::i64, shape_in2);
-    shared_ptr result = backend->create_tensor(element::i64, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::i64, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::i64, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::i64, shape_out);
 
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
     copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8, 9});
@@ -197,17 +197,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3x2_3x3_transpose)
     Shape shape_in1{3, 2};
     Shape shape_in2{3, 3};
     Shape shape_out{2, 3};
-    auto A = make_shared(element::f32, shape_in1);
-    auto B = make_shared(element::f32, shape_in2);
+    auto A = make_shared(element::Type_t::f32, shape_in1);
+    auto B = make_shared(element::Type_t::f32, shape_in2);
     auto matmul = make_shared(A, B, true, false);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    shared_ptr a = backend->create_tensor(element::f32, shape_in1);
-    shared_ptr b = backend->create_tensor(element::f32, shape_in2);
-    shared_ptr result = backend->create_tensor(element::f32, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out);
 
     copy_data(a, vector{1.f, 4.f, 2.f, 5.f, 3.f, 6.f});
     copy_data(b, vector{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f});
@@ -224,17 +224,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3x2_2x3_transpose)
     Shape shape_in1{3, 2};
     Shape shape_in2{2, 3};
     Shape shape_out{2, 2};
-    auto A = make_shared(element::f32, shape_in1);
-    auto B = make_shared(element::f32, shape_in2);
+    auto A = make_shared(element::Type_t::f32, shape_in1);
+    auto B = make_shared(element::Type_t::f32, shape_in2);
     auto matmul = make_shared(A, B, true, true);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    shared_ptr a = backend->create_tensor(element::f32, shape_in1);
-    shared_ptr b = backend->create_tensor(element::f32, shape_in2);
-    shared_ptr result = backend->create_tensor(element::f32, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out);
 
     copy_data(a, vector{1.f, 4.f, 2.f, 5.f, 3.f, 6.f});
     copy_data(b, vector{1.f, 3.f, 5.f, 2.f, 4.f, 6.f});
@@ -251,17 +251,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x3x2_3x3_transpose)
     Shape shape_in1{2, 3, 2};
     Shape shape_in2{3, 3};
     Shape shape_out{2, 2, 3};
-    auto A = make_shared(element::f32, shape_in1);
-    auto B = make_shared(element::f32, shape_in2);
+    auto A = make_shared(element::Type_t::f32, shape_in1);
+    auto B = make_shared(element::Type_t::f32, shape_in2);
     auto matmul = make_shared(A, B, true, false);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    shared_ptr a = backend->create_tensor(element::f32, shape_in1);
-    shared_ptr b = backend->create_tensor(element::f32, shape_in2);
-    shared_ptr result = backend->create_tensor(element::f32, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out);
 
     copy_data(a, vector{1.f, 4.f, 2.f, 5.f, 3.f, 6.f, 3.f, 2.f, 1.f, 4.f, 5.f, 6.f});
     copy_data(b, vector{1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f});
@@ -279,16 +279,16 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2_2)
     Shape shape_in1{2};
     Shape shape_in2{2};
     Shape shape_out{};
-    auto A = make_shared(element::f32, shape_in1);
-    auto B = make_shared(element::f32, shape_in2);
+    auto A = make_shared(element::Type_t::f32, shape_in1);
+    auto B = make_shared(element::Type_t::f32, shape_in2);
     auto matmul = make_shared(A, B, false, false);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    shared_ptr a = backend->create_tensor(element::f32, shape_in1);
-    shared_ptr b = backend->create_tensor(element::f32, shape_in2);
-    shared_ptr result = backend->create_tensor(element::f32, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out);
 
     copy_data(a, vector{1.f, 2.f});
    copy_data(b, vector{1.f, 2.f});
@@ -304,16 +304,16 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x2x3_2x1x3_transpose)
     Shape shape_in1{2, 2, 3};
     Shape shape_in2{2, 1, 3};
     Shape shape_out{2, 2, 1};
-    auto A = make_shared(element::f32, shape_in1);
-    auto B = make_shared(element::f32, shape_in2);
+    auto A = make_shared(element::Type_t::f32, shape_in1);
+    auto B = make_shared(element::Type_t::f32, shape_in2);
     auto matmul = make_shared(A, B, false, true);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    shared_ptr a = backend->create_tensor(element::f32, shape_in1);
-    shared_ptr b = backend->create_tensor(element::f32, shape_in2);
-    shared_ptr result = backend->create_tensor(element::f32, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out);
 
     vector in1_vec(shape_size(shape_in1));
     vector in2_vec(shape_size(shape_in2));
@@ -336,16 +336,16 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x2x3_2x1x3_transpose_int64)
     Shape shape_in1{2, 2, 3};
     Shape shape_in2{2, 1, 3};
     Shape shape_out{2, 2, 1};
-    auto A = make_shared(element::i64, shape_in1);
-    auto B = make_shared(element::i64, shape_in2);
+    auto A = make_shared(element::Type_t::i64, shape_in1);
+    auto B = make_shared(element::Type_t::i64, shape_in2);
     auto matmul = make_shared(A, B, false, true);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    shared_ptr a = backend->create_tensor(element::i64, shape_in1);
-    shared_ptr b = backend->create_tensor(element::i64, shape_in2);
-    shared_ptr result = backend->create_tensor(element::i64, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::i64, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::i64, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::i64, shape_out);
 
     vector in1_vec(shape_size(shape_in1));
     vector in2_vec(shape_size(shape_in2));
@@ -367,16 +367,16 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_2x2x3_2x3x1_int64)
     Shape shape_in1{2, 2, 3};
     Shape shape_in2{2, 3, 1};
     Shape shape_out{2, 2, 1};
-    auto A = make_shared(element::i64, shape_in1);
-    auto B = make_shared(element::i64, shape_in2);
+    auto A = make_shared(element::Type_t::i64, shape_in1);
+    auto B = make_shared(element::Type_t::i64, shape_in2);
     auto matmul = make_shared(A, B, false, false);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
-    shared_ptr a = backend->create_tensor(element::i64, shape_in1);
-    shared_ptr b = backend->create_tensor(element::i64, shape_in2);
-    shared_ptr result = backend->create_tensor(element::i64, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::i64, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::i64, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::i64, shape_out);
 
     vector in1_vec(shape_size(shape_in1));
     vector in2_vec(shape_size(shape_in2));
@@ -398,17 +398,17 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_1x2x3_1x4x3x2)
     Shape shape_in1{1, 2, 3};
     Shape shape_in2{1, 4, 3, 2};
     Shape shape_out{1, 4, 2, 2};
-    auto A = make_shared(element::f32, shape_in1);
-    auto B = make_shared(element::f32, shape_in2);
+    auto A = make_shared(element::Type_t::f32, shape_in1);
+    auto B = make_shared(element::Type_t::f32, shape_in2);
     auto matmul = make_shared(A, B, false, false);
     auto f = make_shared(matmul, ParameterVector{A, B});
 
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
 
     // Create some tensors for input/output
-    shared_ptr a = backend->create_tensor(element::f32, shape_in1);
-    shared_ptr b = backend->create_tensor(element::f32, shape_in2);
-    shared_ptr result = backend->create_tensor(element::f32, shape_out);
+    shared_ptr a = backend->create_tensor(element::Type_t::f32, shape_in1);
+    shared_ptr b = backend->create_tensor(element::Type_t::f32, shape_in2);
+    shared_ptr result = backend->create_tensor(element::Type_t::f32, shape_out);
 
     vector in1_vec(shape_size(shape_in1));
     vector in2_vec(shape_size(shape_in2));
@@ -455,8 +455,8 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_1_3_x_3_false_false_param)
     std::vector inputs_b{1, 2, 3};
     std::vector expected_result{14.};
 
-    auto A = make_shared(element::f32, shape_in1);
-    auto B = make_shared(element::f32, shape_in2);
+    auto A = make_shared(element::Type_t::f32, shape_in1);
+    auto B = make_shared(element::Type_t::f32, shape_in2);
     auto matmul = make_shared(A, B,
transpose_a, transpose_b); auto f = make_shared(matmul, ParameterVector{A, B}); @@ -481,8 +481,8 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3_1_x_3_true_false_param) std::vector inputs_b{1, 2, 3}; std::vector expected_result{14.}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, transpose_a, transpose_b); auto f = make_shared(matmul, ParameterVector{A, B}); @@ -508,8 +508,8 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3_x_3_1_false_false_param) std::vector inputs_b{1, 2, 3}; std::vector expected_result{14.}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, transpose_a, transpose_b); auto f = make_shared(matmul, ParameterVector{A, B}); @@ -534,8 +534,8 @@ NGRAPH_TEST(${BACKEND_NAME}, matmul_3_x_1_3_false_true_param) std::vector inputs_b{1, 2, 3}; std::vector expected_result{14.}; - auto A = make_shared(element::f32, shape_in1); - auto B = make_shared(element::f32, shape_in2); + auto A = make_shared(element::Type_t::f32, shape_in1); + auto B = make_shared(element::Type_t::f32, shape_in2); auto matmul = make_shared(A, B, transpose_a, transpose_b); auto f = make_shared(matmul, ParameterVector{A, B}); diff --git a/ngraph/test/backend/maximum.in.cpp b/ngraph/test/backend/maximum.in.cpp index e24a1b6320e..fb668b3664e 100644 --- a/ngraph/test/backend/maximum.in.cpp +++ b/ngraph/test/backend/maximum.in.cpp @@ -51,18 +51,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, maximum) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 8, -8, 17, -0.5, 0.5, 2, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8, 0, 0, 1, 1.5}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -73,18 +73,18 @@ NGRAPH_TEST(${BACKEND_NAME}, maximum) NGRAPH_TEST(${BACKEND_NAME}, maximum_int32) { Shape shape{2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{0x40000140, 0x40000001, -8, 17}); - auto b = backend->create_tensor(element::i32, shape); + auto b = backend->create_tensor(element::Type_t::i32, shape); copy_data(b, vector{0x40000170, 
0x40000000, 4, 8}); - auto result = backend->create_tensor(element::i32, shape); + auto result = backend->create_tensor(element::Type_t::i32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -94,18 +94,18 @@ NGRAPH_TEST(${BACKEND_NAME}, maximum_int32) NGRAPH_TEST(${BACKEND_NAME}, maximum_int64) { Shape shape{2, 2, 2}; - auto A = make_shared(element::i64, shape); - auto B = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); + auto B = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape); + auto a = backend->create_tensor(element::Type_t::i64, shape); copy_data(a, vector{1, 8, -8, 17, -5, 67635216, 2, 17179887632}); - auto b = backend->create_tensor(element::i64, shape); + auto b = backend->create_tensor(element::Type_t::i64, shape); copy_data(b, vector{1, 2, 4, 8, 0, 18448, 1, 280592}); - auto result = backend->create_tensor(element::i64, shape); + auto result = backend->create_tensor(element::Type_t::i64, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); diff --git a/ngraph/test/backend/minimum.in.cpp b/ngraph/test/backend/minimum.in.cpp index fcd18dc6b57..cb48daaf8b5 100644 --- a/ngraph/test/backend/minimum.in.cpp +++ b/ngraph/test/backend/minimum.in.cpp @@ -48,8 +48,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, minimum) { Shape shape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 8, -8, 17, -0.5, 0.5, 2, 1}; @@ -64,8 +64,8 @@ NGRAPH_TEST(${BACKEND_NAME}, minimum) NGRAPH_TEST(${BACKEND_NAME}, minimum_int32) { Shape shape{2, 2, 2}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 8, -8, 17, -5, 67635216, 2, 1}; @@ -80,8 +80,8 @@ NGRAPH_TEST(${BACKEND_NAME}, minimum_int32) NGRAPH_TEST(${BACKEND_NAME}, minimum_int64) { Shape shape{2, 2, 2}; - auto A = make_shared(element::i64, shape); - auto B = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); + auto B = make_shared(element::Type_t::i64, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 8, -8, 17, -5, 67635216, 2, 17179887632}; diff --git a/ngraph/test/backend/multiple_backends.in.cpp b/ngraph/test/backend/multiple_backends.in.cpp index e97d7560f26..515ba2cf217 100644 --- a/ngraph/test/backend/multiple_backends.in.cpp +++ b/ngraph/test/backend/multiple_backends.in.cpp @@ -35,12 +35,12 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, multiple_backends) { Shape shape{2, 2}; - auto A1 = make_shared(element::f32, shape); - auto B1 = make_shared(element::f32, shape); + auto A1 = make_shared(element::Type_t::f32, shape); + auto B1 = make_shared(element::Type_t::f32, shape); auto f = make_shared(A1 + B1, ParameterVector{A1, B1}); - auto A2 = make_shared(element::f32, shape); - auto B2 = make_shared(element::f32, shape); + 
auto A2 = make_shared(element::Type_t::f32, shape); + auto B2 = make_shared(element::Type_t::f32, shape); auto g = make_shared(A2 * B2, ParameterVector{A2, B2}); auto backend1 = runtime::Backend::create("${BACKEND_NAME}"); @@ -48,13 +48,13 @@ NGRAPH_TEST(${BACKEND_NAME}, multiple_backends) auto backend2 = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a1 = backend1->create_tensor(element::f32, shape); - shared_ptr b1 = backend1->create_tensor(element::f32, shape); - shared_ptr result1 = backend1->create_tensor(element::f32, shape); + shared_ptr a1 = backend1->create_tensor(element::Type_t::f32, shape); + shared_ptr b1 = backend1->create_tensor(element::Type_t::f32, shape); + shared_ptr result1 = backend1->create_tensor(element::Type_t::f32, shape); - shared_ptr a2 = backend2->create_tensor(element::f32, shape); - shared_ptr b2 = backend2->create_tensor(element::f32, shape); - shared_ptr result2 = backend2->create_tensor(element::f32, shape); + shared_ptr a2 = backend2->create_tensor(element::Type_t::f32, shape); + shared_ptr b2 = backend2->create_tensor(element::Type_t::f32, shape); + shared_ptr result2 = backend2->create_tensor(element::Type_t::f32, shape); copy_data(a1, test::NDArray({{1, 2}, {3, 4}}).get_vector()); copy_data(b1, test::NDArray({{5, 6}, {7, 8}}).get_vector()); diff --git a/ngraph/test/backend/multiple_result.in.cpp b/ngraph/test/backend/multiple_result.in.cpp index f9128a5bf93..57361900135 100644 --- a/ngraph/test/backend/multiple_result.in.cpp +++ b/ngraph/test/backend/multiple_result.in.cpp @@ -34,9 +34,9 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, multiple_result) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto A_add_B = make_shared(A, B); auto A_add_B_mul_C = make_shared(A_add_B, C); @@ -44,15 +44,15 @@ NGRAPH_TEST(${BACKEND_NAME}, multiple_result) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{5, 6, 7, 8}); - auto c = backend->create_tensor(element::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); copy_data(c, vector{9, 10, 11, 12}); - auto r0 = backend->create_tensor(element::f32, shape); - auto r1 = backend->create_tensor(element::f32, shape); + auto r0 = backend->create_tensor(element::Type_t::f32, shape); + auto r1 = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({r0, r1}, {a, b, c}); diff --git a/ngraph/test/backend/multiply.in.cpp b/ngraph/test/backend/multiply.in.cpp index 75bd0954805..bea292e9d0e 100644 --- a/ngraph/test/backend/multiply.in.cpp +++ b/ngraph/test/backend/multiply.in.cpp @@ -48,8 +48,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, multiply) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); 
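The rewrite in every hunk above follows one mechanical pattern: free constants such as element::f32 become the scoped enumerators element::Type_t::f32, which element::Type accepts implicitly, so only the spelling at each call site changes. As a standalone illustration — a minimal sketch, not taken verbatim from the patch, assuming the public ngraph.hpp header and the v1 Multiply op these tests already use:

```C++
// Minimal sketch of the element-type migration, assuming ngraph.hpp is
// available. element::Type is implicitly constructible from an
// element::Type_t enumerator, so call sites only change their spelling.
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Function> make_multiply_function()
{
    Shape shape{2, 2};
    // New spelling: scoped enumerator instead of the element::f32 constant.
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    auto B = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    return std::make_shared<Function>(std::make_shared<op::v1::Multiply>(A, B),
                                      ParameterVector{A, B});
}
```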
auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 2, 3, 4}; @@ -64,8 +64,8 @@ NGRAPH_TEST(${BACKEND_NAME}, multiply) NGRAPH_TEST(${BACKEND_NAME}, multiply_overload) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A * B, ParameterVector{A, B}); std::vector a{1, 2, 3, 4}; diff --git a/ngraph/test/backend/negative.in.cpp b/ngraph/test/backend/negative.in.cpp index 791461caacf..d3b45010644 100644 --- a/ngraph/test/backend/negative.in.cpp +++ b/ngraph/test/backend/negative.in.cpp @@ -46,7 +46,7 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, negative) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); std::vector a{1, -2, 0, -4.75f, 8.75f, -8.75f}; @@ -60,7 +60,7 @@ NGRAPH_TEST(${BACKEND_NAME}, negative) NGRAPH_TEST(${BACKEND_NAME}, negative_i32) { auto shape_a = Shape{2, 5}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 5}; auto f = make_shared(relu, ParameterVector{A}); @@ -76,7 +76,7 @@ NGRAPH_TEST(${BACKEND_NAME}, negative_i32) NGRAPH_TEST(${BACKEND_NAME}, negative_f32) { auto shape_a = Shape{2, 5}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 5}; auto f = make_shared(relu, ParameterVector{A}); diff --git a/ngraph/test/backend/node_name.in.cpp b/ngraph/test/backend/node_name.in.cpp index 9424d6f7363..2e30c0b0a39 100644 --- a/ngraph/test/backend/node_name.in.cpp +++ b/ngraph/test/backend/node_name.in.cpp @@ -33,8 +33,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, node_name) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto C = A + B; C->set_friendly_name("a node name"); auto f = make_shared(C, ParameterVector{A, B}); @@ -42,9 +42,9 @@ NGRAPH_TEST(${BACKEND_NAME}, node_name) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape); - shared_ptr b = backend->create_tensor(element::f32, shape); - shared_ptr result = backend->create_tensor(element::f32, shape); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{1, 2}, {3, 4}}).get_vector()); copy_data(b, test::NDArray({{5, 6}, {7, 8}}).get_vector()); diff --git a/ngraph/test/backend/non_max_suppression.in.cpp b/ngraph/test/backend/non_max_suppression.in.cpp index e258d272e41..cd7220e911a 100644 --- a/ngraph/test/backend/non_max_suppression.in.cpp +++ b/ngraph/test/backend/non_max_suppression.in.cpp @@ -57,14 +57,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_center_point_box_format) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = 
make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -78,12 +79,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_center_point_box_format) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{3, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{3, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{3, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{3, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -120,14 +121,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_flipped_coordinates) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -141,12 +143,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_flipped_coordinates) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{3, 3}); - auto 
selected_scores = backend->create_tensor(element::f32, Shape{3, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{3, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{3, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -184,14 +186,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_identical_boxes) const auto boxes_shape = Shape{1, 10, 4}; const auto scores_shape = Shape{1, 1, 10}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -205,12 +208,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_identical_boxes) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{1, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{1, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{1, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{1, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -247,14 +250,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_limit_output_size) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, 
{iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -268,12 +272,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_limit_output_size) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{2, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{2, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{2, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -308,14 +312,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_single_box) const auto boxes_shape = Shape{1, 1, 4}; const auto scores_shape = Shape{1, 1, 1}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -329,12 +334,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_single_box) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{1, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{1, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{1, 3}); + auto selected_scores = 
backend->create_tensor(element::Type_t::f32, Shape{1, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -371,14 +376,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_suppress_by_IOU) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -392,12 +398,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_suppress_by_IOU) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{3, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{3, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{3, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{3, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -434,14 +440,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_suppress_by_IOU_and_scores) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, 
Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -455,12 +462,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_suppress_by_IOU_and_scores) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{2, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{2, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{2, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -499,14 +506,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_two_batches) const auto boxes_shape = Shape{2, 6, 4}; const auto scores_shape = Shape{2, 1, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -520,12 +528,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_two_batches) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{4, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{4, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{4, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{4, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = 
backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); @@ -564,14 +572,15 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_two_classes) const auto boxes_shape = Shape{1, 6, 4}; const auto scores_shape = Shape{1, 2, 6}; - const auto boxes = make_shared(element::f32, boxes_shape); - const auto scores = make_shared(element::f32, scores_shape); - auto max_output_boxes_per_class = - op::Constant::create(element::i64, Shape{}, {max_output_boxes_per_class_data}); - auto iou_threshold = op::Constant::create(element::f32, Shape{}, {iou_threshold_data}); + const auto boxes = make_shared(element::Type_t::f32, boxes_shape); + const auto scores = make_shared(element::Type_t::f32, scores_shape); + auto max_output_boxes_per_class = op::Constant::create( + element::Type_t::i64, Shape{}, {max_output_boxes_per_class_data}); + auto iou_threshold = + op::Constant::create(element::Type_t::f32, Shape{}, {iou_threshold_data}); auto score_threshold = - op::Constant::create(element::f32, Shape{}, {score_threshold_data}); - auto soft_nms_sigma = op::Constant::create(element::f32, Shape{}, {0.0f}); + op::Constant::create(element::Type_t::f32, Shape{}, {score_threshold_data}); + auto soft_nms_sigma = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); auto nms = make_shared(boxes, scores, max_output_boxes_per_class, @@ -585,12 +594,12 @@ NGRAPH_TEST(${BACKEND_NAME}, nonmaxsuppression_two_classes) auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto selected_indeces = backend->create_tensor(element::i64, Shape{4, 3}); - auto selected_scores = backend->create_tensor(element::f32, Shape{4, 3}); - auto valid_outputs = backend->create_tensor(element::i64, Shape{1}); + auto selected_indeces = backend->create_tensor(element::Type_t::i64, Shape{4, 3}); + auto selected_scores = backend->create_tensor(element::Type_t::f32, Shape{4, 3}); + auto valid_outputs = backend->create_tensor(element::Type_t::i64, Shape{1}); - auto backend_boxes = backend->create_tensor(element::f32, boxes_shape); - auto backend_scores = backend->create_tensor(element::f32, scores_shape); + auto backend_boxes = backend->create_tensor(element::Type_t::f32, boxes_shape); + auto backend_scores = backend->create_tensor(element::Type_t::f32, scores_shape); copy_data(backend_boxes, boxes_data); copy_data(backend_scores, scores_data); diff --git a/ngraph/test/backend/non_zero.in.cpp b/ngraph/test/backend/non_zero.in.cpp index 774513f6196..f74c0e8dae1 100644 --- a/ngraph/test/backend/non_zero.in.cpp +++ b/ngraph/test/backend/non_zero.in.cpp @@ -29,14 +29,14 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, non_zero) { PartialShape p_shape = PartialShape::dynamic(); - auto p = make_shared(element::f32, p_shape); - auto non_zero = make_shared(p, element::i32); + auto p = make_shared(element::Type_t::f32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i32); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); auto cfun = backend->compile(fun); - auto input = backend->create_tensor(element::f32, Shape{3, 2}); + auto input = backend->create_tensor(element::Type_t::f32, Shape{3, 2}); copy_data(input, vector{0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 3.0f}); std::vector expected_result{2, 2, 0, 1}; @@ -45,7 
+45,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero) auto result = make_shared(); cfun->call_with_validate({result}, {input}); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_shape(), expected_output_shape); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result); @@ -54,8 +54,8 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero) NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_1s) { PartialShape p_shape = PartialShape::dynamic(); - auto p = make_shared(element::i32, p_shape); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::i32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -63,7 +63,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_1s) Shape input_shape{3, 2}; vector input_data(shape_size(input_shape), 1); - auto input = backend->create_tensor(element::i32, input_shape); + auto input = backend->create_tensor(element::Type_t::i32, input_shape); copy_data(input, input_data); std::vector expected_result{0, 0, 1, 1, 2, 2, 0, 1, 0, 1, 0, 1}; @@ -72,7 +72,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_1s) auto result = make_shared(); cfun->call_with_validate({result}, {input}); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_output_shape); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result); @@ -81,8 +81,8 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_1s) NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_0s) { PartialShape p_shape = PartialShape::dynamic(); - auto p = make_shared(element::i32, p_shape); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::i32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); @@ -90,7 +90,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_0s) Shape input_shape{3, 2}; vector input_data(shape_size(input_shape), 0); - auto input = backend->create_tensor(element::i32, input_shape); + auto input = backend->create_tensor(element::Type_t::i32, input_shape); copy_data(input, input_data); Shape expected_output_shape{input_shape.size(), 0}; @@ -98,7 +98,7 @@ NGRAPH_TEST(${BACKEND_NAME}, non_zero_all_0s) auto result = make_shared(); cfun->call_with_validate({result}, {input}); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_output_shape); auto result_data = read_vector(result); ASSERT_EQ(result_data.data(), nullptr); diff --git a/ngraph/test/backend/normalize_l2.in.cpp b/ngraph/test/backend/normalize_l2.in.cpp index 77e0415e632..4d15baf8e96 100644 --- a/ngraph/test/backend/normalize_l2.in.cpp +++ b/ngraph/test/backend/normalize_l2.in.cpp @@ -41,8 +41,8 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{0, 1}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, 
ngraph::op::EpsMode::ADD), @@ -51,9 +51,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -64,8 +64,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_add) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{0}, vector{}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -74,9 +74,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -87,8 +87,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_add) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{}, vector{0}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{}, vector{0}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -97,9 +97,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -110,8 +110,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_add) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{}, vector{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{}, vector{1}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -120,9 +120,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); 
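For orientation in the NormalizeL2 hunks here: the tests vary only the reduction axes and the EpsMode that stabilizes the divisor — with ADD the norm is sqrt(sum(x^2) + eps), with MAX it is sqrt(max(sum(x^2), eps)). A minimal sketch of the graph they build, under the same assumptions as above (ngraph.hpp available; op::NormalizeL2 taking data, axes, a float eps, and an EpsMode, as these tests call it):

```C++
// Sketch, assuming ngraph.hpp. With EpsMode::ADD the divisor is
// sqrt(sum(x^2) + eps); with EpsMode::MAX it is sqrt(max(sum(x^2), eps)).
#include "ngraph/ngraph.hpp"

using namespace ngraph;

std::shared_ptr<Function> make_l2_normalize(op::EpsMode mode)
{
    Shape shape{2, 2};
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    // Reduce over both axes, as in the "all_mode" tests.
    auto axes = std::make_shared<op::Constant>(
        element::Type_t::i64, Shape{2}, std::vector<int64_t>{0, 1});
    float eps = 1e-7f;
    return std::make_shared<Function>(
        std::make_shared<op::NormalizeL2>(A, axes, eps, mode),
        ParameterVector{A});
}
```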
handle->call_with_validate({result}, {a}); @@ -135,8 +135,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_add) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{0, 1}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::ADD), @@ -145,9 +145,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -158,8 +158,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_all_mode_max) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{0}, vector{}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{0}, vector{}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::MAX), @@ -168,9 +168,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -181,8 +181,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_none_mode_max) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{}, vector{0}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{}, vector{0}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, ngraph::op::EpsMode::MAX), @@ -191,9 +191,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -204,8 +204,8 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_zero_mode_max) NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_max) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i64, Shape{}, vector{1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i64, Shape{}, vector{1}); float eps = 1e-7; auto f = make_shared( make_shared(A, axes, eps, 
ngraph::op::EpsMode::MAX), @@ -214,9 +214,9 @@ NGRAPH_TEST(${BACKEND_NAME}, normalize_l2_one_mode_max) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/numeric.in.cpp b/ngraph/test/backend/numeric.in.cpp index 63f444b646d..a95febf5d14 100644 --- a/ngraph/test/backend/numeric.in.cpp +++ b/ngraph/test/backend/numeric.in.cpp @@ -31,8 +31,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, numeric_float_nan) { Shape shape{5}; - auto A = op::Constant::create(element::f32, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); - auto B = op::Constant::create(element::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); + auto A = op::Constant::create(element::Type_t::f32, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); + auto B = op::Constant::create(element::Type_t::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto test_case = test::TestCase(f); @@ -43,8 +43,8 @@ NGRAPH_TEST(${BACKEND_NAME}, numeric_float_nan) NGRAPH_TEST(${BACKEND_NAME}, numeric_double_nan) { Shape shape{5}; - auto A = op::Constant::create(element::f64, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); - auto B = op::Constant::create(element::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); + auto A = op::Constant::create(element::Type_t::f64, shape, {-2.5f, 25.5f, 2.25f, NAN, 6.0f}); + auto B = op::Constant::create(element::Type_t::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, NAN}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto test_case = test::TestCase(f); @@ -55,8 +55,10 @@ NGRAPH_TEST(${BACKEND_NAME}, numeric_double_nan) NGRAPH_TEST(${BACKEND_NAME}, numeric_float_inf) { Shape shape{5}; - auto A = op::Constant::create(element::f32, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); - auto B = op::Constant::create(element::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); + auto A = + op::Constant::create(element::Type_t::f32, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); + auto B = + op::Constant::create(element::Type_t::f32, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto test_case = test::TestCase(f); @@ -67,8 +69,10 @@ NGRAPH_TEST(${BACKEND_NAME}, numeric_float_inf) NGRAPH_TEST(${BACKEND_NAME}, numeric_double_inf) { Shape shape{5}; - auto A = op::Constant::create(element::f64, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); - auto B = op::Constant::create(element::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); + auto A = + op::Constant::create(element::Type_t::f64, shape, {-2.5f, 25.5f, 2.25f, INFINITY, 6.0f}); + auto B = + op::Constant::create(element::Type_t::f64, shape, {10.0f, 5.0f, 2.25f, 10.0f, -INFINITY}); auto f = make_shared(make_shared(A, B), ParameterVector{}); auto test_case = test::TestCase(f); diff --git a/ngraph/test/backend/one_hot.in.cpp b/ngraph/test/backend/one_hot.in.cpp index 47192df718f..93e54b6059b 100644 --- a/ngraph/test/backend/one_hot.in.cpp +++ b/ngraph/test/backend/one_hot.in.cpp @@ -36,12 +36,12 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, 
one_hot_scalar_2_in_3) { Shape shape_a{}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); int axis = 0; Shape shape_r{3}; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -54,12 +54,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_2_in_3) NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_1_in_3) { Shape shape_a{}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); int axis = 0; Shape shape_r{3}; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -72,12 +72,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_1_in_3) NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_0_in_3) { Shape shape_a{}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3}; int axis = 0; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -90,12 +90,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_scalar_0_in_3) NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_0) { Shape shape_a{8}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3, 8}; int axis = 0; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -109,12 +109,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_0) NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1) { Shape shape_a{8}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{8, 3}; int axis = 1; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = 
op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -128,12 +128,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1) NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1_barely_oob) { Shape shape_a{8}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{8, 3}; int axis = 1; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -148,12 +148,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_1_barely_oob) NGRAPH_TEST(${BACKEND_NAME}, one_hot_matrix_0) { Shape shape_a{3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3, 3, 3}; int axis = 0; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -169,12 +169,12 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_matrix_0) NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_many_categories) { // ImageNet has roughly 20,000 categories constexpr uint32_t category_count = 20000; Shape shape_a{6}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{6, category_count}; int axis = 1; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::i32, {}, {1}); - auto off_value = op::Constant::create(element::i32, {}, {0}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::i32, {}, {1}); + auto off_value = op::Constant::create(element::Type_t::i32, {}, {0}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); @@ -194,24 +194,24 @@ NGRAPH_TEST(${BACKEND_NAME}, one_hot_vector_many_categories) NGRAPH_TEST(${BACKEND_NAME}, one_hot_on_off_float) { Shape shape_a{3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_r{3, 3, 3}; int axis = 0; - auto depth = op::Constant::create(element::i32, {}, {shape_r[axis]}); - auto on_value = op::Constant::create(element::f32, {}, {2.5}); - auto off_value = op::Constant::create(element::f32, {}, {0.5}); + auto depth = op::Constant::create(element::Type_t::i32, {}, {shape_r[axis]}); + auto on_value = op::Constant::create(element::Type_t::f32, {}, {2.5}); + auto off_value =
op::Constant::create(element::Type_t::f32, {}, {0.5}); auto r = make_shared(A, depth, on_value, off_value, axis); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{ 0, 1, 1, 2, 1, 0, 0, 2, 1, }); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/pad.in.cpp b/ngraph/test/backend/pad.in.cpp index 7ffbd97a093..99bc1a77e35 100644 --- a/ngraph/test/backend/pad.in.cpp +++ b/ngraph/test/backend/pad.in.cpp @@ -33,11 +33,11 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_1d) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {5}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {4}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {5}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -46,9 +46,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_1d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{15}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{15}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -61,11 +61,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_1d) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {4}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -74,9 +74,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{8}); + auto result = 
backend->create_tensor(element::Type_t::f32, Shape{8}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -88,11 +88,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d_check_limits) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {4}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {4}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -101,9 +101,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d_check_limits) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{3}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{3}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -114,11 +114,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_1d_check_limits) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -127,9 +127,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{11}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{11}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -140,11 +140,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = 
op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -153,9 +153,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{5}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -166,11 +166,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg_bigger_than_tensor) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -179,9 +179,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg_bigger_than_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -191,11 +191,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_top_neg_bigger_than_tensor) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {-2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -204,9 +204,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - 
auto result = backend->create_tensor(element::f32, Shape{7}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{7}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -217,11 +217,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg_bigger_than_tensor) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -230,9 +230,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg_bigger_than_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{2}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{2}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -242,11 +242,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_1d_bottom_neg_bigger_than_tensor) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -255,9 +255,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})); - auto result = backend->create_tensor(element::f32, Shape{6, 9}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{6, 9}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -275,11 +275,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d) NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d_with_neg) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, -1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 
2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, -1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::EDGE), @@ -288,9 +288,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d_with_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})); - auto result = backend->create_tensor(element::f32, Shape{6, 5}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{6, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -308,11 +308,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_edge_2d_with_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -321,9 +321,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{11}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{11}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -335,11 +335,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -348,9 +348,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = 
backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{5}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -361,11 +361,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg_bigger_than_tensor) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -374,9 +374,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg_bigger_than_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -387,11 +387,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_top_neg_bigger_than_tensor) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {-2}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {-2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -400,9 +400,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{7}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{7}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -414,11 +414,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg_bigger_than_tensor) { const Shape data_shape{6}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto 
pads_begin = op::Constant::create(element::i64, Shape{1}, {-7}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {3}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {-7}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {3}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -427,9 +427,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg_bigger_than_tensor) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3, 4, 5, 6})); - auto result = backend->create_tensor(element::f32, Shape{2}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{2}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -440,11 +440,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_bottom_neg_bigger_than_tensor) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_multi_reflect) { const Shape data_shape{3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{1}, {10}); - const auto pads_end = op::Constant::create(element::i64, Shape{1}, {9}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{1}, {10}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{1}, {9}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -453,9 +453,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_multi_reflect) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, std::vector({1, 2, 3})); - auto result = backend->create_tensor(element::f32, Shape{22}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{22}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -468,11 +468,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_1d_multi_reflect) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -481,10 +481,10 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d) auto backend = 
runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{6, 9}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{6, 9}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -502,11 +502,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d) NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d_with_neg) { const Shape data_shape{3, 4}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, -1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, -1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::REFLECT), @@ -515,10 +515,10 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d_with_neg) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3, 4}, {5, 6, 7, 8}, {9, 10, 11, 12}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{6, 5}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{6, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -536,11 +536,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_reflect_2d_with_neg) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d) { const Shape data_shape{2, 3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, -1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {2, 0}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {9}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {1, -1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 0}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {9}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -549,9 +549,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3}, {4, 5, 6}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{5, 2}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5, 2}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -564,11 +564,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d) 
NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d_all_negative) { const Shape data_shape{3, 3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {-1, -1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {-1, -1}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {9}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {-1, -1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {-1, -1}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {9}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -577,9 +577,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d_all_negative) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -591,11 +591,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_2d_all_negative) NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x0) { const Shape data_shape{0, 0}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 3}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {3, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {3, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -604,8 +604,8 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x0) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); - auto result = backend->create_tensor(element::f32, Shape{5, 5}); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -622,11 +622,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x0) NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x3) { const Shape data_shape{0, 3}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {2, 1}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {3, 1}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {3, 1}); + const auto pad_val = 
op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -635,8 +635,8 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x3) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); - auto result = backend->create_tensor(element::f32, Shape{5, 5}); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -653,11 +653,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_0x3) NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_3x0) { const Shape data_shape{3, 0}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, 3}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 3}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -666,8 +666,8 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_3x0) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); - auto result = backend->create_tensor(element::f32, Shape{5, 5}); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); + auto result = backend->create_tensor(element::Type_t::f32, Shape{5, 5}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -684,11 +684,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_2d_3x0) NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2) { const Shape data_shape{1, 2, 2, 2}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1}); - const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, 0, 1, 1}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {42}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 0, 1, 1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 0, 1, 1}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {42}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -697,7 +697,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); // clang-format off copy_data(a, test::NDArray( { @@ -713,7 +713,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2) } }).get_vector()); // clang-format on - auto result = backend->create_tensor(element::f32, Shape{1, 2, 4, 4}); + auto result = 
backend->create_tensor(element::Type_t::f32, Shape{1, 2, 4, 4}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -742,11 +742,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_exterior_4d_1x2x2x2) NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d) { const Shape data_shape{1, 3, 2, 2}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, -1, 1, 1}); - const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, -1, 1, 1}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {42}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{4}, {0, -1, 1, 1}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{4}, {0, -1, 1, 1}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {42}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -755,7 +755,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); // clang-format off copy_data(a, test::NDArray( { @@ -776,7 +776,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_negative_exterior_4d) }).get_vector()); // clang-format on - auto result = backend->create_tensor(element::f32, Shape{1, 1, 4, 4}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1, 4, 4}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -803,11 +803,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym) { const Shape data_shape{2, 2, 4, 4}; const auto window_movement_strides = Strides{2, 2}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{4}, {0, 0, 0, 0}); - const auto pads_end = op::Constant::create(element::i64, Shape{4}, {0, 0, 2, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {42}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 0, 0, 0}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 0, 2, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {42}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::CONSTANT), @@ -816,7 +816,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{{{0, 1, 0, 2}, // img 0 chan 0 {0, 3, 2, 0}, @@ -839,7 +839,7 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym) {1, 0, 0, 0}}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, Shape{2, 2, 6, 6}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{2, 2, 6, 6}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -879,11 +879,11 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym) NGRAPH_TEST(${BACKEND_NAME}, pad_symmetric) { const Shape data_shape{2, 3}; - const auto data = 
make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); - const auto pads_begin = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pads_end = op::Constant::create(element::i64, Shape{2}, {1, 2}); - const auto pad_val = op::Constant::create(element::f32, Shape{}, {2112}); + const auto pads_begin = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pads_end = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); + const auto pad_val = op::Constant::create(element::Type_t::f32, Shape{}, {2112}); auto f = make_shared( make_shared(data, pads_begin, pads_end, pad_val, op::PadMode::SYMMETRIC), @@ -892,9 +892,9 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_symmetric) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, data_shape); + auto a = backend->create_tensor(element::Type_t::f32, data_shape); copy_data(a, test::NDArray({{1, 2, 3}, {4, 5, 6}}).get_vector()); - auto result = backend->create_tensor(element::f32, Shape{4, 7}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{4, 7}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/parameter_as_output.in.cpp b/ngraph/test/backend/parameter_as_output.in.cpp index b2b84e0e875..898c24691ff 100644 --- a/ngraph/test/backend/parameter_as_output.in.cpp +++ b/ngraph/test/backend/parameter_as_output.in.cpp @@ -31,14 +31,14 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, parameter_as_output) { Shape shape{3, 4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(A, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::f32, shape); - shared_ptr result = backend->create_tensor(element::f32, shape); + shared_ptr a = backend->create_tensor(element::Type_t::f32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::f32, shape); vector expected{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; vector zero(shape_size(shape), 0); diff --git a/ngraph/test/backend/partial_slice.in.cpp b/ngraph/test/backend/partial_slice.in.cpp index 61a322f9b31..4416bb21631 100644 --- a/ngraph/test/backend/partial_slice.in.cpp +++ b/ngraph/test/backend/partial_slice.in.cpp @@ -49,7 +49,7 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, partial_slice_static) { Shape shape_x{2, 3, 2}; - auto x = make_shared(element::f32, shape_x); + auto x = make_shared(element::Type_t::f32, shape_x); AxisVector axes{0, 1}; vector lower_bounds{1, 0}; vector upper_bounds{2, 2}; @@ -61,10 +61,10 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_static) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto t_x = backend->create_tensor(element::f32, shape_x); + auto t_x = backend->create_tensor(element::Type_t::f32, shape_x); vector v_x{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f}; copy_data(t_x, v_x); - auto t_r = backend->create_tensor(element::f32, Shape{1, 2, 2}); + auto t_r = backend->create_tensor(element::Type_t::f32, Shape{1, 2, 2}); auto handle = backend->compile(f); handle->call_with_validate({t_r}, {t_x}); @@ -76,7 +76,7 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_static) NGRAPH_TEST(${BACKEND_NAME}, partial_slice_partial_shape) { 
auto pshape_x = PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}; - auto x = make_shared(element::f32, pshape_x); + auto x = make_shared(element::Type_t::f32, pshape_x); AxisVector axes{0, 1}; vector lower_bounds{1, 0}; vector upper_bounds{2, 2}; @@ -89,10 +89,10 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_partial_shape) // Create some tensors for input/output Shape shape_x{2, 3, 2}; - auto t_x = backend->create_tensor(element::f32, shape_x); + auto t_x = backend->create_tensor(element::Type_t::f32, shape_x); vector v_x{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f}; copy_data(t_x, v_x); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({t_r}, {t_x}); @@ -104,7 +104,7 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_partial_shape) NGRAPH_TEST(${BACKEND_NAME}, partial_slice_unkown_rank) { auto pshape_x = PartialShape::dynamic(); - auto x = make_shared(element::f32, pshape_x); + auto x = make_shared(element::Type_t::f32, pshape_x); AxisVector axes{0, 1}; vector lower_bounds{1, 0}; vector upper_bounds{2, 2}; @@ -117,10 +117,10 @@ NGRAPH_TEST(${BACKEND_NAME}, partial_slice_unkown_rank) // Create some tensors for input/output Shape shape_x{2, 3, 2}; - auto t_x = backend->create_tensor(element::f32, shape_x); + auto t_x = backend->create_tensor(element::Type_t::f32, shape_x); vector v_x{0.f, 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f, 9.f, 10.f, 11.f}; copy_data(t_x, v_x); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({t_r}, {t_x}); diff --git a/ngraph/test/backend/power.in.cpp b/ngraph/test/backend/power.in.cpp index 91ed81d89a6..9c0ea5bea0d 100644 --- a/ngraph/test/backend/power.in.cpp +++ b/ngraph/test/backend/power.in.cpp @@ -48,8 +48,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, power) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); std::vector a{1, 2, 3, 5}; diff --git a/ngraph/test/backend/quantize_dequantize.in.cpp b/ngraph/test/backend/quantize_dequantize.in.cpp index 0da1e807c03..98c7779cbb8 100644 --- a/ngraph/test/backend/quantize_dequantize.in.cpp +++ b/ngraph/test/backend/quantize_dequantize.in.cpp @@ -34,8 +34,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; @@ -67,8 +67,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_zero_offset) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; @@ -100,8 +100,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_axes) Shape scale_offset_shape{4}; AxisSet quantization_axes{0}; - auto input_type = element::f32; - auto output_type 
= element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; @@ -134,8 +134,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int8) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -168,8 +168,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int8_zero_offset) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -202,8 +202,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int32) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i32; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i32; typedef float input_c_type; typedef int32_t output_c_type; @@ -236,8 +236,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_int32_zero_offset) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i32; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i32; typedef float input_c_type; typedef int32_t output_c_type; @@ -270,8 +270,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_uint8) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; @@ -302,8 +302,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_int8) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -335,8 +335,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_clamp_int32) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f64; - auto output_type = element::i32; + auto input_type = element::Type_t::f64; + auto output_type = element::Type_t::i32; // TODO: fails with input due to 32 bits typedef double input_c_type; @@ -369,8 +369,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_ZERO) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -401,8 +401,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_INFINITY) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -433,8 +433,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_UPWARD) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t 
output_c_type; @@ -465,8 +465,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_DOWNWARD) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -497,8 +497,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_NEAREST_TOWARD_EVEN) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -529,8 +529,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_TOWARD_INFINITY) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -566,8 +566,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_TOWARD_ZERO) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -603,8 +603,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_UP) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -635,8 +635,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_ROUND_DOWN) Shape scale_offset_shape; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::i8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::i8; typedef float input_c_type; typedef int8_t output_c_type; @@ -667,8 +667,8 @@ NGRAPH_TEST(${BACKEND_NAME}, quantize_dynamic_offset) Shape scale_offset_shape = {}; AxisSet quantization_axes; - auto input_type = element::f32; - auto output_type = element::u8; + auto input_type = element::Type_t::f32; + auto output_type = element::Type_t::u8; typedef float input_c_type; typedef uint8_t output_c_type; diff --git a/ngraph/test/backend/range.in.cpp b/ngraph/test/backend/range.in.cpp index 8aa97079652..5fa671bf6fa 100644 --- a/ngraph/test/backend/range.in.cpp +++ b/ngraph/test/backend/range.in.cpp @@ -42,9 +42,9 @@ struct RangeTest NGRAPH_TEST(${BACKEND_NAME}, range) { // Create a graph for f(start,stop,step) = Range(start,stop,step). 
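Every hunk in this section swaps only the spelling of the element type. A minimal sketch of why both spellings type-check, assuming `element::Type` keeps a non-explicit constructor from the `element::Type_t` enum (the header path below follows the nGraph source layout):

```C++
// Illustrative sketch only, not part of this patch. Assumes that
// ngraph::element::Type is implicitly constructible from the
// ngraph::element::Type_t enum, so every API taking const element::Type&
// (op::Constant::create, Backend::create_tensor, ...) accepts either form.
#include <iostream>
#include "ngraph/type/element_type.hpp"

int main()
{
    ngraph::element::Type t = ngraph::element::Type_t::f32; // enum -> Type
    // Comparison also works across the two, which the ASSERT_EQ against
    // element::Type_t::i32 in range.in.cpp below relies on:
    std::cout << std::boolalpha << (t == ngraph::element::Type_t::f32) << '\n'; // true
    return 0;
}
```

Because the conversion happens at each call site, none of the function signatures touched by these hunks change; only the argument spelling does.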
-    auto start = make_shared(element::i32, Shape{});
-    auto stop = make_shared(element::i32, Shape{});
-    auto step = make_shared(element::i32, Shape{});
+    auto start = make_shared(element::Type_t::i32, Shape{});
+    auto stop = make_shared(element::Type_t::i32, Shape{});
+    auto step = make_shared(element::Type_t::i32, Shape{});
     auto range = make_shared(start, stop, step);

     ASSERT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1)));
@@ -55,7 +55,7 @@ NGRAPH_TEST(${BACKEND_NAME}, range)

     auto ex = backend->compile(f);

-    auto t_r = backend->create_dynamic_tensor(element::i32, PartialShape::dynamic());
+    auto t_r = backend->create_dynamic_tensor(element::Type_t::i32, PartialShape::dynamic());

     std::vector> int32_tests = {
         RangeTest{0, 10, 1, Shape{10}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
@@ -65,9 +65,9 @@ NGRAPH_TEST(${BACKEND_NAME}, range)

     for (auto& test : int32_tests)
     {
-        auto t_start = backend->create_tensor(element::i32, Shape{});
-        auto t_stop = backend->create_tensor(element::i32, Shape{});
-        auto t_step = backend->create_tensor(element::i32, Shape{});
+        auto t_start = backend->create_tensor(element::Type_t::i32, Shape{});
+        auto t_stop = backend->create_tensor(element::Type_t::i32, Shape{});
+        auto t_step = backend->create_tensor(element::Type_t::i32, Shape{});

         copy_data(t_start, std::vector{test.start});
         copy_data(t_stop, std::vector{test.stop});
@@ -75,7 +75,7 @@ NGRAPH_TEST(${BACKEND_NAME}, range)

         ex->call_with_validate({t_r}, {t_start, t_stop, t_step});

-        ASSERT_EQ(t_r->get_element_type(), element::i32);
+        ASSERT_EQ(t_r->get_element_type(), element::Type_t::i32);
         ASSERT_EQ(t_r->get_shape(), test.expected_result_shape);

         auto results = read_vector(t_r);
diff --git a/ngraph/test/backend/reduce_max.in.cpp b/ngraph/test/backend/reduce_max.in.cpp
index efd3bc68b24..8a4022af26f 100644
--- a/ngraph/test/backend/reduce_max.in.cpp
+++ b/ngraph/test/backend/reduce_max.in.cpp
@@ -31,8 +31,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME});
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_to_scalar)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -48,8 +48,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_to_scalar_int8)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::i8, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::i8, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -65,9 +65,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_to_scalar_int8)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{2};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -83,9 +83,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -101,9 +101,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_int32)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -119,9 +119,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero)
 {
     Shape shape_a{3, 0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -140,18 +140,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero_int32)
 {
     Shape shape_a{3, 0};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);
     copy_data(result, vector({3, 3, 3}));

     int32_t minval = std::numeric_limits::has_infinity
@@ -168,18 +168,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_zero_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_cols_zero)
 {
     // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
     Shape shape_a{0, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{2};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3, 3}));

     auto handle = backend->compile(f);
@@ -192,18 +192,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_cols_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_vector_zero)
 {
     Shape shape_a{0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3}));

     auto handle = backend->compile(f);
@@ -214,18 +214,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_vector_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_to_scalar_zero_by_zero)
 {
     Shape shape_a{0, 0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3}));

     auto handle = backend->compile(f);
@@ -236,9 +236,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_to_scalar_zero_by_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_most_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -255,9 +255,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_most_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_least_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3};
-    auto axes = make_shared(element::i32, Shape{}, 2);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 2);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -274,9 +274,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_matrix_least_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_vector)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -293,9 +293,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_vector)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2});
+    auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -312,19 +312,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_int32)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2});
+    auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -334,19 +334,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_double)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f64, shape_a);
+    auto A = make_shared(element::Type_t::f64, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2});
+    auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f64, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f64, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f64, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f64, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -356,18 +356,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_to_scalar_double)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_eliminate_zero_dim)
 {
     Shape shape_a{3, 0, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 2};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     // Overwrite the initial result vector to make sure we're not just coincidentally getting the
     // right value.
@@ -385,17 +385,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_3d_eliminate_zero_dim)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, Shape{1, 1});
+    auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -405,17 +405,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar_int8)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::i8, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::i8, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i8, shape);
+    auto a = backend->create_tensor(element::Type_t::i8, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i8, Shape{1, 1});
+    auto result = backend->create_tensor(element::Type_t::i8, Shape{1, 1});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -425,18 +425,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_to_scalar_int8)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 2};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -446,18 +446,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 1};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -467,18 +467,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_int32)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{3, 1};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -488,18 +488,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero)
 {
     Shape shape_a{3, 0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 1};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3, 3, 3}));

     auto handle = backend->compile(f);
@@ -513,18 +513,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero_int32)
 {
     Shape shape_a{3, 0};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{3, 1};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);
     copy_data(result, vector({3, 3, 3}));

     int32_t minval = std::numeric_limits::has_infinity
@@ -541,18 +541,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_zero_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_cols_zero)
 {
     // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
     Shape shape_a{0, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 2};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3, 3}));

     auto handle = backend->compile(f);
@@ -565,18 +565,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_cols_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_vector_zero)
 {
     Shape shape_a{0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{1};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3}));

     auto handle = backend->compile(f);
@@ -587,18 +587,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_vector_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_to_scalar_zero_by_zero)
 {
     Shape shape_a{0, 0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 1};
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3}));

     auto handle = backend->compile(f);
@@ -609,19 +609,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_to_scalar_zero_by_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_most_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 3, 3};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -633,19 +633,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_most_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_least_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3, 1};
-    auto axes = make_shared(element::i32, Shape{}, 2);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 2);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -657,19 +657,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_matrix_least_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_vector)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 1, 3};
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -681,19 +681,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_vector)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 1, 1};
-    auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2});
+    auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -704,19 +704,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_int32)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{1, 1, 1};
-    auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2});
+    auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -726,19 +726,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_double)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f64, shape_a);
+    auto A = make_shared(element::Type_t::f64, shape_a);
     Shape shape_rt{1, 1, 1};
-    auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2});
+    auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f64, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f64, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f64, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f64, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -748,18 +748,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_to_scalar_double)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_eliminate_zero_dim)
 {
     Shape shape_a{3, 0, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 1, 2};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     // Overwrite the initial result vector to make sure we're not just coincidentally getting the
     // right value.
@@ -776,8 +776,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_3d_eliminate_zero_dim)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns_dynamic)
 {
-    auto A = make_shared(element::f32, PartialShape::dynamic());
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto A = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -785,9 +785,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -796,8 +796,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_columns_dynamic)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_dynamic)
 {
-    auto A = make_shared(element::f32, PartialShape::dynamic());
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto A = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -805,9 +805,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -816,8 +816,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_matrix_rows_dynamic)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns_dynamic)
 {
-    auto A = make_shared(element::f32, PartialShape::dynamic());
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto A = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

@@ -825,9 +825,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -836,8 +836,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_columns_dynamic)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_dynamic)
 {
-    auto A = make_shared(element::f32, PartialShape::dynamic());
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto A = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

@@ -845,11 +845,11 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_max_keep_matrix_rows_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
     EXPECT_TRUE(test::all_close_f((vector{2, 4, 6}), read_vector(result)));
-}
\ No newline at end of file
+}
diff --git a/ngraph/test/backend/reduce_mean.in.cpp b/ngraph/test/backend/reduce_mean.in.cpp
index 242f6907ea7..fc268aa88d8 100644
--- a/ngraph/test/backend/reduce_mean.in.cpp
+++ b/ngraph/test/backend/reduce_mean.in.cpp
@@ -33,17 +33,17 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_to_scalar)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, Shape{});
+    auto result = backend->create_tensor(element::Type_t::f32, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -54,17 +54,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_to_scalar_int8)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::i8, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::i8, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i8, shape);
+    auto a = backend->create_tensor(element::Type_t::i8, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i8, Shape{});
+    auto result = backend->create_tensor(element::Type_t::i8, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -74,18 +74,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_to_scalar_int8)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{2};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -96,18 +96,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -118,18 +118,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_int32)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -141,17 +141,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_to_scalar)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, Shape{1, 1});
+    auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -162,17 +162,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_to_scalar_int8)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::i8, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::i8, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i8, shape);
+    auto a = backend->create_tensor(element::Type_t::i8, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i8, Shape{1, 1});
+    auto result = backend->create_tensor(element::Type_t::i8, Shape{1, 1});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -182,18 +182,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_to_scalar_int8)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 2};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -204,18 +204,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 1};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -226,18 +226,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows_int32)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{3, 1};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -248,8 +248,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns_dynamic)
 {
-    auto A = make_shared(element::f32, PartialShape::dynamic());
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto A = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -257,9 +257,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -269,8 +269,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_columns_dynamic)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_dynamic)
 {
-    auto A = make_shared(element::f32, PartialShape::dynamic());
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto A = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

@@ -278,9 +278,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -290,8 +290,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_matrix_rows_dynamic)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns_dynamic)
 {
-    auto A = make_shared(element::f32, PartialShape::dynamic());
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto A = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

@@ -299,9 +299,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -311,8 +311,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_columns_dynamic)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows_dynamic)
 {
-    auto A = make_shared(element::f32, PartialShape::dynamic());
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto A = make_shared(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, true), ParameterVector{A});

@@ -320,9 +320,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_mean_keep_matrix_rows_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
diff --git a/ngraph/test/backend/reduce_min.in.cpp b/ngraph/test/backend/reduce_min.in.cpp
index ca95bacaf67..31ed9a00de6 100644
--- a/ngraph/test/backend/reduce_min.in.cpp
+++ b/ngraph/test/backend/reduce_min.in.cpp
@@ -33,17 +33,17 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_to_scalar)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::f32, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::f32, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, Shape{});
+    auto result = backend->create_tensor(element::Type_t::f32, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -54,17 +54,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_to_scalar_int8)
 {
     Shape shape{2, 2};
-    auto A = make_shared(element::i8, shape);
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto A = make_shared(element::Type_t::i8, shape);
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i8, shape);
+    auto a = backend->create_tensor(element::Type_t::i8, shape);
     copy_data(a, vector{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i8, Shape{});
+    auto result = backend->create_tensor(element::Type_t::i8, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -74,18 +74,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_to_scalar_int8)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{2};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -96,18 +96,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -118,18 +118,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_int32)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -139,18 +139,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_zero)
 {
     Shape shape_a{3, 0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{}, 1);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3, 3, 3}));

     auto handle = backend->compile(f);
@@ -165,18 +165,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_cols_zero)
 {
     // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
     Shape shape_a{0, 2};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{2};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3, 3}));

     auto handle = backend->compile(f);
@@ -189,18 +189,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_cols_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_vector_zero)
 {
     Shape shape_a{0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3}));

     auto handle = backend->compile(f);
@@ -211,18 +211,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_vector_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_to_scalar_zero_by_zero)
 {
     Shape shape_a{0, 0};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector({3}));

     auto handle = backend->compile(f);
@@ -233,19 +233,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_to_scalar_zero_by_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_matrix_most_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3};
-    auto axes = make_shared(element::i32, Shape{}, 0);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -257,19 +257,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_matrix_most_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_matrix_least_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3};
-    auto axes = make_shared(element::i32, Shape{}, 2);
+    auto axes = make_shared(element::Type_t::i32, Shape{}, 2);
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -281,19 +281,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_matrix_least_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_vector)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared(element::i32, Shape{2}, vector{0, 1});
+    auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -304,19 +304,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_vector)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_scalar)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::f32, shape_a);
+    auto A = make_shared(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2});
+    auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2});
     auto f =
         make_shared(make_shared(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                         13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -327,19 +327,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_scalar_int32)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared(element::i32, shape_a);
+    auto A = make_shared(element::Type_t::i32, shape_a);
     Shape shape_rt{};
- auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -349,18 +349,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -378,17 +378,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -399,17 +399,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_to_scalar_int8) { Shape shape{2, 2}; - auto A = make_shared(element::i8, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::i8, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape); + auto a = backend->create_tensor(element::Type_t::i8, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::i8, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::i8, Shape{1, 1}); auto handle = 
backend->compile(f); handle->call_with_validate({result}, {a}); @@ -419,18 +419,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_to_scalar_int8) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -441,18 +441,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -463,18 +463,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_int32) { Shape shape_a{3, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -484,18 +484,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, 
shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = backend->compile(f); @@ -510,18 +510,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -534,18 +534,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -556,18 +556,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -578,19 +578,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = 
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -602,19 +602,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_matrix_most_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_matrix_least_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 2);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 2);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -626,19 +626,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_matrix_least_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_vector)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 1, 3};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -649,19 +649,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_vector)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_scalar)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 1, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{3}, vector<int32_t>{0, 1, 2});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{3}, vector<int32_t>{0, 1, 2});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -672,19 +672,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_to_scalar_int32)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::i32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape_a);
     Shape shape_rt{1, 1, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{3}, vector<int32_t>{0, 1, 2});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{3}, vector<int32_t>{0, 1, 2});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                  13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -694,18 +694,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_eliminate_zero_dim)
 {
     Shape shape_a{3, 0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 1, 2};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     // Overwrite the initial result vector to make sure we're not just coincidentally getting the
     // right value.
@@ -722,8 +722,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_3d_eliminate_zero_dim)

 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns_dynamic)
 {
-    auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, false), ParameterVector{A});
@@ -731,9 +731,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -743,8 +743,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_columns_dynamic)

 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_dynamic)
 {
-    auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, false), ParameterVector{A});
@@ -752,9 +752,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -764,8 +764,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_matrix_rows_dynamic)

 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns_dynamic)
 {
-    auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, true), ParameterVector{A});
@@ -773,9 +773,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -785,8 +785,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_columns_dynamic)

 NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_dynamic)
 {
-    auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceMin>(A, axes, true), ParameterVector{A});
@@ -794,9 +794,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_min_keep_matrix_rows_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
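
For readers skimming these hunks: every change in the reduction test files is the same mechanical rewrite, replacing the deprecated `element::f32`-style shorthands with explicit `element::Type_t` enumerators. A minimal self-contained sketch of the post-change test idiom follows (assuming the ngraph opset1 API exercised above; the helper name is illustrative only, not part of the patch):

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Hypothetical helper: builds the graph the reduction tests compile.
// Element types are now spelled as element::Type_t enumerators when
// constructing Parameters and Constants.
std::shared_ptr<Function> make_reduce_min_graph()
{
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 2});
    auto axes = op::Constant::create(element::Type_t::i32, Shape{}, {0});
    auto reduce = std::make_shared<op::v1::ReduceMin>(A, axes, false /*keep_dims*/);
    return std::make_shared<Function>(reduce, ParameterVector{A});
}
```
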
diff --git a/ngraph/test/backend/reduce_prod.in.cpp b/ngraph/test/backend/reduce_prod.in.cpp
index 46d7427b1d4..87df2a57535 100644
--- a/ngraph/test/backend/reduce_prod.in.cpp
+++ b/ngraph/test/backend/reduce_prod.in.cpp
@@ -33,17 +33,17 @@ static string s_manifest = "${MANIFEST}";
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, Shape{});
+    auto result = backend->create_tensor(element::Type_t::f32, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -53,18 +53,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{2};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -74,18 +74,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -95,18 +95,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_zero)
 {
     Shape shape_a{3, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector<float>({3, 3, 3}));

     auto handle = backend->compile(f);
@@ -118,18 +118,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_cols_zero)
 {
     // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
     Shape shape_a{0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{2};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector<float>({3, 3}));

     auto handle = backend->compile(f);
@@ -140,18 +140,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_cols_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_vector_zero)
 {
     Shape shape_a{0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector<float>({3}));

     auto handle = backend->compile(f);
@@ -162,18 +162,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_vector_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_to_scalar_zero_by_zero)
 {
     Shape shape_a{0, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector<float>({3}));

     auto handle = backend->compile(f);
@@ -184,19 +184,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_to_scalar_zero_by_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_most_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -215,19 +215,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_most_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_least_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 2);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 2);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -246,19 +246,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_matrix_least_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_vector)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -272,19 +272,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_vector)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_scalar)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{3}, vector<int32_t>{0, 1, 2});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{3}, vector<int32_t>{0, 1, 2});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -298,18 +298,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_eliminate_zero_dim)
 {
     Shape shape_a{3, 0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 2};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     // Overwrite the initial result vector to make sure we're not just coincidentally getting the
     // right value.
@@ -323,18 +323,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_3d_eliminate_zero_dim)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_2d_to_scalar_int32)
 {
     Shape shape_a{3, 3};
-    auto A = make_shared<op::Parameter>(element::i32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape_a);
     Shape shape_rt{};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -344,17 +344,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_2d_to_scalar_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar_int32)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(a, vector<int32_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i32, Shape{});
+    auto result = backend->create_tensor(element::Type_t::i32, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -364,17 +364,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar_int8)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i8, shape);
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::i8, shape);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i8, shape);
+    auto a = backend->create_tensor(element::Type_t::i8, shape);
     copy_data(a, vector<int8_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i8, Shape{});
+    auto result = backend->create_tensor(element::Type_t::i8, Shape{});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -386,17 +386,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_to_scalar_int8)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape);
+    auto a = backend->create_tensor(element::Type_t::f32, shape);
     copy_data(a, vector<float>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::f32, Shape{1, 1});
+    auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -406,18 +406,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 2};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -427,18 +427,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows)
 {
     Shape shape_a{3, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -448,18 +448,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows_zero)
 {
     Shape shape_a{3, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector<float>({3, 3, 3}));

     auto handle = backend->compile(f);
@@ -471,18 +471,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_cols_zero)
 {
     // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})).
     Shape shape_a{0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 2};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector<float>({3, 3}));

     auto handle = backend->compile(f);
@@ -493,18 +493,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_cols_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_vector_zero)
 {
     Shape shape_a{0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector<float>({3}));

     auto handle = backend->compile(f);
@@ -515,18 +515,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_vector_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_to_scalar_zero_by_zero)
 {
     Shape shape_a{0, 0};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);
     copy_data(result, vector<float>({3}));

     auto handle = backend->compile(f);
@@ -537,19 +537,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_to_scalar_zero_by_zero)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_most_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 3, 3};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -568,19 +568,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_most_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_least_sig)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 3, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 2);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 2);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -599,19 +599,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_matrix_least_sig)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_vector)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 1, 3};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -625,19 +625,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_vector)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_to_scalar)
 {
     Shape shape_a{3, 3, 3};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{1, 1, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{3}, vector<int32_t>{0, 1, 2});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{3}, vector<int32_t>{0, 1, 2});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -651,18 +651,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_eliminate_zero_dim)
 {
     Shape shape_a{3, 0, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
     Shape shape_rt{3, 1, 2};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{});
-    auto result = backend->create_tensor(element::f32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::f32, shape_rt);

     // Overwrite the initial result vector to make sure we're not just coincidentally getting the
     // right value.
@@ -676,18 +676,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_3d_eliminate_zero_dim)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_2d_to_scalar_int32)
 {
     Shape shape_a{3, 3};
-    auto A = make_shared<op::Parameter>(element::i32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape_a);
     Shape shape_rt{1, 1};
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::i32, shape_a);
     copy_data(a, vector<int32_t>{1, 2, 3, 4, 5, 6, 7, 8, 9});
-    auto result = backend->create_tensor(element::i32, shape_rt);
+    auto result = backend->create_tensor(element::Type_t::i32, shape_rt);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -697,17 +697,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_2d_to_scalar_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar_int32)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i32, shape);
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i32, shape);
+    auto a = backend->create_tensor(element::Type_t::i32, shape);
     copy_data(a, vector<int32_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i32, Shape{1, 1});
+    auto result = backend->create_tensor(element::Type_t::i32, Shape{1, 1});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -717,17 +717,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar_int32)
 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar_int8)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::i8, shape);
-    auto axes = make_shared<op::Constant>(element::i32, Shape{2}, vector<int32_t>{0, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::i8, shape);
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{2}, vector<int32_t>{0, 1});
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});

     auto backend = runtime::Backend::create("${BACKEND_NAME}");

     // Create some tensors for input/output
-    auto a = backend->create_tensor(element::i8, shape);
+    auto a = backend->create_tensor(element::Type_t::i8, shape);
     copy_data(a, vector<int8_t>{1, 2, 3, 4});
-    auto result = backend->create_tensor(element::i8, Shape{1, 1});
+    auto result = backend->create_tensor(element::Type_t::i8, Shape{1, 1});

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -738,8 +738,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_to_scalar_int8)

 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns_dynamic)
 {
-    auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
@@ -747,9 +747,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -758,8 +758,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_columns_dynamic)

 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_dynamic)
 {
-    auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, false), ParameterVector{A});
@@ -767,9 +767,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -778,8 +778,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_matrix_rows_dynamic)

 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns_dynamic)
 {
-    auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 0);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 0);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
@@ -787,9 +787,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
@@ -798,8 +798,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_columns_dynamic)

 NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows_dynamic)
 {
-    auto A = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto axes = make_shared<op::Constant>(element::i32, Shape{}, 1);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto axes = make_shared<op::Constant>(element::Type_t::i32, Shape{}, 1);
     auto f =
         make_shared<Function>(make_shared<op::v1::ReduceProd>(A, axes, true), ParameterVector{A});
@@ -807,9 +807,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_product_keep_matrix_rows_dynamic)

     // Create some tensors for input/output
     Shape shape_a{3, 2};
-    auto a = backend->create_tensor(element::f32, shape_a);
+    auto a = backend->create_tensor(element::Type_t::f32, shape_a);
     copy_data(a, vector<float>{1, 2, 3, 4, 5, 6});
-    auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
+    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
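
The `*_dynamic` cases above differ from the static ones only in that the Parameter carries no fixed shape and the output is allocated with `create_dynamic_tensor`, so the backend infers the result shape at call time. A rough sketch of that flow (illustration only; assumes a backend created with the dynamic-shape flag that the ngraph tests use, and omits the `copy_data` input fill shown in the hunks above):

```C++
#include "ngraph/ngraph.hpp"

using namespace ngraph;

void run_dynamic_reduce_sketch()
{
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
    auto axes = op::Constant::create(element::Type_t::i32, Shape{}, {1});
    auto f = std::make_shared<Function>(std::make_shared<op::v1::ReduceProd>(A, axes, false),
                                        ParameterVector{A});

    // Backend with dynamic-shape support; the output shape is resolved per call.
    auto backend = runtime::Backend::create("INTERPRETER", true);
    auto handle = backend->compile(f);

    auto a = backend->create_tensor(element::Type_t::f32, Shape{3, 2});
    // (test data omitted; see the copy_data calls in the hunks above)
    auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic());
    handle->call_with_validate({result}, {a});
}
```
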
backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -114,9 +114,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_6d) { Shape shape_a{2, 6, 4, 5, 7, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2, 4, 5, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{1, 4}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{1, 4}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -124,10 +124,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_6d) auto backend_ref = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a_wrk = backend_wrk->create_tensor(element::f32, shape_a); - auto a_ref = backend_ref->create_tensor(element::f32, shape_a); - auto result_wrk = backend_wrk->create_tensor(element::f32, shape_rt); - auto result_ref = backend_ref->create_tensor(element::f32, shape_rt); + auto a_wrk = backend_wrk->create_tensor(element::Type_t::f32, shape_a); + auto a_ref = backend_ref->create_tensor(element::Type_t::f32, shape_a); + auto result_wrk = backend_wrk->create_tensor(element::Type_t::f32, shape_rt); + auto result_ref = backend_ref->create_tensor(element::Type_t::f32, shape_rt); vector inp_data(shape_size(shape_a)); iota(inp_data.begin(), inp_data.end(), 1.f); @@ -145,18 +145,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_6d) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -166,18 +166,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = 
backend->compile(f); @@ -189,18 +189,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -211,18 +211,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -233,18 +233,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_to_scalar_zero_by_zero) { Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -255,19 +255,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, 
vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -286,19 +286,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -317,19 +317,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -342,19 +342,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -367,19 +367,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar) 
NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar_int32) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{0x40000001, 10, 19, 4, 13, 22, 7, 16, 25, 2, 11, 20, 5, 14, 23, 8, 17, 26, 3, 12, 21, 6, 15, 24, 9, 18, 27}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -391,18 +391,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -416,18 +416,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim_int32) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. 
@@ -441,18 +441,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_3d_eliminate_zero_dim_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar) { Shape shape_a{3, 3, 3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{5}, vector{0, 1, 2, 3, 4}); + auto axes = + make_shared(element::Type_t::i32, Shape{5}, vector{0, 1, 2, 3, 4}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, std::vector(std::pow(3, 5), 1)); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -462,18 +463,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar_int32) { Shape shape_a{3, 3, 3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{5}, vector{0, 1, 2, 3, 4}); + auto axes = + make_shared(element::Type_t::i32, Shape{5}, vector{0, 1, 2, 3, 4}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, std::vector(std::pow(3, 5), 1)); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -483,18 +485,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_5d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_2d_to_scalar_int8) { Shape shape_a{3, 3}; - auto A = make_shared(element::i8, shape_a); + auto A = make_shared(element::Type_t::i8, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape_a); + auto a = backend->create_tensor(element::Type_t::i8, shape_a); copy_data(a, std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - auto result = backend->create_tensor(element::i8, shape_rt); + auto result = backend->create_tensor(element::Type_t::i8, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -505,17 +507,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_trivial_in_double) { Shape shape{4, 3}; Shape rshape{3}; - auto A = make_shared(element::f64, shape); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f64, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape); + auto 
a = backend->create_tensor(element::Type_t::f64, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result = backend->create_tensor(element::f64, rshape); + auto result = backend->create_tensor(element::Type_t::f64, rshape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -533,10 +535,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_acc) return; } Shape shape_a{10, 10, 10, 30}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{10}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{1, 2, 3}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -568,10 +570,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_acc_double) return; } Shape shape_a{10, 10, 20, 300}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{10}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{1, 2, 3}); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -601,10 +603,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_float) return; } Shape shape_a{20}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -632,10 +634,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_double) return; } Shape shape_a{20}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -676,9 +678,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_stable_simple_double) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) { // Create a graph for f(x,axes:int32) = Sum(x,Convert(axes)). 
- auto x = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto axes_i64 = make_shared(axes, element::i64); + auto x = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto axes_i64 = make_shared(axes, element::Type_t::i64); auto sum = make_shared(x, axes_i64, false); ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic()); @@ -689,7 +692,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); std::vector x_shapes{ Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}}; @@ -707,8 +710,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) for (size_t i = 0; i < x_shapes.size(); i++) { - auto t_x = backend->create_tensor(element::f32, x_shapes[i]); - auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()}); + auto t_x = backend->create_tensor(element::Type_t::f32, x_shapes[i]); + auto t_axes = backend->create_tensor(element::Type_t::i32, Shape{axeses[i].size()}); copy_data(t_x, inputs[i]); copy_data(t_axes, axeses[i]); @@ -726,8 +729,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) { Shape shape{7, 4}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -736,7 +739,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{-infi, 0, 0, infi}, {infi, 100, -100, -infi}, @@ -746,7 +749,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) {infi, infi, infi, -infi}, {infi, std::nanf(""), 42, infi}}) .get_vector()); - auto result = backend->create_tensor(element::f32, Shape{7}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{7}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -766,17 +769,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_inf) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_to_scalar) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4}); - auto result = backend->create_tensor(element::f32, Shape{1, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -786,8 +789,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, 
reduce_sum_keep_large_1d_to_scalar) { Shape shape{1000000}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -802,9 +805,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_large_1d_to_scalar) v_a[i] = static_cast(random_generator() % 255); r += static_cast(v_a[i]); } - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, v_a); - auto result = backend->create_tensor(element::f32, Shape{1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -816,18 +819,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_large_1d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -837,9 +840,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_6d) { Shape shape_a{2, 6, 4, 5, 7, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2, 1, 4, 5, 1, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{1, 4}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{1, 4}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -847,10 +850,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_6d) auto backend_ref = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a_wrk = backend_wrk->create_tensor(element::f32, shape_a); - auto a_ref = backend_ref->create_tensor(element::f32, shape_a); - auto result_wrk = backend_wrk->create_tensor(element::f32, shape_rt); - auto result_ref = backend_ref->create_tensor(element::f32, shape_rt); + auto a_wrk = backend_wrk->create_tensor(element::Type_t::f32, shape_a); + auto a_ref = backend_ref->create_tensor(element::Type_t::f32, shape_a); + auto result_wrk = backend_wrk->create_tensor(element::Type_t::f32, shape_rt); + auto result_ref = backend_ref->create_tensor(element::Type_t::f32, shape_rt); vector inp_data(shape_size(shape_a)); iota(inp_data.begin(), inp_data.end(), 1.f); @@ -868,18 +871,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_6d) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = 
make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -889,18 +892,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows_zero) { Shape shape_a{3, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3, 3})); auto handle = backend->compile(f); @@ -912,18 +915,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_cols_zero) { // Now the reduction (g(x:float32[2,2],y:float32[]) = reduce(x,y,f,axes={})). Shape shape_a{0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 2}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3, 3})); auto handle = backend->compile(f); @@ -934,18 +937,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_cols_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_vector_zero) { Shape shape_a{0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -956,18 +959,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_vector_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_to_scalar_zero_by_zero) { 
Shape shape_a{0, 0}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); copy_data(result, vector({3})); auto handle = backend->compile(f); @@ -978,19 +981,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_to_scalar_zero_by_zero) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_most_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 3, 3}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1009,19 +1012,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_most_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_least_sig) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 3, 1}; - auto axes = make_shared(element::i32, Shape{}, 2); + auto axes = make_shared(element::Type_t::i32, Shape{}, 2); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1040,19 +1043,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_matrix_least_sig) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_vector) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 3}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1065,19 +1068,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_vector) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1090,19 +1093,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar_int32) { Shape shape_a{3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{0, 1, 2}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{0, 1, 2}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{0x40000001, 10, 19, 4, 13, 22, 7, 16, 25, 2, 11, 20, 5, 14, 23, 8, 17, 26, 3, 12, 21, 6, 15, 24, 9, 18, 27}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1114,18 +1117,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); // Overwrite the initial result vector to make sure we're not just 
coincidentally getting the // right value. @@ -1139,18 +1142,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim_int32) { Shape shape_a{3, 0, 2}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{3, 1, 2}; - auto axes = make_shared(element::i32, Shape{}, 1); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); // Overwrite the initial result vector to make sure we're not just coincidentally getting the // right value. @@ -1164,18 +1167,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_3d_eliminate_zero_dim_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar) { Shape shape_a{3, 3, 3, 3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1, 1, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{5}, vector{0, 1, 2, 3, 4}); + auto axes = + make_shared(element::Type_t::i32, Shape{5}, vector{0, 1, 2, 3, 4}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, std::vector(std::pow(3, 5), 1)); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1185,18 +1189,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar_int32) { Shape shape_a{3, 3, 3, 3, 3}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); Shape shape_rt{1, 1, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{5}, vector{0, 1, 2, 3, 4}); + auto axes = + make_shared(element::Type_t::i32, Shape{5}, vector{0, 1, 2, 3, 4}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, std::vector(std::pow(3, 5), 1)); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1206,18 +1211,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_5d_to_scalar_int32) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_2d_to_scalar_int8) { Shape shape_a{3, 3}; - auto A = make_shared(element::i8, shape_a); + auto A = make_shared(element::Type_t::i8, shape_a); Shape shape_rt{1, 1}; - auto axes = make_shared(element::i32, Shape{2}, vector{0, 1}); + auto axes = make_shared(element::Type_t::i32, Shape{2}, vector{0, 1}); auto f = 
make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i8, shape_a); + auto a = backend->create_tensor(element::Type_t::i8, shape_a); copy_data(a, std::vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - auto result = backend->create_tensor(element::i8, shape_rt); + auto result = backend->create_tensor(element::Type_t::i8, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1228,17 +1233,17 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_trivial_in_double) { Shape shape{4, 3}; Shape rshape{1, 3}; - auto A = make_shared(element::f64, shape); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f64, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f64, shape); + auto a = backend->create_tensor(element::Type_t::f64, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result = backend->create_tensor(element::f64, rshape); + auto result = backend->create_tensor(element::Type_t::f64, rshape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1256,10 +1261,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_acc) return; } Shape shape_a{10, 10, 10, 30}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{10, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{1, 2, 3}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1291,10 +1296,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_acc_double) return; } Shape shape_a{10, 10, 20, 300}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{10, 1, 1, 1}; - auto axes = make_shared(element::i32, Shape{3}, vector{1, 2, 3}); + auto axes = make_shared(element::Type_t::i32, Shape{3}, vector{1, 2, 3}); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1324,10 +1329,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_float) return; } Shape shape_a{20}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1355,10 +1360,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_double) return; } Shape shape_a{20}; - auto A = make_shared(element::f64, shape_a); + auto A = make_shared(element::Type_t::f64, shape_a); Shape shape_rt{1}; - auto axes = make_shared(element::i32, Shape{}, 0); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1399,9 +1404,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_stable_simple_double) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) { // Create a graph for f(x,axes:int32) = Sum(x,Convert(axes)). 
- auto x = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto axes_i64 = make_shared(axes, element::i64); + auto x = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto axes_i64 = make_shared(axes, element::Type_t::i64); auto sum = make_shared(x, axes_i64, true); ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic()); @@ -1412,7 +1418,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); std::vector x_shapes{ Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{2, 3}, Shape{5}, Shape{5}}; @@ -1430,8 +1436,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) for (size_t i = 0; i < x_shapes.size(); i++) { - auto t_x = backend->create_tensor(element::f32, x_shapes[i]); - auto t_axes = backend->create_tensor(element::i32, Shape{axeses[i].size()}); + auto t_x = backend->create_tensor(element::Type_t::f32, x_shapes[i]); + auto t_axes = backend->create_tensor(element::Type_t::i32, Shape{axeses[i].size()}); copy_data(t_x, inputs[i]); copy_data(t_axes, axeses[i]); @@ -1449,8 +1455,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) { Shape shape{7, 4}; - auto A = make_shared(element::f32, shape); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, shape); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1459,7 +1465,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{-infi, 0, 0, infi}, {infi, 100, -100, -infi}, @@ -1469,7 +1475,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) {infi, infi, infi, -infi}, {infi, std::nanf(""), 42, infi}}) .get_vector()); - auto result = backend->create_tensor(element::f32, Shape{7, 1}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{7, 1}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1488,8 +1494,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_inf) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -1497,9 +1503,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); 
handle->call_with_validate({result}, {a}); @@ -1508,8 +1514,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, false), ParameterVector{A}); @@ -1517,9 +1523,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1528,8 +1534,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_matrix_rows_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 0); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 0); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1537,9 +1543,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -1548,8 +1554,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_columns_dynamic) NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows_dynamic) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto axes = make_shared(element::i32, Shape{}, 1); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto axes = make_shared(element::Type_t::i32, Shape{}, 1); auto f = make_shared(make_shared(A, axes, true), ParameterVector{A}); @@ -1557,9 +1563,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reduce_sum_keep_matrix_rows_dynamic) // Create some tensors for input/output Shape shape_a{3, 2}; - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto result = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/region_yolo.in.cpp b/ngraph/test/backend/region_yolo.in.cpp index 8d520c4929a..74fb9242391 100644 --- a/ngraph/test/backend/region_yolo.in.cpp +++ b/ngraph/test/backend/region_yolo.in.cpp @@ -45,7 +45,7 @@ NGRAPH_TEST(${BACKEND_NAME}, region_yolo_v2_caffe) Shape input_shape{batch, channels, height, width}; Shape output_shape{batch, channels * 
height * width}; - auto A = make_shared(element::f32, input_shape); + auto A = make_shared(element::Type_t::f32, input_shape); auto R = make_shared(A, coords, classes, num, true, mask, 1, 3); auto f = make_shared(R, ParameterVector{A}); @@ -71,7 +71,7 @@ NGRAPH_TEST(${BACKEND_NAME}, region_yolo_v3_mxnet) Shape shape{batch, channels, height, width}; const auto count = shape_size(shape); - const auto A = make_shared(element::f32, shape); + const auto A = make_shared(element::Type_t::f32, shape); const auto R = make_shared(A, coords, classes, num, false, mask, 1, 3); const auto f = make_shared(R, ParameterVector{A}); diff --git a/ngraph/test/backend/relu.in.cpp b/ngraph/test/backend/relu.in.cpp index e36f45240f5..00aa5d4e51d 100644 --- a/ngraph/test/backend/relu.in.cpp +++ b/ngraph/test/backend/relu.in.cpp @@ -35,16 +35,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop) { auto shape_a = Shape{2, 5}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 5}; auto f = make_shared(relu, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; auto handle = backend->compile(f); @@ -55,16 +55,16 @@ NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop) NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop_i32) { auto shape_a = Shape{2, 5}; - auto A = make_shared(element::i32, shape_a); + auto A = make_shared(element::Type_t::i32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 5}; auto f = make_shared(relu, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::i32, shape_a); + auto a = backend->create_tensor(element::Type_t::i32, shape_a); copy_data(a, vector{1, 8, -8, 17, -2, 1, 8, -8, 17, -1}); - auto result = backend->create_tensor(element::i32, shape_rt); + auto result = backend->create_tensor(element::Type_t::i32, shape_rt); vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; auto handle = backend->compile(f); @@ -75,16 +75,16 @@ NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop_i32) NGRAPH_TEST(${BACKEND_NAME}, relu_4Dfprop) { auto shape_a = Shape{2, 2, 2, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); auto relu = make_shared(A); auto shape_rt = Shape{2, 2, 2, 2}; auto f = make_shared(relu, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5, 1}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1, 8, 0, 17, 0, 1}; auto handle = backend->compile(f); @@ -95,17 +95,17 @@ NGRAPH_TEST(${BACKEND_NAME}, relu_4Dfprop) NGRAPH_TEST(${BACKEND_NAME}, fuse_max_with_constant_zero_input_as_relu) { auto shape_a = Shape{2, 5}; - auto A = op::Constant::create(element::f32, shape_a, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); - 
auto B = make_shared(element::f32, shape_a); + auto A = op::Constant::create(element::Type_t::f32, shape_a, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}); + auto B = make_shared(element::Type_t::f32, shape_a); auto max = make_shared(A, B); auto shape_rt = Shape{2, 5}; auto f = make_shared(max, ParameterVector{B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto b = backend->create_tensor(element::f32, shape_a); + auto b = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(b, vector{1, 8, -8, 17, -0.5, 1, 8, -8, 17, -0.5}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); vector expected{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; auto handle = backend->compile(f); diff --git a/ngraph/test/backend/reorg_yolo.in.cpp b/ngraph/test/backend/reorg_yolo.in.cpp index 0389a2c4b25..229407e8a85 100644 --- a/ngraph/test/backend/reorg_yolo.in.cpp +++ b/ngraph/test/backend/reorg_yolo.in.cpp @@ -48,7 +48,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reorg_yolo_stride_2) { // in_shape [N,C,H,W] const auto in_shape = Shape{1, 8, 4, 4}; - auto p = make_shared(element::f32, in_shape); + auto p = make_shared(element::Type_t::f32, in_shape); size_t stride = 2; auto reorg_yolo = make_shared(p, Strides{stride}); auto fun = make_shared(OutputVector{reorg_yolo}, ParameterVector{p}); @@ -78,7 +78,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reorg_yolo_stride_3) { // in_shape [N,C,H,W] const auto in_shape = Shape{1, 9, 3, 3}; - auto p = make_shared(element::f32, in_shape); + auto p = make_shared(element::Type_t::f32, in_shape); size_t stride = 3; auto reorg_yolo = make_shared(p, Strides{stride}); auto fun = make_shared(OutputVector{reorg_yolo}, ParameterVector{p}); diff --git a/ngraph/test/backend/reshape.in.cpp b/ngraph/test/backend/reshape.in.cpp index 130629430b5..5034e8e20ec 100644 --- a/ngraph/test/backend/reshape.in.cpp +++ b/ngraph/test/backend/reshape.in.cpp @@ -47,18 +47,18 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, reshape_t2v_012) { Shape shape_a{2, 2, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{12}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -70,18 +70,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2v_012) NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_012) { Shape shape_a{1, 1, 1}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = 
backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -92,18 +92,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_012) NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_120) { Shape shape_a{1, 1, 1}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -114,18 +114,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_t2s_120) NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{1, 1, 1, 1, 1, 1}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{42}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -136,18 +136,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t) NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t1) { Shape shape_a{}; - auto A = make_shared(element::boolean, shape_a); + auto A = make_shared(element::Type_t::boolean, shape_a); Shape shape_r{1}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape_a); + auto a = backend->create_tensor(element::Type_t::boolean, shape_a); copy_data(a, vector{42}); - auto result = backend->create_tensor(element::boolean, shape_r); + auto result = backend->create_tensor(element::Type_t::boolean, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -157,18 +157,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_s2t1) NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_col) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 1}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), 
false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -179,18 +179,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_col) NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_row) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{1, 3}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -201,18 +201,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2m_row) NGRAPH_TEST(${BACKEND_NAME}, reshape_v2t_middle) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{1, 3, 1}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -223,18 +223,18 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_v2t_middle) NGRAPH_TEST(${BACKEND_NAME}, reshape_m2m_same) { Shape shape_a{3, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 3}; auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -247,9 +247,9 @@ NGRAPH_TEST(${BACKEND_NAME}, 
reshape_special_zero) { Shape shape_a{2, 2, 5, 5}; Shape shape_r{2, 5, 5, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); auto r = make_shared( - A, op::Constant::create(element::u64, {4}, Shape{0, 5, 0, 2}), true); + A, op::Constant::create(element::Type_t::u64, {4}, Shape{0, 5, 0, 2}), true); auto f = make_shared(r, ParameterVector{A}); vector a_data(shape_size(shape_a)); @@ -258,9 +258,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_special_zero) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -311,23 +311,23 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_special_zero) NGRAPH_TEST(${BACKEND_NAME}, reshape_6d) { Shape shape_a{2, 2, 3, 3, 2, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 2, 2, 4, 3, 2}; vector a_data(shape_size(shape_a)); iota(a_data.begin(), a_data.end(), 1.f); auto r = make_shared( - A, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + A, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, a_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -338,7 +338,7 @@ NGRAPH_TEST(${BACKEND_NAME}, reshape_6d) NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_1D_to_scalar) { const Shape input_shape{1}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto reshape_builder = builder::opset1::reshape(input, Shape{}); auto function = make_shared(reshape_builder, ParameterVector{input}); @@ -353,7 +353,7 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_1D_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_3D_to_scalar) { const Shape input_shape{1, 1, 1}; - const auto input = make_shared(element::f32, input_shape); + const auto input = make_shared(element::Type_t::f32, input_shape); const auto reshape_builder = builder::opset1::reshape(input, Shape{}); auto function = make_shared(reshape_builder, ParameterVector{input}); @@ -370,22 +370,22 @@ NGRAPH_TEST(${BACKEND_NAME}, builder_reshape_3D_to_scalar) NGRAPH_TEST(${BACKEND_NAME}, reshape_shufflenet_5d) { Shape shape_a{1, 112, 56, 56}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_b{1, 4, 28, 56, 56}; - auto B = make_shared(element::f32, shape_b); + auto B = make_shared(element::Type_t::f32, shape_b); Shape shape_c{1, 28, 4, 56, 56}; - auto C = make_shared(element::f32, shape_c); + auto C = make_shared(element::Type_t::f32, shape_c); Shape shape_r{1, 112, 56, 56}; vector a_data(shape_size(shape_a)); iota(a_data.begin(), a_data.end(), 1.f); auto r0 = make_shared( - A, 
op::Constant::create(element::u64, {shape_b.size()}, shape_b), false); + A, op::Constant::create(element::Type_t::u64, {shape_b.size()}, shape_b), false); auto r1 = make_shared( - r0, op::Constant::create(element::u64, {shape_c.size()}, shape_c), false); + r0, op::Constant::create(element::Type_t::u64, {shape_c.size()}, shape_c), false); auto r2 = make_shared( - r1, op::Constant::create(element::u64, {shape_r.size()}, shape_r), false); + r1, op::Constant::create(element::Type_t::u64, {shape_r.size()}, shape_r), false); auto f = make_shared(r2, ParameterVector{A}); auto ref_func = clone_function(*f); diff --git a/ngraph/test/backend/reverse.in.cpp b/ngraph/test/backend/reverse.in.cpp index 90caa46a9b9..e8e21cfe341 100644 --- a/ngraph/test/backend/reverse.in.cpp +++ b/ngraph/test/backend/reverse.in.cpp @@ -32,18 +32,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, reverse_1d) { Shape shape{8}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -55,19 +55,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_1d) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_0) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -80,19 +80,19 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_0) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_1) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = 
backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -105,20 +105,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_1) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_1_mask) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared(A, - op::Constant::create(element::boolean, {2}, {false, true}), - op::v1::Reverse::Mode::MASK), + make_shared( + A, + op::Constant::create(element::Type_t::boolean, {2}, {false, true}), + op::v1::Reverse::Mode::MASK), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -131,19 +132,20 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_1_mask) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_01) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {2}, {0, 1}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -156,20 +158,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_01) NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_01_mask) { Shape shape{4, 3}; - auto A = make_shared(element::f32, shape); - auto f = make_shared( - make_shared(A, - op::Constant::create(element::boolean, {2}, {true, true}), - op::v1::Reverse::Mode::MASK), - ParameterVector{A}); + auto A = make_shared(element::Type_t::f32, shape); + auto f = + make_shared(make_shared( + A, + op::Constant::create(element::Type_t::boolean, {2}, {true, true}), + op::v1::Reverse::Mode::MASK), + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}).get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -182,21 +185,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_2d_01_mask) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_0) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, 
{0}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -211,21 +214,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_0) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_1) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -240,21 +243,21 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_1) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_2) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared( - A, op::Constant::create(element::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX), + A, op::Constant::create(element::Type_t::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -269,21 +272,22 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_2) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_01) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {2}, {0, 1}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, 
{6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -298,21 +302,22 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_01) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_02) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {2}, {0, 2}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {2}, {0, 2}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -327,21 +332,22 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_02) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_12) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {2}, {1, 2}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {2}, {1, 2}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -356,21 +362,22 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_12) NGRAPH_TEST(${BACKEND_NAME}, reverse_3d_012) { Shape shape{2, 4, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( - make_shared( - A, op::Constant::create(element::i64, {3}, {0, 1, 2}), op::v1::Reverse::Mode::INDEX), + make_shared(A, + op::Constant::create(element::Type_t::i64, {3}, {0, 1, 2}), + op::v1::Reverse::Mode::INDEX), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, test::NDArray({{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}, {9, 10, 11}}, {{12, 13, 14}, {15, 16, 17}, {18, 19, 20}, {21, 22, 23}}}) .get_vector()); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -384,8 +391,9 @@ NGRAPH_TEST(${BACKEND_NAME}, 
reverse_3d_012) NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_incorrect_rev_axes_rank_index_mode) { - const auto Data = make_shared(element::f32, Shape{2, 2, 2}); - const auto Rev_Axes = make_shared(element::i64, Shape{1, 1}); // correct: 1D + const auto Data = make_shared(element::Type_t::f32, Shape{2, 2, 2}); + const auto Rev_Axes = + make_shared(element::Type_t::i64, Shape{1, 1}); // correct: 1D EXPECT_THROW(make_shared( make_shared(Data, Rev_Axes, op::v1::Reverse::Mode::INDEX), @@ -395,8 +403,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_incorrect_rev_axes_rank_index_mode) NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_incorrect_rev_axes_elems_mask_mode) { - const auto Data = make_shared(element::f32, Shape{2, 2, 2}); - const auto Rev_Axes = make_shared(element::boolean, Shape{2}); // correct: 3 + const auto Data = make_shared(element::Type_t::f32, Shape{2, 2, 2}); + const auto Rev_Axes = + make_shared(element::Type_t::boolean, Shape{2}); // correct: 3 EXPECT_THROW(make_shared(Data, Rev_Axes, op::v1::Reverse::Mode::MASK), ngraph::NodeValidationFailure); @@ -404,8 +413,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_incorrect_rev_axes_elems_mask_mode) NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_axes_out_of_bounds) { - const auto Data = make_shared(element::f32, Shape{2, 2, 2}); - const auto Rev_Axes = op::Constant::create(element::i64, Shape{2}, {1, 10}); + const auto Data = make_shared(element::Type_t::f32, Shape{2, 2, 2}); + const auto Rev_Axes = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 10}); EXPECT_THROW(make_shared(Data, Rev_Axes, op::v1::Reverse::Mode::INDEX), ngraph::NodeValidationFailure); @@ -413,8 +422,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_axes_out_of_bounds) NGRAPH_TEST(${BACKEND_NAME}, reverse_v1_too_many_axes) { - const auto Data = make_shared(element::f32, Shape{2, 2, 2}); - const auto Rev_Axes = op::Constant::create(element::i64, Shape{4}, {0, 1, 2, 3}); + const auto Data = make_shared(element::Type_t::f32, Shape{2, 2, 2}); + const auto Rev_Axes = op::Constant::create(element::Type_t::i64, Shape{4}, {0, 1, 2, 3}); EXPECT_THROW(make_shared(Data, Rev_Axes, op::v1::Reverse::Mode::INDEX), ngraph::NodeValidationFailure); diff --git a/ngraph/test/backend/reverse_sequence.in.cpp b/ngraph/test/backend/reverse_sequence.in.cpp index 1fcca9cf820..aa76919bf4e 100644 --- a/ngraph/test/backend/reverse_sequence.in.cpp +++ b/ngraph/test/backend/reverse_sequence.in.cpp @@ -34,8 +34,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n2c3h4w2) { Shape shape{2, 3, 4, 2}; Shape seq_len_shape{4}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, seq_len_shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, seq_len_shape); size_t batch_axis = 2; size_t sequence_axis = 1; @@ -46,10 +46,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n2c3h4w2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i32, shape); - shared_ptr b = backend->create_tensor(element::i32, seq_len_shape); + shared_ptr a = backend->create_tensor(element::Type_t::i32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::i32, seq_len_shape); - shared_ptr result = backend->create_tensor(element::i32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::i32, shape); std::vector input{ 0, 0, 3, 0, 6, 0, 9, 0, 1, 0, 4, 0, 7, 0, 10, 0, 2, 0, 5, 0, 8, 0, 11, 0, @@ -74,9 +74,9 @@ NGRAPH_TEST(${BACKEND_NAME}, 
reverse_sequence_n2c3h4w2) NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4c3h2w2) { Shape shape{4, 3, 2, 2}; - auto A = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); Shape seq_len_shape{4}; - auto B = make_shared(element::i32, seq_len_shape); + auto B = make_shared(element::Type_t::i32, seq_len_shape); size_t batch_axis = 0; size_t sequence_axis = 1; @@ -88,10 +88,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4c3h2w2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i32, shape); - shared_ptr b = backend->create_tensor(element::i32, seq_len_shape); + shared_ptr a = backend->create_tensor(element::Type_t::i32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::i32, seq_len_shape); - shared_ptr result = backend->create_tensor(element::i32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::i32, shape); std::vector seq_lenghts{1, 2, 3, 3}; copy_data(b, seq_lenghts); @@ -114,9 +114,9 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4c3h2w2) NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4d2c3h2w2) { Shape shape{4, 2, 3, 2, 2}; - auto A = make_shared(element::i32, shape); + auto A = make_shared(element::Type_t::i32, shape); Shape seq_len_shape{4}; - auto B = make_shared(element::i32, seq_len_shape); + auto B = make_shared(element::Type_t::i32, seq_len_shape); size_t batch_axis = 0; size_t sequence_axis = 2; @@ -128,10 +128,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_n4d2c3h2w2) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i32, shape); - shared_ptr b = backend->create_tensor(element::i32, seq_len_shape); + shared_ptr a = backend->create_tensor(element::Type_t::i32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::i32, seq_len_shape); - shared_ptr result = backend->create_tensor(element::i32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::i32, shape); std::vector input{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -161,8 +161,8 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_negative_axes) { Shape shape{2, 3, 4, 2}; Shape seq_len_shape{4}; - auto A = make_shared(element::i32, shape); - auto B = make_shared(element::i32, seq_len_shape); + auto A = make_shared(element::Type_t::i32, shape); + auto B = make_shared(element::Type_t::i32, seq_len_shape); int64_t batch_axis = -2; int64_t sequence_axis = -3; @@ -173,10 +173,10 @@ NGRAPH_TEST(${BACKEND_NAME}, reverse_sequence_negative_axes) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - shared_ptr a = backend->create_tensor(element::i32, shape); - shared_ptr b = backend->create_tensor(element::i32, seq_len_shape); + shared_ptr a = backend->create_tensor(element::Type_t::i32, shape); + shared_ptr b = backend->create_tensor(element::Type_t::i32, seq_len_shape); - shared_ptr result = backend->create_tensor(element::i32, shape); + shared_ptr result = backend->create_tensor(element::Type_t::i32, shape); std::vector input{ 0, 0, 3, 0, 6, 0, 9, 0, 1, 0, 4, 0, 7, 0, 10, 0, 2, 0, 5, 0, 8, 0, 11, 0, diff --git a/ngraph/test/backend/roi_pooling.in.cpp b/ngraph/test/backend/roi_pooling.in.cpp index 37004ba1d41..cd1814b2024 100644 --- a/ngraph/test/backend/roi_pooling.in.cpp +++ 
b/ngraph/test/backend/roi_pooling.in.cpp @@ -45,8 +45,8 @@ NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_1x1_max) Shape pooled_shape{pooled_h, pooled_w}; Shape output_shape{num_rois, channels, pooled_h, pooled_w}; - const auto feat_maps = make_shared(element::f32, feat_maps_shape); - const auto rois = make_shared(element::f32, rois_shape); + const auto feat_maps = make_shared(element::Type_t::f32, feat_maps_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); const auto roi_pooling = make_shared(feat_maps, rois, pooled_shape, spatial_scale, "max"); const auto f = make_shared(roi_pooling, ParameterVector{feat_maps, rois}); @@ -85,8 +85,8 @@ NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_2x2_max) Shape pooled_shape{pooled_h, pooled_w}; Shape output_shape{num_rois, channels, pooled_h, pooled_w}; - const auto feat_maps = make_shared(element::f32, feat_maps_shape); - const auto rois = make_shared(element::f32, rois_shape); + const auto feat_maps = make_shared(element::Type_t::f32, feat_maps_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); const auto roi_pooling = make_shared(feat_maps, rois, pooled_shape, spatial_scale, "max"); const auto f = make_shared(roi_pooling, ParameterVector{feat_maps, rois}); @@ -126,8 +126,8 @@ NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_1x1_bilinear) Shape pooled_shape{pooled_h, pooled_w}; Shape output_shape{num_rois, channels, pooled_h, pooled_w}; - const auto feat_maps = make_shared(element::f32, feat_maps_shape); - const auto rois = make_shared(element::f32, rois_shape); + const auto feat_maps = make_shared(element::Type_t::f32, feat_maps_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); const auto roi_pooling = make_shared(feat_maps, rois, pooled_shape, spatial_scale, "bilinear"); const auto f = make_shared(roi_pooling, ParameterVector{feat_maps, rois}); @@ -166,8 +166,8 @@ NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_2x2_bilinear) Shape pooled_shape{pooled_h, pooled_w}; Shape output_shape{num_rois, channels, pooled_h, pooled_w}; - const auto feat_maps = make_shared(element::f32, feat_maps_shape); - const auto rois = make_shared(element::f32, rois_shape); + const auto feat_maps = make_shared(element::Type_t::f32, feat_maps_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); const auto roi_pooling = make_shared(feat_maps, rois, pooled_shape, spatial_scale, "bilinear"); const auto f = make_shared(roi_pooling, ParameterVector{feat_maps, rois}); diff --git a/ngraph/test/backend/round.in.cpp b/ngraph/test/backend/round.in.cpp index 3e23132ef35..392492b4a53 100644 --- a/ngraph/test/backend/round.in.cpp +++ b/ngraph/test/backend/round.in.cpp @@ -34,15 +34,15 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, round) { Shape shape{5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared(A, op::v5::Round::RoundMode::HALF_TO_EVEN), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0.9f, 2.5f, 2.3f, 1.5f, -4.5f}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -54,16 +54,16 @@ NGRAPH_TEST(${BACKEND_NAME}, round) NGRAPH_TEST(${BACKEND_NAME}, round_away_from_zero) { Shape 
shape{5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared(A, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0.9f, 2.5f, 2.3f, 1.5f, -4.5f}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -75,13 +75,13 @@ NGRAPH_TEST(${BACKEND_NAME}, round_away_from_zero) NGRAPH_TEST(${BACKEND_NAME}, round_2D) { Shape shape{3, 5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared( make_shared(A, op::v5::Round::RoundMode::HALF_TO_EVEN), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{0.1f, 0.5f, @@ -98,7 +98,7 @@ NGRAPH_TEST(${BACKEND_NAME}, round_2D) -2.2f, -2.5f, -2.8f}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -113,16 +113,16 @@ NGRAPH_TEST(${BACKEND_NAME}, round_int64) { // This tests large numbers that will not fit in a double Shape shape{3}; - auto A = make_shared(element::i64, shape); + auto A = make_shared(element::Type_t::i64, shape); auto f = make_shared( make_shared(A, op::v5::Round::RoundMode::HALF_TO_EVEN), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::i64, shape); + auto a = backend->create_tensor(element::Type_t::i64, shape); vector expected{0, 1, 0x4000000000000001}; copy_data(a, expected); - auto result = backend->create_tensor(element::i64, shape); + auto result = backend->create_tensor(element::Type_t::i64, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/select.in.cpp b/ngraph/test/backend/select.in.cpp index 1affb447e03..9530b3fceda 100644 --- a/ngraph/test/backend/select.in.cpp +++ b/ngraph/test/backend/select.in.cpp @@ -34,21 +34,21 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, select) { Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); + auto a = backend->create_tensor(element::Type_t::boolean, shape); copy_data(a, vector{0, 1, 1, 0, 0, 1, 0, 1}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8}); - auto c = backend->create_tensor(element::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); copy_data(c, vector{11, 
12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -59,21 +59,21 @@ NGRAPH_TEST(${BACKEND_NAME}, select) NGRAPH_TEST(${BACKEND_NAME}, select_v1) { - auto A = make_shared(element::boolean, Shape{4}); - auto B = make_shared(element::f32, Shape{4}); - auto C = make_shared(element::f32, Shape{2, 4}); + auto A = make_shared(element::Type_t::boolean, Shape{4}); + auto B = make_shared(element::Type_t::f32, Shape{4}); + auto C = make_shared(element::Type_t::f32, Shape{2, 4}); auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, Shape{4}); + auto a = backend->create_tensor(element::Type_t::boolean, Shape{4}); copy_data(a, vector{0, 1, 1, 0}); - auto b = backend->create_tensor(element::f32, Shape{4}); + auto b = backend->create_tensor(element::Type_t::f32, Shape{4}); copy_data(b, vector{1, 2, 3, 4}); - auto c = backend->create_tensor(element::f32, Shape{2, 4}); + auto c = backend->create_tensor(element::Type_t::f32, Shape{2, 4}); copy_data(c, vector{11, 12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f32, Shape{2, 4}); + auto result = backend->create_tensor(element::Type_t::f32, Shape{2, 4}); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); @@ -84,21 +84,21 @@ NGRAPH_TEST(${BACKEND_NAME}, select_v1) NGRAPH_TEST(${BACKEND_NAME}, select_double) { Shape shape{2, 2, 2}; - auto A = make_shared(element::boolean, shape); - auto B = make_shared(element::f64, shape); - auto C = make_shared(element::f64, shape); + auto A = make_shared(element::Type_t::boolean, shape); + auto B = make_shared(element::Type_t::f64, shape); + auto C = make_shared(element::Type_t::f64, shape); auto f = make_shared(make_shared(A, B, C), ParameterVector{A, B, C}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::boolean, shape); + auto a = backend->create_tensor(element::Type_t::boolean, shape); copy_data(a, vector{0, 1, 1, 0, 0, 1, 0, 1}); - auto b = backend->create_tensor(element::f64, shape); + auto b = backend->create_tensor(element::Type_t::f64, shape); copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8}); - auto c = backend->create_tensor(element::f64, shape); + auto c = backend->create_tensor(element::Type_t::f64, shape); copy_data(c, vector{11, 12, 13, 14, 15, 16, 17, 18}); - auto result = backend->create_tensor(element::f64, shape); + auto result = backend->create_tensor(element::Type_t::f64, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b, c}); diff --git a/ngraph/test/backend/shape_of.in.cpp b/ngraph/test/backend/shape_of.in.cpp index 05a1269f269..076efcce153 100644 --- a/ngraph/test/backend/shape_of.in.cpp +++ b/ngraph/test/backend/shape_of.in.cpp @@ -35,14 +35,14 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_scalar_v0) Shape input_shape{}; Shape output_shape{0}; - auto A = std::make_shared(element::f32, input_shape); + auto A = std::make_shared(element::Type_t::f32, input_shape); auto f = std::make_shared(std::make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = 
backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector{0}); - auto result = backend->create_tensor(element::i64, output_shape); + auto result = backend->create_tensor(element::Type_t::i64, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -55,18 +55,18 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_scalar_v3) Shape input_shape{}; Shape output_shape{0}; - auto A = std::make_shared(element::f32, input_shape); - auto f = - std::make_shared(OutputVector{std::make_shared(A), - std::make_shared(A, element::i32)}, - ParameterVector{A}); + auto A = std::make_shared(element::Type_t::f32, input_shape); + auto f = std::make_shared( + OutputVector{std::make_shared(A), + std::make_shared(A, element::Type_t::i32)}, + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector{0}); - auto result64 = backend->create_tensor(element::i64, output_shape); - auto result32 = backend->create_tensor(element::i32, output_shape); + auto result64 = backend->create_tensor(element::Type_t::i64, output_shape); + auto result32 = backend->create_tensor(element::Type_t::i32, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result64, result32}, {a}); @@ -81,14 +81,14 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_vector_v0) Shape input_shape{2}; Shape output_shape{1}; - auto A = std::make_shared(element::f32, input_shape); + auto A = std::make_shared(element::Type_t::f32, input_shape); auto f = std::make_shared(std::make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2, 0)); - auto result = backend->create_tensor(element::i64, output_shape); + auto result = backend->create_tensor(element::Type_t::i64, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -101,18 +101,18 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_vector_v3) Shape input_shape{2}; Shape output_shape{1}; - auto A = std::make_shared(element::f32, input_shape); - auto f = - std::make_shared(OutputVector{std::make_shared(A), - std::make_shared(A, element::i32)}, - ParameterVector{A}); + auto A = std::make_shared(element::Type_t::f32, input_shape); + auto f = std::make_shared( + OutputVector{std::make_shared(A), + std::make_shared(A, element::Type_t::i32)}, + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector{2, 0}); - auto result64 = backend->create_tensor(element::i64, output_shape); - auto result32 = backend->create_tensor(element::i32, output_shape); + auto result64 = backend->create_tensor(element::Type_t::i64, output_shape); + auto result32 = backend->create_tensor(element::Type_t::i32, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result64, result32}, {a}); @@ -127,14 +127,14 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_matrix_v0) Shape input_shape{2, 4}; Shape output_shape{2}; - auto A = std::make_shared(element::f32, input_shape); + auto A = std::make_shared(element::Type_t::f32, input_shape); auto f = std::make_shared(std::make_shared(A), ParameterVector{A}); 
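In the `shape_of` hunks, the migration also flows through the second argument of the v3 op, which selects the output element type. A sketch of the two variants exercised by these tests (the class names `op::v0::ShapeOf` and `op::v3::ShapeOf` are assumed from the `_v0`/`_v3` test names; v0 always emits `i64`):

```C++
auto A = std::make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 4});

// v0: the output element type is fixed at i64.
auto shape_i64 = std::make_shared<op::v0::ShapeOf>(A);

// v3: the output element type is an argument, so the migrated
// enum spelling is passed straight through.
auto shape_i32 = std::make_shared<op::v3::ShapeOf>(A, element::Type_t::i32);
```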
auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2 * 4, 0)); - auto result = backend->create_tensor(element::i64, output_shape); + auto result = backend->create_tensor(element::Type_t::i64, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -147,18 +147,18 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_matrix_v3) Shape input_shape{2, 4}; Shape output_shape{2}; - auto A = std::make_shared(element::f32, input_shape); - auto f = - std::make_shared(OutputVector{std::make_shared(A), - std::make_shared(A, element::i32)}, - ParameterVector{A}); + auto A = std::make_shared(element::Type_t::f32, input_shape); + auto f = std::make_shared( + OutputVector{std::make_shared(A), + std::make_shared(A, element::Type_t::i32)}, + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2 * 4, 0)); - auto result64 = backend->create_tensor(element::i64, output_shape); - auto result32 = backend->create_tensor(element::i32, output_shape); + auto result64 = backend->create_tensor(element::Type_t::i64, output_shape); + auto result32 = backend->create_tensor(element::Type_t::i32, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result64, result32}, {a}); @@ -173,14 +173,14 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_5d_v0) Shape input_shape{2, 4, 8, 16, 32}; Shape output_shape{5}; - auto A = std::make_shared(element::f32, input_shape); + auto A = std::make_shared(element::Type_t::f32, input_shape); auto f = std::make_shared(std::make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2 * 4 * 8 * 16 * 32, 0)); - auto result = backend->create_tensor(element::i64, output_shape); + auto result = backend->create_tensor(element::Type_t::i64, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -193,18 +193,18 @@ NGRAPH_TEST(${BACKEND_NAME}, shape_of_5d_v3) Shape input_shape{2, 4, 8, 16, 32}; Shape output_shape{5}; - auto A = std::make_shared(element::f32, input_shape); - auto f = - std::make_shared(OutputVector{std::make_shared(A), - std::make_shared(A, element::i32)}, - ParameterVector{A}); + auto A = std::make_shared(element::Type_t::f32, input_shape); + auto f = std::make_shared( + OutputVector{std::make_shared(A), + std::make_shared(A, element::Type_t::i32)}, + ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, input_shape); + auto a = backend->create_tensor(element::Type_t::f32, input_shape); copy_data(a, vector(2 * 4 * 8 * 16 * 32, 0)); - auto result64 = backend->create_tensor(element::i64, output_shape); - auto result32 = backend->create_tensor(element::i32, output_shape); + auto result64 = backend->create_tensor(element::Type_t::i64, output_shape); + auto result32 = backend->create_tensor(element::Type_t::i32, output_shape); auto handle = backend->compile(f); handle->call_with_validate({result64, result32}, {a}); diff --git a/ngraph/test/backend/sigmoid.in.cpp b/ngraph/test/backend/sigmoid.in.cpp index 
23bdc501e54..931359da6a4 100644 --- a/ngraph/test/backend/sigmoid.in.cpp +++ b/ngraph/test/backend/sigmoid.in.cpp @@ -42,14 +42,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h2w2) { - auto input = make_shared(element::f32, Shape{1, 1, 2, 2}); + auto input = make_shared(element::Type_t::f32, Shape{1, 1, 2, 2}); auto sigmoid_node = make_shared(input); auto func = make_shared(sigmoid_node, ParameterVector{input}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - shared_ptr a = backend->create_tensor(element::f32, input->get_shape()); - shared_ptr result = backend->create_tensor(element::f32, input->get_shape()); + shared_ptr a = + backend->create_tensor(element::Type_t::f32, input->get_shape()); + shared_ptr result = + backend->create_tensor(element::Type_t::f32, input->get_shape()); float x1 = 1.0f; float x2 = 4.0f; @@ -67,14 +69,16 @@ NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h2w2) NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h4) { - auto input = make_shared(element::f32, Shape{1, 1, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 1, 4}); auto sigmoid_node = make_shared(input); auto func = make_shared(sigmoid_node, ParameterVector{input}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - shared_ptr a = backend->create_tensor(element::f32, input->get_shape()); - shared_ptr result = backend->create_tensor(element::f32, input->get_shape()); + shared_ptr a = + backend->create_tensor(element::Type_t::f32, input->get_shape()); + shared_ptr result = + backend->create_tensor(element::Type_t::f32, input->get_shape()); float x1 = 1.0f; float x2 = 4.0f; diff --git a/ngraph/test/backend/sign.in.cpp b/ngraph/test/backend/sign.in.cpp index 20e3dbf3f89..751801831df 100644 --- a/ngraph/test/backend/sign.in.cpp +++ b/ngraph/test/backend/sign.in.cpp @@ -49,15 +49,15 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sign) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, -2, 0, -4.8f, 4.8f, -0.0f}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/sin.in.cpp b/ngraph/test/backend/sin.in.cpp index f424bc5e580..3b69a5cbdd6 100644 --- a/ngraph/test/backend/sin.in.cpp +++ b/ngraph/test/backend/sin.in.cpp @@ -49,16 +49,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sin) { Shape shape{11}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; copy_data(a, input); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); 
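As the hunks above show, only tensor and parameter construction changes; the compile-and-execute skeleton shared by all of these tests is identical before and after the migration. For orientation, that common skeleton (a sketch assembled from the hunks themselves; the backend name macro and data are placeholders, `copy_data` as used throughout these tests):

```C++
auto backend = runtime::Backend::create("${BACKEND_NAME}");

auto a = backend->create_tensor(element::Type_t::f32, shape);      // input buffer
copy_data(a, std::vector<float>{/* test inputs */});
auto result = backend->create_tensor(element::Type_t::f32, shape); // output buffer

auto handle = backend->compile(f);          // compile the Function once
handle->call_with_validate({result}, {a});  // run with shape/type validation
```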
handle->call_with_validate({result}, {a}); EXPECT_TRUE(test::all_close_f(vector{0.00000000f, diff --git a/ngraph/test/backend/sinh.in.cpp b/ngraph/test/backend/sinh.in.cpp index b2dca5b2175..85a9e5caa64 100644 --- a/ngraph/test/backend/sinh.in.cpp +++ b/ngraph/test/backend/sinh.in.cpp @@ -49,16 +49,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sinh) { Shape shape{6}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; copy_data(a, input); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return sinhf(x); }); diff --git a/ngraph/test/backend/slice.in.cpp b/ngraph/test/backend/slice.in.cpp index dcdb3fb955c..ba8b352a3bf 100644 --- a/ngraph/test/backend/slice.in.cpp +++ b/ngraph/test/backend/slice.in.cpp @@ -35,7 +35,7 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, slice_scalar) { Shape shape_a{}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{}; auto r = make_shared(A, Coordinate{}, Coordinate{}); auto f = make_shared(make_shared(r), ParameterVector{A}); @@ -43,9 +43,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_scalar) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{312}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -56,7 +56,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_scalar) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{3, 2}; auto r = make_shared(A, Coordinate{0, 1}, Coordinate{3, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -64,9 +64,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -77,7 +77,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix) NGRAPH_TEST(${BACKEND_NAME}, slice_vector) { Shape shape_a{16}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{12}; auto r = make_shared(A, Coordinate{2}, Coordinate{14}); auto f = make_shared(r, ParameterVector{A}); @@ -85,9 +85,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_vector) auto backend = runtime::Backend::create("${BACKEND_NAME}"); 
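The `slice` hunks that follow leave the `op::v0::Slice` coordinates untouched; only the element-type spelling changes. As a reminder of what those coordinates encode (a sketch; lower bounds are inclusive, upper bounds exclusive, with optional per-axis strides):

```C++
auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{4, 4});

// Rows [0,3) and columns [1,3) -> a 3x2 result.
auto r = make_shared<op::v0::Slice>(A, Coordinate{0, 1}, Coordinate{3, 3});

// Strided: rows 1 and 3, columns 0 and 3 -> a 2x2 result.
auto rs = make_shared<op::v0::Slice>(
    A, Coordinate{1, 0}, Coordinate{4, 4}, Strides{2, 3});
```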
// Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -99,8 +99,8 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_vector) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_overlap) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); - auto B = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); + auto B = make_shared(element::Type_t::f32, shape_a); auto C = make_shared(A, B); Shape shape_r{2, 4}; auto D = make_shared(C, Coordinate{0, 0}, Coordinate{2, 4}); @@ -111,11 +111,11 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_overlap) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto b = backend->create_tensor(element::f32, shape_a); + auto b = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(b, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -127,7 +127,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_overlap) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 4}; auto D = make_shared(A, Coordinate{0, 0}, Coordinate{2, 4}); auto E = make_shared(A, Coordinate{2, 0}, Coordinate{4, 4}); @@ -137,9 +137,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -151,7 +151,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{1, 4}; auto B = make_shared(A, Coordinate{0, 0}, Coordinate{2, 4}); auto D = make_shared(B, Coordinate{1, 0}, Coordinate{2, 4}); @@ -162,9 +162,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}); - auto result = backend->create_tensor(element::f32, 
shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -175,7 +175,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice_overlap) { Shape shape_a{5, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 4}; auto B = make_shared(A, Coordinate{1, 0}, Coordinate{5, 4}); auto D = make_shared(B, Coordinate{1, 0}, Coordinate{3, 4}); @@ -186,10 +186,10 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice_overlap) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -201,7 +201,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_twice_overlap) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_with_transpose) { Shape shape_a{4, 5}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 4}; auto B = make_shared(A, Coordinate{1, 0}, Coordinate{4, 5}); auto C = builder::opset1::transpose(B); @@ -213,10 +213,10 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_with_transpose) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -228,7 +228,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_axis_0_in_place_with_transpose) NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_strided) { Shape shape_a{4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2}; auto r = make_shared(A, Coordinate{1, 0}, Coordinate{4, 4}, Strides{2, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -236,9 +236,9 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_strided) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -249,7 +249,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_matrix_strided) NGRAPH_TEST(${BACKEND_NAME}, slice_3d) { Shape shape_a{4, 4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto r = make_shared(A, Coordinate{1, 1, 1}, 
Coordinate{3, 3, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -257,7 +257,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -265,7 +265,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d) 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -277,7 +277,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d) NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided) { Shape shape_a{4, 4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto r = make_shared(A, Coordinate{0, 0, 0}, Coordinate{4, 4, 4}, Strides{2, 2, 2}); auto f = make_shared(r, ParameterVector{A}); @@ -285,7 +285,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -293,7 +293,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided) 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -305,7 +305,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided) NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides) { Shape shape_a{4, 4, 4}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{2, 2, 2}; auto r = make_shared(A, Coordinate{0, 0, 0}, Coordinate{4, 4, 4}, Strides{2, 2, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -313,7 +313,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -321,7 +321,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides) 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -333,7 +333,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides) NGRAPH_TEST(${BACKEND_NAME}, 
slice_3d_strided_different_strides_int64) { Shape shape_a{4, 4, 4}; - auto A = make_shared(element::i64, shape_a); + auto A = make_shared(element::Type_t::i64, shape_a); Shape shape_r{2, 2, 2}; auto r = make_shared(A, Coordinate{0, 0, 0}, Coordinate{4, 4, 4}, Strides{2, 2, 3}); auto f = make_shared(r, ParameterVector{A}); @@ -341,7 +341,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides_int64) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i64, shape_a); + auto a = backend->create_tensor(element::Type_t::i64, shape_a); copy_data(a, vector{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, @@ -349,7 +349,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides_int64) 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}); - auto result = backend->create_tensor(element::i64, shape_r); + auto result = backend->create_tensor(element::Type_t::i64, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -359,7 +359,7 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_strided_different_strides_int64) NGRAPH_TEST(${BACKEND_NAME}, slice_3d_start_just_oob) { Shape shape_a{20, 10, 5}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_r{20, 0, 5}; auto r = make_shared(A, Coordinate{0, 10, 0}, Coordinate{20, 10, 5}, Strides{1, 1, 1}); @@ -368,10 +368,10 @@ NGRAPH_TEST(${BACKEND_NAME}, slice_3d_start_just_oob) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); vector a_data(20 * 10 * 5, 222.0f); copy_data(a, a_data); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/softmax.in.cpp b/ngraph/test/backend/softmax.in.cpp index 9645ca066e2..b649e5dfc45 100644 --- a/ngraph/test/backend/softmax.in.cpp +++ b/ngraph/test/backend/softmax.in.cpp @@ -43,14 +43,14 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d) { Shape shape{2, 2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, 0), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto d0 = expf(-10) + expf(-1); auto d1 = expf(-20) + expf(-2); @@ -80,14 +80,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d) NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_double) { Shape shape{2, 2, 3}; - auto A = make_shared(element::f64, shape); + auto A = make_shared(element::Type_t::f64, shape); auto f = make_shared(make_shared(A, 0), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f64, shape); + auto a = 
backend->create_tensor(element::Type_t::f64, shape); copy_data(a, vector{-10, -20, -30, -40, -50, -60, -1, -2, -3, -4, -5, -6}); - auto result = backend->create_tensor(element::f64, shape); + auto result = backend->create_tensor(element::Type_t::f64, shape); auto d0 = exp(-10) + exp(-1); auto d1 = exp(-20) + exp(-2); @@ -117,14 +117,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_double) NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_1) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, 1), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-10, -20, -30, -40, -50, -60}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto d0 = expf(-10) + expf(-20) + expf(-30); auto d1 = expf(-40) + expf(-50) + expf(-60); @@ -143,14 +143,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_1) NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_0) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, 0), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-10, -20, -30, -40, -50, -60}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto d0 = expf(-10) + expf(-40); auto d1 = expf(-20) + expf(-50); @@ -170,14 +170,14 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_2d_axis_0) NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_trivial) { Shape shape{1, 2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, 0), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-10, -20, -30, -40, -50, -60}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -188,16 +188,16 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_axis_3d_trivial) NGRAPH_TEST(${BACKEND_NAME}, softmax_underflow) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, 0), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); auto low = std::numeric_limits::lowest(); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{low, 1, 2, 3, 4, 5}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto d0 = expf(low) + expf(3); auto d1 = expf(1) + expf(4); @@ -213,16 +213,16 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_underflow) NGRAPH_TEST(${BACKEND_NAME}, softmax_overflow) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = 
make_shared(make_shared(A, 0), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); auto high = std::numeric_limits::max(); - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{high, 1, 2, 3, 4, 5}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto d0 = expf(high - high) + expf(3 - high); auto d1 = expf(1) + expf(4); diff --git a/ngraph/test/backend/split.in.cpp b/ngraph/test/backend/split.in.cpp index 953295d07b1..ce0642bd041 100644 --- a/ngraph/test/backend/split.in.cpp +++ b/ngraph/test/backend/split.in.cpp @@ -28,8 +28,8 @@ using TestEngine = test::ENGINE_CLASS_NAME(${BACKEND_NAME}); NGRAPH_TEST(${BACKEND_NAME}, split_1d) { - const auto data = make_shared(element::i32, Shape{6}); - const auto axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto data = make_shared(element::Type_t::i32, Shape{6}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto tested_op = make_shared(data, axis, 3); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -47,8 +47,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_1d) NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_0) { Shape shape{6, 2}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto tested_op = make_shared(data, axis, 2); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -67,8 +67,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_0) NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_1) { Shape shape{6, 2}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto tested_op = make_shared(data, axis, 2); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -87,8 +87,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_2d_axis_1) NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_0) { Shape shape{2, 2, 3}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto tested_op = make_shared(data, axis, 2); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -107,8 +107,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_0) NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_1) { Shape shape{2, 8, 2}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto tested_op = make_shared(data, axis, 4); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -129,8 +129,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_1) NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_2) { Shape shape{2, 1, 6}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {2}); + 
const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {2}); const auto tested_op = make_shared(data, axis, 2); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -149,8 +149,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_3d_axis_2) NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_0) { Shape shape{3, 2, 3, 1}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto tested_op = make_shared(data, axis, 3); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -170,8 +170,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_0) NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_1) { Shape shape{2, 8, 2, 2}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto tested_op = make_shared(data, axis, 4); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -196,8 +196,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_1) NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_2) { Shape shape{2, 1, 6, 2}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {2}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {2}); const auto tested_op = make_shared(data, axis, 3); const auto function = make_shared(tested_op, ParameterVector{data}); @@ -217,8 +217,8 @@ NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_2) NGRAPH_TEST(${BACKEND_NAME}, split_4d_axis_3) { Shape shape{2, 1, 2, 6}; - const auto data = make_shared(element::f32, shape); - const auto axis = op::Constant::create(element::i64, Shape{}, {3}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {3}); const auto tested_op = make_shared(data, axis, 3); const auto function = make_shared(tested_op, ParameterVector{data}); diff --git a/ngraph/test/backend/sqrt.in.cpp b/ngraph/test/backend/sqrt.in.cpp index 6c4b85aa09f..f6d5b6b17b2 100644 --- a/ngraph/test/backend/sqrt.in.cpp +++ b/ngraph/test/backend/sqrt.in.cpp @@ -49,15 +49,15 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, sqrt) { Shape shape{2, 3}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{16, 4, 81, 100, 10000, 0}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -67,15 +67,15 @@ NGRAPH_TEST(${BACKEND_NAME}, sqrt) NGRAPH_TEST(${BACKEND_NAME}, sqrt_negative_inputs) { Shape shape{4}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), 
ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{-1, 4, -81, 100}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/strided_slice.in.cpp b/ngraph/test/backend/strided_slice.in.cpp index 192a0f505ef..2a0b2e54d43 100644 --- a/ngraph/test/backend/strided_slice.in.cpp +++ b/ngraph/test/backend/strided_slice.in.cpp @@ -44,10 +44,12 @@ void check_strided_slice_success(const element::Type& input_element_type, const std::vector& expected_values) { auto arg = std::make_shared(input_element_type, input_shape); - auto begin_op = make_shared(element::i64, Shape{begin_values.size()}); - auto end_op = make_shared(element::i64, Shape{end_values.size()}); + auto begin_op = + make_shared(element::Type_t::i64, Shape{begin_values.size()}); + auto end_op = + make_shared(element::Type_t::i64, Shape{end_values.size()}); auto strides_op = - make_shared(element::i64, Shape{strides_values.size()}); + make_shared(element::Type_t::i64, Shape{strides_values.size()}); std::vector input_values(shape_size(input_shape)); std::iota(input_values.begin(), input_values.end(), static_cast(0)); @@ -69,9 +71,10 @@ void check_strided_slice_success(const element::Type& input_element_type, auto ex = backend->compile(f); auto arg_tensor = backend->create_tensor(input_element_type, input_shape); - auto begin_tensor = backend->create_tensor(element::i64, Shape{begin_values.size()}); - auto end_tensor = backend->create_tensor(element::i64, Shape{end_values.size()}); - auto strides_tensor = backend->create_tensor(element::i64, Shape{strides_values.size()}); + auto begin_tensor = backend->create_tensor(element::Type_t::i64, Shape{begin_values.size()}); + auto end_tensor = backend->create_tensor(element::Type_t::i64, Shape{end_values.size()}); + auto strides_tensor = + backend->create_tensor(element::Type_t::i64, Shape{strides_values.size()}); copy_data(arg_tensor, input_values); copy_data(begin_tensor, begin_values); copy_data(end_tensor, end_values); @@ -103,8 +106,10 @@ void check_strided_slice_stride_optional_success(const element::Type& input_elem const std::vector& expected_values) { auto arg = std::make_shared(input_element_type, input_shape); - auto begin_op = make_shared(element::i64, Shape{begin_values.size()}); - auto end_op = make_shared(element::i64, Shape{end_values.size()}); + auto begin_op = + make_shared(element::Type_t::i64, Shape{begin_values.size()}); + auto end_op = + make_shared(element::Type_t::i64, Shape{end_values.size()}); std::vector input_values(shape_size(input_shape)); std::iota(input_values.begin(), input_values.end(), static_cast(0)); @@ -125,8 +130,8 @@ void check_strided_slice_stride_optional_success(const element::Type& input_elem auto ex = backend->compile(f); auto arg_tensor = backend->create_tensor(input_element_type, input_shape); - auto begin_tensor = backend->create_tensor(element::i64, Shape{begin_values.size()}); - auto end_tensor = backend->create_tensor(element::i64, Shape{end_values.size()}); + auto begin_tensor = backend->create_tensor(element::Type_t::i64, Shape{begin_values.size()}); + auto end_tensor = backend->create_tensor(element::Type_t::i64, Shape{end_values.size()}); 
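Both strided-slice helpers then follow the compile-and-run skeleton that every test in this section shares. A hedged sketch of that skeleton, with `INTERPRETER` as an assumed stand-in for the `${BACKEND_NAME}` placeholder and `op::Sqrt` as an arbitrary example op:

```C++
// Minimal sketch of the shared test skeleton, using the new
// element::Type_t spelling throughout. INTERPRETER is an assumed
// stand-in for the ${BACKEND_NAME} macro parameter.
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/backend.hpp"

using namespace ngraph;

void run_example()
{
    Shape shape{2, 2};
    auto A = std::make_shared<op::Parameter>(element::Type_t::f32, shape);
    auto f = std::make_shared<Function>(std::make_shared<op::Sqrt>(A),
                                        ParameterVector{A});

    auto backend = runtime::Backend::create("INTERPRETER");

    // create_tensor accepts element::Type_t directly, as in the hunks above.
    auto a = backend->create_tensor(element::Type_t::f32, shape);
    auto result = backend->create_tensor(element::Type_t::f32, shape);
    // (Input data copy omitted; the tests use the copy_data helper.)

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
}
```

Because `element::Type` converts implicitly from the enumerator, `create_tensor` and the op constructors need no signature changes, which is what keeps this migration purely mechanical.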
copy_data(arg_tensor, input_values); copy_data(begin_tensor, begin_values); copy_data(end_tensor, end_values); @@ -150,7 +155,7 @@ void check_strided_slice_stride_optional_success(const element::Type& input_elem NGRAPH_TEST(${BACKEND_NAME}, strided_slice_0) { check_strided_slice_success( - element::u32, + element::Type_t::u32, Shape{2, 3, 4}, std::vector{1, 0}, std::vector{0, 0}, @@ -171,7 +176,7 @@ NGRAPH_TEST(${BACKEND_NAME}, strided_slice_0) NGRAPH_TEST(${BACKEND_NAME}, strided_slice_1) { check_strided_slice_success( - element::u32, + element::Type_t::u32, Shape{2, 4, 6, 8, 2, 2, 2}, std::vector{0, 0, 2, 7, 0, 0, 1}, std::vector{0, 4, 6, 3, 0, 0, 0}, @@ -201,7 +206,7 @@ NGRAPH_TEST(${BACKEND_NAME}, strided_slice_1) // expected output shape is Shape{1,4} NGRAPH_TEST(${BACKEND_NAME}, strided_slice_stride_optional) { - check_strided_slice_stride_optional_success(element::u32, + check_strided_slice_stride_optional_success(element::Type_t::u32, Shape{2, 3, 4}, std::vector{-1, -1, 0}, std::vector{0, 0, 0}, diff --git a/ngraph/test/backend/subtract.in.cpp b/ngraph/test/backend/subtract.in.cpp index 4d4b232f817..ce2b205bfae 100644 --- a/ngraph/test/backend/subtract.in.cpp +++ b/ngraph/test/backend/subtract.in.cpp @@ -51,18 +51,18 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, subtract) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); @@ -72,18 +72,18 @@ NGRAPH_TEST(${BACKEND_NAME}, subtract) NGRAPH_TEST(${BACKEND_NAME}, subtract_overload) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(A - B, ParameterVector{A, B}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 8, 16}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 4, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a, b}); diff --git a/ngraph/test/backend/tan.in.cpp b/ngraph/test/backend/tan.in.cpp index 93a3600be2b..abbe7c25c9d 100644 --- a/ngraph/test/backend/tan.in.cpp +++ b/ngraph/test/backend/tan.in.cpp @@ -49,16 +49,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, tan) { Shape shape{11}; - auto A = 
make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; copy_data(a, input); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); EXPECT_TRUE(test::all_close_f(vector{0.00000000f, diff --git a/ngraph/test/backend/tanh.in.cpp b/ngraph/test/backend/tanh.in.cpp index 404c0b6d6c4..08e5db9a49c 100644 --- a/ngraph/test/backend/tanh.in.cpp +++ b/ngraph/test/backend/tanh.in.cpp @@ -49,16 +49,16 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, tanh) { Shape shape{6}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A), ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f}; copy_data(a, input); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return tanhf(x); }); diff --git a/ngraph/test/backend/tile.in.cpp b/ngraph/test/backend/tile.in.cpp index d9b4b5d520e..bf1c2b9d676 100644 --- a/ngraph/test/backend/tile.in.cpp +++ b/ngraph/test/backend/tile.in.cpp @@ -39,9 +39,9 @@ static string s_manifest = "${MANIFEST}"; NGRAPH_TEST(${BACKEND_NAME}, tile_3d_small_data_rank) { Shape shape_a{3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_re{3}; - auto repeats = make_shared(element::i64, shape_re, vector{2, 2, 1}); + auto repeats = make_shared(element::Type_t::i64, shape_re, vector{2, 2, 1}); Shape shape_r{2, 2, 3}; auto tile = make_shared(A, repeats); @@ -51,10 +51,10 @@ NGRAPH_TEST(${BACKEND_NAME}, tile_3d_small_data_rank) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -66,9 +66,9 @@ NGRAPH_TEST(${BACKEND_NAME}, tile_3d_small_data_rank) NGRAPH_TEST(${BACKEND_NAME}, tile_3d_few_repeats) { Shape shape_a{2, 1, 3}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_re{2}; - auto repeats = make_shared(element::i64, shape_re, vector{2, 1}); + auto repeats = make_shared(element::Type_t::i64, shape_re, vector{2, 1}); Shape shape_r{2, 2, 3}; auto tile = make_shared(A, repeats); @@ -78,10 +78,10 @@ NGRAPH_TEST(${BACKEND_NAME}, tile_3d_few_repeats) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for 
input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_r); + auto result = backend->create_tensor(element::Type_t::f32, shape_r); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/backend/topk.in.cpp b/ngraph/test/backend/topk.in.cpp index e61451b8bc1..288512b8db4 100644 --- a/ngraph/test/backend/topk.in.cpp +++ b/ngraph/test/backend/topk.in.cpp @@ -64,14 +64,14 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_resnet50) Shape shape{128, 1000}; Shape rshape5{128, 5}; Shape rshape1{128, 1}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto B = make_shared(A, - op::Constant::create(element::i64, {}, {5}), + op::Constant::create(element::Type_t::i64, {}, {5}), 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); auto C = make_shared(A, - op::Constant::create(element::i64, {}, {1}), + op::Constant::create(element::Type_t::i64, {}, {1}), 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -86,7 +86,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_resnet50) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -97,10 +97,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_resnet50) } copy_data(a, data); - auto result5_value = backend->create_tensor(element::f32, rshape5); - auto result5_index = backend->create_tensor(element::i32, rshape5); - auto result1_value = backend->create_tensor(element::f32, rshape1); - auto result1_index = backend->create_tensor(element::i32, rshape1); + auto result5_value = backend->create_tensor(element::Type_t::f32, rshape5); + auto result5_index = backend->create_tensor(element::Type_t::i32, rshape5); + auto result1_value = backend->create_tensor(element::Type_t::f32, rshape1); + auto result1_index = backend->create_tensor(element::Type_t::i32, rshape1); auto exec = backend->compile(f); exec->call({result5_value, result5_index, result1_value, result1_index}, {a}); @@ -142,8 +142,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_none) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::NONE); @@ -154,7 +154,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_none) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -165,8 +165,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_none) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, 
result_index}, {a}); @@ -196,8 +196,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_none) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::NONE); @@ -208,7 +208,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_none) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -219,8 +219,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_none) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -250,8 +250,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_value) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -262,7 +262,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_value) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -273,8 +273,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_value) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -300,8 +300,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_value) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -312,7 +312,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_value) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -323,8 +323,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_value) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto 
result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -354,8 +354,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_index) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_INDICES); @@ -366,7 +366,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_index) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -377,8 +377,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_max_sort_index) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -408,8 +408,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_index) { Shape shape{128, 1000}; Shape rshape{128, 5}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {5}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {5}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_INDICES); @@ -420,7 +420,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_index) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); vector data; for (size_t i = 0; i < shape[0]; i++) { @@ -431,8 +431,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_min_sort_index) } copy_data(a, data); - auto result_value = backend->create_tensor(element::f32, rshape); - auto result_index = backend->create_tensor(element::i32, rshape); + auto result_value = backend->create_tensor(element::Type_t::f32, rshape); + auto result_index = backend->create_tensor(element::Type_t::i32, rshape); auto exec = backend->compile(f); exec->call({result_value, result_index}, {a}); @@ -462,8 +462,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_all) { Shape shape{6}; Shape rshape{6}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {6}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {6}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -473,10 +473,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result0 = backend->create_tensor(element::f32, 
rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -491,8 +491,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_i32_max_all) { Shape shape{6}; Shape rshape{6}; - auto A = make_shared(element::i32, shape); - auto k = op::Constant::create(element::i64, {}, {6}); + auto A = make_shared(element::Type_t::i32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {6}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -502,10 +502,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_i32_max_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::i32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result0 = backend->create_tensor(element::i32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::i32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -519,8 +519,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_partial) { Shape shape{6}; Shape rshape{3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -530,10 +530,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -548,8 +548,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_one) { Shape shape{6}; Shape rshape{1}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -559,10 +559,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_max_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = 
backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -577,8 +577,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_all) { Shape shape{6}; Shape rshape{6}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {6}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {6}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -588,10 +588,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{6, 5, 4, 3, 2, 1}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -606,8 +606,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_partial) { Shape shape{6}; Shape rshape{3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -617,10 +617,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{6, 5, 4, 3, 2, 1}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -635,8 +635,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_one) { Shape shape{6}; Shape rshape{1}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -646,10 +646,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_1d_min_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{6, 5, 4, 3, 2, 1}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -664,8 +664,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_all) { Shape shape{2, 3, 2}; Shape rshape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = 
op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -675,10 +675,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -694,21 +694,25 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_int64) { Shape shape{2, 3, 2}; Shape rshape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 1; - auto B = make_shared( - A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, element::i64); + auto B = make_shared(A, + k, + axis, + op::v1::TopK::Mode::MAX, + op::v1::TopK::SortType::SORT_VALUES, + element::Type_t::i64); auto f0 = make_shared(OutputVector{B->output(0)}, ParameterVector{A}); auto f1 = make_shared(OutputVector{B->output(1)}, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i64, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i64, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -724,8 +728,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial) { Shape shape{2, 6, 3, 2, 4}; Shape rshape{2, 2, 3, 2, 4}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -735,7 +739,7 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data( a, vector{ @@ -761,8 +765,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_5d_max_partial) 205., 277., 213., 285., 198., 270., 206., 278., 214., 286., 199., 271., 207., 279., 215., 287., 200., 272., 208., 280., 216., 288.}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = 
backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -790,8 +794,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_partial) { Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -801,10 +805,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -820,8 +824,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_one) { Shape shape{2, 3, 2}; Shape rshape{2, 1, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -831,10 +835,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_max_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -849,8 +853,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_all) { Shape shape{2, 3, 2}; Shape rshape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {3}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {3}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -860,10 +864,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -879,8 
+883,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_partial) { Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -890,10 +894,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -909,8 +913,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_one) { Shape shape{2, 3, 2}; Shape rshape{2, 1, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -920,10 +924,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_min_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -938,8 +942,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_all) { Shape shape{4, 3}; Shape rshape{4, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {4}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {4}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -949,10 +953,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -968,8 +972,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_partial) { Shape shape{4, 3}; Shape rshape{2, 3}; - auto A = make_shared(element::f32, shape); - 
auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -979,10 +983,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -998,8 +1002,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one) { Shape shape{4, 3}; Shape rshape{1, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -1009,10 +1013,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{9, 2, 10, 12, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1027,8 +1031,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one_with_equal_values) { Shape shape{2, 4}; Shape rshape{2, 1}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -1038,10 +1042,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_max_one_with_equal_values) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{1, 3, 2, 4, 1, 3, 3, 2}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1056,8 +1060,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_all) { Shape shape{4, 3}; Shape rshape{4, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {4}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = 
op::Constant::create(element::Type_t::i64, {}, {4}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -1067,10 +1071,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_all) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1086,8 +1090,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_partial) { Shape shape{4, 3}; Shape rshape{2, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -1097,10 +1101,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_partial) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1115,8 +1119,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one) { Shape shape{4, 3}; Shape rshape{1, 3}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {1}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {1}); int64_t axis = 0; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::NONE); @@ -1126,10 +1130,10 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::f32, rshape); - auto result1 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::f32, rshape); + auto result1 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1143,9 +1147,9 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_2d_min_one) NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_max) { Shape shape{4, 8192, 5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); - auto k = op::Constant::create(element::i64, {}, {10}); + auto k = op::Constant::create(element::Type_t::i64, {}, {10}); int64_t axis = 1; auto B = make_shared( A, k, axis, 
op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES); @@ -1183,9 +1187,9 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_max) NGRAPH_TEST(${BACKEND_NAME}, topk_3d_large_input_min) { Shape shape{4, 8192, 5}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); - auto k = op::Constant::create(element::i64, {}, {10}); + auto k = op::Constant::create(element::Type_t::i64, {}, {10}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -1224,8 +1228,8 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_single_output) { Shape shape{2, 3, 2}; Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - auto k = op::Constant::create(element::i64, {}, {2}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = op::Constant::create(element::Type_t::i64, {}, {2}); int64_t axis = 1; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MIN, op::v1::TopK::SortType::SORT_VALUES); @@ -1234,9 +1238,9 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_single_output) auto backend = runtime::Backend::create("${BACKEND_NAME}"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}); - auto result0 = backend->create_tensor(element::i32, rshape); + auto result0 = backend->create_tensor(element::Type_t::i32, rshape); auto h0 = backend->compile(f0); h0->call_with_validate({result0}, {a}); @@ -1245,27 +1249,27 @@ NGRAPH_TEST(${BACKEND_NAME}, topk_3d_single_output) NGRAPH_TEST(${BACKEND_NAME}, topk_v1_invalid_strings) { - const auto data = make_shared(element::f32, Shape{1, 2, 3}); - const auto k = op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto k = op::Constant::create(element::Type_t::i64, Shape{}, {1}); EXPECT_THROW(op::v1::TopK(data, k, 0, "max", "invalid_mode"), ngraph::CheckFailure); EXPECT_THROW(op::v1::TopK(data, k, 0, "invalid_sort", "index"), ngraph::CheckFailure); } NGRAPH_TEST(${BACKEND_NAME}, topk_v1_invalid_k) { - const auto data = make_shared(element::f32, Shape{1, 2, 3}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3}); // K must be a scalar - const auto k_non_scalar = op::Constant::create(element::i64, Shape{2}, {1, 2}); + const auto k_non_scalar = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); EXPECT_THROW(op::v1::TopK(data, k_non_scalar, 0, "max", "index"), ngraph::NodeValidationFailure); // K can only be i8, i32 or i64 - const auto k_float = op::Constant::create(element::f32, Shape{}, {1.0f}); + const auto k_float = op::Constant::create(element::Type_t::f32, Shape{}, {1.0f}); EXPECT_THROW(op::v1::TopK(data, k_float, 0, "max", "index"), ngraph::NodeValidationFailure); // the value of K must be positive - const auto k_negative = op::Constant::create(element::i8, Shape{}, {-1}); + const auto k_negative = op::Constant::create(element::Type_t::i8, Shape{}, {-1}); EXPECT_THROW(op::v1::TopK(data, k_negative, 0, "max", "index"), ngraph::NodeValidationFailure); } @@ -1299,8 +1303,8 @@ TYPED_TEST_P(topk_backend, topk_mode_sort_order) { const Shape shape{5}; const Shape rshape{3}; - const auto data = make_shared(element::f32, shape); - const auto k = op::Constant::create(element::i64, {}, {3}); + const auto data = make_shared(element::Type_t::f32, shape); + const auto k = 
op::Constant::create(element::Type_t::i64, {}, {3}); const int64_t axis = 0; // helpers to reduce code verbosity diff --git a/ngraph/test/backend/transpose.in.cpp b/ngraph/test/backend/transpose.in.cpp index a7ebbf2a816..000f86f27a2 100644 --- a/ngraph/test/backend/transpose.in.cpp +++ b/ngraph/test/backend/transpose.in.cpp @@ -33,9 +33,10 @@ NGRAPH_TEST(${BACKEND_NAME}, transpose) // Create a graph for f(x,perm) = Transpose(x,Convert(perm)). We'll do the permutation in // i32 and cast it to i64, just for fun (and to mirror the TensorFlow test I am porting here). // - auto x = make_shared(element::f32, PartialShape::dynamic()); - auto perm = make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto perm_i64 = make_shared(perm, element::i64); + auto x = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto perm = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto perm_i64 = make_shared(perm, element::Type_t::i64); auto x_transpose = make_shared(x, perm_i64); @@ -45,7 +46,7 @@ NGRAPH_TEST(${BACKEND_NAME}, transpose) auto ex = backend->compile(f); - auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic()); + auto t_r = backend->create_dynamic_tensor(element::Type_t::f32, PartialShape::dynamic()); std::vector x_shapes{Shape{2, 3}, Shape{2, 3}, Shape{2, 2, 3}}; std::vector> perms{{0, 1}, {1, 0}, {2, 1, 0}}; @@ -58,8 +59,8 @@ NGRAPH_TEST(${BACKEND_NAME}, transpose) for (size_t i = 0; i < x_shapes.size(); i++) { - auto t_x = backend->create_tensor(element::f32, x_shapes[i]); - auto t_perm = backend->create_tensor(element::i32, Shape{perms[i].size()}); + auto t_x = backend->create_tensor(element::Type_t::f32, x_shapes[i]); + auto t_perm = backend->create_tensor(element::Type_t::i32, Shape{perms[i].size()}); copy_data(t_x, inputs[i]); copy_data(t_perm, perms[i]); diff --git a/ngraph/test/backend/unhandled_op.in.cpp b/ngraph/test/backend/unhandled_op.in.cpp index ad243408ae6..d3264b54416 100644 --- a/ngraph/test/backend/unhandled_op.in.cpp +++ b/ngraph/test/backend/unhandled_op.in.cpp @@ -56,7 +56,7 @@ namespace NGRAPH_TEST(${BACKEND_NAME}, unhandled_op) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto unhandled = make_shared(A); auto f = make_shared(unhandled, ParameterVector{A}); auto backend = runtime::Backend::create("${BACKEND_NAME}"); diff --git a/ngraph/test/backend/validate_call.in.cpp b/ngraph/test/backend/validate_call.in.cpp index 97e908caa84..5630d57bfec 100644 --- a/ngraph/test/backend/validate_call.in.cpp +++ b/ngraph/test/backend/validate_call.in.cpp @@ -38,13 +38,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_input_count) Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto a = backend->create_tensor(element::f32, shape); - auto b = backend->create_tensor(element::f32, shape); - auto c = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({c}, {a})); } @@ -55,13 +55,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_input_type) Shape shape{2, 2}; 
- auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto a = backend->create_tensor(element::i32, shape); - auto b = backend->create_tensor(element::f32, shape); - auto c = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({c}, {a, b})); } @@ -72,13 +72,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_input_shape) Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto a = backend->create_tensor(element::f32, {2, 3}); - auto b = backend->create_tensor(element::f32, shape); - auto c = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, {2, 3}); + auto b = backend->create_tensor(element::Type_t::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({c}, {a, b})); } @@ -89,14 +89,14 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_output_count) Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto a = backend->create_tensor(element::f32, shape); - auto b = backend->create_tensor(element::f32, shape); - auto c = backend->create_tensor(element::f32, shape); - auto d = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); + auto d = backend->create_tensor(element::Type_t::f32, shape); EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({c, d}, {a, b})); } @@ -107,13 +107,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_output_type) Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto a = backend->create_tensor(element::i32, shape); - auto b = backend->create_tensor(element::f32, shape); - auto c = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::i32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({a}, {b, c})); } @@ -124,13 +124,13 @@ NGRAPH_TEST(${BACKEND_NAME}, validate_call_output_shape) Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = 
make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); - auto a = backend->create_tensor(element::f32, {2, 3}); - auto b = backend->create_tensor(element::f32, shape); - auto c = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, {2, 3}); + auto b = backend->create_tensor(element::Type_t::f32, shape); + auto c = backend->create_tensor(element::Type_t::f32, shape); EXPECT_ANY_THROW(auto handle = backend->compile(f); handle->call_with_validate({a}, {c, b})); } diff --git a/ngraph/test/backend_debug_api.cpp b/ngraph/test/backend_debug_api.cpp index d9172c5a773..5124a3c4290 100644 --- a/ngraph/test/backend_debug_api.cpp +++ b/ngraph/test/backend_debug_api.cpp @@ -33,18 +33,18 @@ using namespace ngraph; TEST(INTERPRETER, nan_check_input) { Shape shape{4}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); shared_ptr backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, NAN, 16}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 1, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); shared_ptr handle = backend->compile(f); @@ -57,18 +57,18 @@ TEST(INTERPRETER, nan_check_input) TEST(INTERPRETER, nan_check_output) { Shape shape{4}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); shared_ptr backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape); + auto a = backend->create_tensor(element::Type_t::f32, shape); copy_data(a, vector{2, 4, 0, 16}); - auto b = backend->create_tensor(element::f32, shape); + auto b = backend->create_tensor(element::Type_t::f32, shape); copy_data(b, vector{1, 2, 0, 8}); - auto result = backend->create_tensor(element::f32, shape); + auto result = backend->create_tensor(element::Type_t::f32, shape); shared_ptr handle = backend->compile(f); shared_ptr ihandle = diff --git a/ngraph/test/build_graph.cpp b/ngraph/test/build_graph.cpp index b91dff59560..c771382b4ec 100644 --- a/ngraph/test/build_graph.cpp +++ b/ngraph/test/build_graph.cpp @@ -31,10 +31,10 @@ using namespace ngraph; TEST(build_graph, build_simple) { // Function with 4 parameters - auto arg0 = make_shared(element::f32, Shape{7, 3}); - auto arg1 = make_shared(element::f32, Shape{3}); - auto arg2 = make_shared(element::f32, Shape{32, 7}); - auto arg3 = make_shared(element::f32, Shape{32, 7}); + auto arg0 = make_shared(element::Type_t::f32, Shape{7, 3}); + auto arg1 = make_shared(element::Type_t::f32, Shape{3}); + auto arg2 = make_shared(element::Type_t::f32, Shape{32, 7}); + auto arg3 = make_shared(element::Type_t::f32, Shape{32, 7}); auto broadcast_1 = builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto b1 = 
builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); @@ -51,18 +51,18 @@ TEST(build_graph, literal) // float scalar from a float // auto float0 = FloatConstant::make(3.0); vector float_t{3.0}; - auto float0 = make_shared(element::f32, Shape{1}, float_t); + auto float0 = make_shared(element::Type_t::f32, Shape{1}, float_t); ASSERT_EQ(float0->get_vector(), std::vector{3.0}); - ASSERT_EQ(float0->get_element_type(), element::f32); + ASSERT_EQ(float0->get_element_type(), element::Type_t::f32); ASSERT_EQ(float0->get_shape(), Shape{1}); auto d = make_shared(float0, float0); ASSERT_EQ(d->input_values().at(0).get_node_shared_ptr(), float0); ASSERT_EQ(d->input_values().at(1).get_node_shared_ptr(), float0); vector int32{3}; - auto int32_0 = make_shared(element::i32, Shape{}, int32); + auto int32_0 = make_shared(element::Type_t::i32, Shape{}, int32); ASSERT_EQ(int32_0->get_vector(), std::vector{3}); - ASSERT_EQ(int32_0->get_element_type(), element::i32); + ASSERT_EQ(int32_0->get_element_type(), element::Type_t::i32); ASSERT_EQ(int32_0->get_shape(), Shape{}); } @@ -72,8 +72,8 @@ TEST(build_graph, tensor) // auto float0 = FloatConstant::make(3.0); Shape shape{2, 3}; vector float_t(shape_size(shape), 0); - auto float0 = make_shared(element::f32, shape, float_t); - ASSERT_EQ(float0->get_element_type(), element::f32); + auto float0 = make_shared(element::Type_t::f32, shape, float_t); + ASSERT_EQ(float0->get_element_type(), element::Type_t::f32); ASSERT_EQ(float0->get_shape(), shape); auto d = make_shared(float0, float0); ASSERT_EQ(d->input_values().at(0).get_node_shared_ptr(), float0); @@ -81,8 +81,8 @@ TEST(build_graph, tensor) Shape ishape{3, 5}; vector idata(shape_size(ishape), 0); - auto int32_0 = make_shared(element::i32, ishape, idata); - ASSERT_EQ(int32_0->get_element_type(), element::i32); + auto int32_0 = make_shared(element::Type_t::i32, ishape, idata); + ASSERT_EQ(int32_0->get_element_type(), element::Type_t::i32); ASSERT_EQ(int32_0->get_shape(), ishape); } @@ -90,10 +90,10 @@ TEST(build_graph, tensor) TEST(build_graph, function_undeclared_parameters) { // Function with 4 parameters - auto arg0 = make_shared(element::f32, Shape{7, 3}); - auto arg1 = make_shared(element::f32, Shape{3}); - auto arg2 = make_shared(element::f32, Shape{32, 7}); - auto arg3 = make_shared(element::f32, Shape{32, 7}); + auto arg0 = make_shared(element::Type_t::f32, Shape{7, 3}); + auto arg1 = make_shared(element::Type_t::f32, Shape{3}); + auto arg2 = make_shared(element::Type_t::f32, Shape{32, 7}); + auto arg3 = make_shared(element::Type_t::f32, Shape{32, 7}); auto broadcast_1 = builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto b1 = builder::opset1::make_broadcast(arg3, Shape{10, 32, 7}, AxisSet{0}); auto dot = make_shared(arg2, arg0); @@ -121,10 +121,10 @@ TEST(build_graph, no_arg_construction) { // The ops // Parameters aren't converted yet - auto arg0 = make_shared(element::f32, Shape{7}); - auto arg1 = make_shared(element::f32, Shape{7}); - auto arg2 = make_shared(element::f32, Shape{7}); - auto arg3 = make_shared(element::f32, Shape{7}); + auto arg0 = make_shared(element::Type_t::f32, Shape{7}); + auto arg1 = make_shared(element::Type_t::f32, Shape{7}); + auto arg2 = make_shared(element::Type_t::f32, Shape{7}); + auto arg3 = make_shared(element::Type_t::f32, Shape{7}); auto add0 = make_shared(); auto abs0 = make_shared(); auto acos0 = make_shared(); @@ -142,13 +142,13 @@ TEST(build_graph, no_arg_construction) TEST(build_graph, 
multi_output_split_dynamic) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto split = make_shared(data, axis, 2); auto abs = make_shared(split->output(1)); EXPECT_TRUE(abs->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); - auto new_parameter = make_shared(element::f32, Shape{2, 4}); + auto new_parameter = make_shared(element::Type_t::f32, Shape{2, 4}); split->input(0).replace_source_output(new_parameter->output(0)); auto f = make_shared(abs, ParameterVector{new_parameter}); @@ -159,18 +159,18 @@ TEST(build_graph, multi_output_split_dynamic) TEST(build_graph, function_revalidate_and_infer) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto pattern = op::Constant::create(element::i64, Shape{6}, {1, 3, 16, 2, 2, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto pattern = op::Constant::create(element::Type_t::i64, Shape{6}, {1, 3, 16, 2, 2, 2}); auto r = make_shared(arg, pattern, true); auto relu = make_shared(r); auto f = make_shared(relu, ParameterVector{arg}); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(r->get_output_shape(0), (Shape{1, 3, 16, 2, 2, 2})); EXPECT_EQ(f->get_output_shape(0), (Shape{1, 3, 16, 2, 2, 2})); - auto new_pattern = op::Constant::create(element::i64, Shape{2}, {32, 12}); + auto new_pattern = op::Constant::create(element::Type_t::i64, Shape{2}, {32, 12}); r->input(1).replace_source_output(new_pattern->output(0)); f->validate_nodes_and_infer_types(); @@ -193,13 +193,13 @@ TEST(build_graph, default_output_checks) TEST(build_graph, build_graph_with_sink) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -214,13 +214,13 @@ TEST(build_graph, build_graph_with_sink) TEST(build_graph, build_graph_with_sink_output_ctor) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -236,13 +236,13 @@ TEST(build_graph, build_graph_with_sink_output_ctor) TEST(build_graph, build_graph_with_add_sink) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const 
= op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -263,13 +263,13 @@ TEST(build_graph, build_graph_with_add_sink) TEST(build_graph, build_graph_with_wrong_remove_sink) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -287,13 +287,13 @@ TEST(build_graph, build_graph_with_wrong_remove_sink) TEST(build_graph, build_graph_with_remove_sink) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto assign = make_shared(crop, "v0"); @@ -313,13 +313,13 @@ TEST(build_graph, build_graph_with_remove_sink) TEST(build_graph, build_graph_with_add_result) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto res2 = make_shared(crop, "v0"); @@ -340,13 +340,13 @@ TEST(build_graph, build_graph_with_add_result) TEST(build_graph, build_graph_with_remove_result) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto init_const = op::Constant::create(element::f32, Shape{2, 2}, {0, 0, 0, 0}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto init_const = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {0, 0, 0, 0}); auto read = make_shared(init_const, "v0"); std::vector> args = {arg, read}; auto pattern = make_shared(args, 1); auto res = make_shared(pattern); - const auto axis = 
op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto crop = make_shared(pattern, axis, 3); auto res2 = make_shared(crop, "v0"); diff --git a/ngraph/test/builder.cpp b/ngraph/test/builder.cpp index 8658b0cbed8..16550820343 100644 --- a/ngraph/test/builder.cpp +++ b/ngraph/test/builder.cpp @@ -26,14 +26,14 @@ shared_ptr make_reduce_result(function(const shared_ptr&, const AxisSet&)> func) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; auto f = make_shared(func(A, {0}), ParameterVector{A}); auto backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -44,14 +44,14 @@ shared_ptr make_reduce_result_true( function(const shared_ptr&, const AxisSet&, bool)> func) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; auto f = make_shared(func(A, {0}, true), ParameterVector{A}); auto backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); @@ -62,14 +62,14 @@ shared_ptr make_reduce_result_false( function(const shared_ptr&, const AxisSet&, bool)> func) { Shape shape_a{3, 2}; - auto A = make_shared(element::f32, shape_a); + auto A = make_shared(element::Type_t::f32, shape_a); Shape shape_rt{2}; auto f = make_shared(func(A, {0}, false), ParameterVector{A}); auto backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, shape_a); + auto a = backend->create_tensor(element::Type_t::f32, shape_a); copy_data(a, vector{1, 2, 3, 4, 5, 6}); - auto result = backend->create_tensor(element::f32, shape_rt); + auto result = backend->create_tensor(element::Type_t::f32, shape_rt); auto handle = backend->compile(f); handle->call_with_validate({result}, {a}); diff --git a/ngraph/test/builder_autobroadcast.cpp b/ngraph/test/builder_autobroadcast.cpp index a9b1bdf23a8..ea412bcb5f3 100644 --- a/ngraph/test/builder_autobroadcast.cpp +++ b/ngraph/test/builder_autobroadcast.cpp @@ -26,7 +26,7 @@ using namespace ngraph; shared_ptr getParamFromShape(const Shape& shape) { - return make_shared(element::f32, shape); + return make_shared(element::Type_t::f32, shape); } inline const Shape& getShapeFromParam(const shared_ptr& node) @@ -217,8 +217,8 @@ TEST(autobroadcast, numpy_broadcast_for_matmul_op_2d) { const Shape lhs{3, 1, 4, 6}; const Shape rhs{6, 5}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = 
make_shared(element::Type_t::f32, rhs); const OutputVector result = builder::numpy_broadcast_for_matmul_operation(lhs_node, rhs_node); @@ -230,8 +230,8 @@ TEST(autobroadcast, numpy_broadcast_for_matmul_op_3d) { const Shape lhs{3, 1, 4, 6}; const Shape rhs{2, 6, 5}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const OutputVector result = builder::numpy_broadcast_for_matmul_operation(lhs_node, rhs_node); @@ -243,8 +243,8 @@ TEST(autobroadcast, numpy_broadcast_for_matmul_op_nop) { const Shape lhs{4, 6}; const Shape rhs{6, 5}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const OutputVector result = builder::numpy_broadcast_for_matmul_operation(lhs_node, rhs_node); @@ -257,8 +257,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_scalar) const Shape lhs{2, 3, 4, 5}; const Shape rhs{}; size_t start_match_axis{3}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -271,8 +271,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_1elem_tensor) const Shape lhs{2, 3, 4, 5}; const Shape rhs{1, 1, 1}; size_t start_match_axis{1}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -285,8 +285,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_1d) const Shape lhs{2, 3, 4, 5}; const Shape rhs{5}; size_t start_match_axis{3}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -299,8 +299,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_2d) const Shape lhs{2, 3, 4, 5}; const Shape rhs{4, 5}; size_t start_match_axis{2}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -313,8 +313,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_2d_inside) const Shape lhs{2, 3, 4, 5}; const Shape rhs{3, 4}; size_t start_match_axis{1}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = 
builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -327,8 +327,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_1d_left) const Shape lhs{2, 3, 4, 5}; const Shape rhs{2}; size_t start_match_axis{0}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, rhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, rhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); @@ -340,8 +340,8 @@ TEST(autobroadcast, opset1_legacy_broadcast_identical) { const Shape lhs{2, 3, 4, 5}; size_t start_match_axis{0}; - const auto lhs_node = make_shared(element::f32, lhs); - const auto rhs_node = make_shared(element::f32, lhs); + const auto lhs_node = make_shared(element::Type_t::f32, lhs); + const auto rhs_node = make_shared(element::Type_t::f32, lhs); const Output result = builder::opset1::legacy_broadcast_for_binary_operation( lhs_node, rhs_node, start_match_axis); diff --git a/ngraph/test/constant.cpp b/ngraph/test/constant.cpp index b11934ff342..a0e20110e14 100644 --- a/ngraph/test/constant.cpp +++ b/ngraph/test/constant.cpp @@ -31,7 +31,7 @@ using namespace std; TEST(constant, boolean_string) { Shape shape{4}; - op::Constant c(element::boolean, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::boolean, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -49,7 +49,7 @@ TEST(constant, boolean_string) TEST(constant, boolean_string_broadcast) { Shape shape{4}; - op::Constant c(element::boolean, shape, vector{"1"}); + op::Constant c(element::Type_t::boolean, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -67,7 +67,7 @@ TEST(constant, boolean_string_broadcast) TEST(constant, boolean_vector) { Shape shape{4}; - op::Constant c(element::boolean, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::boolean, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -85,7 +85,7 @@ TEST(constant, boolean_vector) TEST(constant, boolean_vector_broadcast) { Shape shape{4}; - op::Constant c(element::boolean, shape, vector{1}); + op::Constant c(element::Type_t::boolean, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -107,7 +107,7 @@ TEST(constant, boolean_vector_broadcast) TEST(constant, float_string) { Shape shape{4}; - op::Constant c(element::f32, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::f32, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -125,7 +125,7 @@ TEST(constant, float_string) TEST(constant, float_string_broadcast) { Shape shape{4}; - op::Constant c(element::f32, shape, vector{"1"}); + op::Constant c(element::Type_t::f32, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -143,7 +143,7 @@ TEST(constant, float_string_broadcast) TEST(constant, float_vector) { Shape shape{4}; - op::Constant c(element::f32, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::f32, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -161,7 +161,7 @@ TEST(constant, float_vector) TEST(constant, 
float_vector_broadcast) { Shape shape{4}; - op::Constant c(element::f32, shape, vector{1}); + op::Constant c(element::Type_t::f32, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -183,7 +183,7 @@ TEST(constant, float_vector_broadcast) TEST(constant, double_string) { Shape shape{4}; - op::Constant c(element::f64, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::f64, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -201,7 +201,7 @@ TEST(constant, double_string) TEST(constant, double_string_broadcast) { Shape shape{4}; - op::Constant c(element::f64, shape, vector{"1"}); + op::Constant c(element::Type_t::f64, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -219,7 +219,7 @@ TEST(constant, double_string_broadcast) TEST(constant, double_vector) { Shape shape{4}; - op::Constant c(element::f64, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::f64, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -237,7 +237,7 @@ TEST(constant, double_vector) TEST(constant, double_vector_broadcast) { Shape shape{4}; - op::Constant c(element::f64, shape, vector{1}); + op::Constant c(element::Type_t::f64, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -259,7 +259,7 @@ TEST(constant, double_vector_broadcast) TEST(constant, int8_string) { Shape shape{4}; - op::Constant c(element::i8, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::i8, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -277,7 +277,7 @@ TEST(constant, int8_string) TEST(constant, int8_string_broadcast) { Shape shape{4}; - op::Constant c(element::i8, shape, vector{"1"}); + op::Constant c(element::Type_t::i8, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -295,7 +295,7 @@ TEST(constant, int8_string_broadcast) TEST(constant, int8_vector) { Shape shape{4}; - op::Constant c(element::i8, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::i8, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -313,7 +313,7 @@ TEST(constant, int8_vector) TEST(constant, int8_vector_broadcast) { Shape shape{4}; - op::Constant c(element::i8, shape, vector{1}); + op::Constant c(element::Type_t::i8, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -335,7 +335,7 @@ TEST(constant, int8_vector_broadcast) TEST(constant, int16_string) { Shape shape{4}; - op::Constant c(element::i16, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::i16, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -353,7 +353,7 @@ TEST(constant, int16_string) TEST(constant, int16_string_broadcast) { Shape shape{4}; - op::Constant c(element::i16, shape, vector{"1"}); + op::Constant c(element::Type_t::i16, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -371,7 +371,7 @@ TEST(constant, int16_string_broadcast) TEST(constant, int16_vector) { Shape shape{4}; - op::Constant c(element::i16, shape, vector{1, 0, 1, 0}); + op::Constant 
c(element::Type_t::i16, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -389,7 +389,7 @@ TEST(constant, int16_vector) TEST(constant, int16_vector_broadcast) { Shape shape{4}; - op::Constant c(element::i16, shape, vector{1}); + op::Constant c(element::Type_t::i16, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -411,7 +411,7 @@ TEST(constant, int16_vector_broadcast) TEST(constant, int32_string) { Shape shape{4}; - op::Constant c(element::i32, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::i32, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -429,7 +429,7 @@ TEST(constant, int32_string) TEST(constant, int32_string_broadcast) { Shape shape{4}; - op::Constant c(element::i32, shape, vector{"1"}); + op::Constant c(element::Type_t::i32, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -447,7 +447,7 @@ TEST(constant, int32_string_broadcast) TEST(constant, int32_vector) { Shape shape{4}; - op::Constant c(element::i32, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::i32, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -465,7 +465,7 @@ TEST(constant, int32_vector) TEST(constant, int32_vector_broadcast) { Shape shape{4}; - op::Constant c(element::i32, shape, vector{1}); + op::Constant c(element::Type_t::i32, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -487,7 +487,7 @@ TEST(constant, int32_vector_broadcast) TEST(constant, int64_string) { Shape shape{4}; - op::Constant c(element::i64, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::i64, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -505,7 +505,7 @@ TEST(constant, int64_string) TEST(constant, int64_string_broadcast) { Shape shape{4}; - op::Constant c(element::i64, shape, vector{"1"}); + op::Constant c(element::Type_t::i64, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -523,7 +523,7 @@ TEST(constant, int64_string_broadcast) TEST(constant, int64_vector) { Shape shape{4}; - op::Constant c(element::i64, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::i64, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -541,7 +541,7 @@ TEST(constant, int64_vector) TEST(constant, int64_vector_broadcast) { Shape shape{4}; - op::Constant c(element::i64, shape, vector{1}); + op::Constant c(element::Type_t::i64, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -563,7 +563,7 @@ TEST(constant, int64_vector_broadcast) TEST(constant, uint8_string) { Shape shape{4}; - op::Constant c(element::u8, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::u8, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -581,7 +581,7 @@ TEST(constant, uint8_string) TEST(constant, uint8_string_broadcast) { Shape shape{4}; - op::Constant c(element::u8, shape, vector{"1"}); + op::Constant c(element::Type_t::u8, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); 
EXPECT_EQ(v[0], 1); @@ -599,7 +599,7 @@ TEST(constant, uint8_string_broadcast) TEST(constant, uint8_vector) { Shape shape{4}; - op::Constant c(element::u8, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::u8, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -617,7 +617,7 @@ TEST(constant, uint8_vector) TEST(constant, uint8_vector_broadcast) { Shape shape{4}; - op::Constant c(element::u8, shape, vector{1}); + op::Constant c(element::Type_t::u8, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -639,7 +639,7 @@ TEST(constant, uint8_vector_broadcast) TEST(constant, uint16_string) { Shape shape{4}; - op::Constant c(element::u16, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::u16, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -657,7 +657,7 @@ TEST(constant, uint16_string) TEST(constant, uint16_string_broadcast) { Shape shape{4}; - op::Constant c(element::u16, shape, vector{"1"}); + op::Constant c(element::Type_t::u16, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -675,7 +675,7 @@ TEST(constant, uint16_string_broadcast) TEST(constant, uint16_vector) { Shape shape{4}; - op::Constant c(element::u16, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::u16, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -693,7 +693,7 @@ TEST(constant, uint16_vector) TEST(constant, uint16_vector_broadcast) { Shape shape{4}; - op::Constant c(element::u16, shape, vector{1}); + op::Constant c(element::Type_t::u16, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -715,7 +715,7 @@ TEST(constant, uint16_vector_broadcast) TEST(constant, uint32_string) { Shape shape{4}; - op::Constant c(element::u32, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::u32, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -733,7 +733,7 @@ TEST(constant, uint32_string) TEST(constant, uint32_string_broadcast) { Shape shape{4}; - op::Constant c(element::u32, shape, vector{"1"}); + op::Constant c(element::Type_t::u32, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -751,7 +751,7 @@ TEST(constant, uint32_string_broadcast) TEST(constant, uint32_vector) { Shape shape{4}; - op::Constant c(element::u32, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::u32, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -769,7 +769,7 @@ TEST(constant, uint32_vector) TEST(constant, uint32_vector_broadcast) { Shape shape{4}; - op::Constant c(element::u32, shape, vector{1}); + op::Constant c(element::Type_t::u32, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -791,7 +791,7 @@ TEST(constant, uint32_vector_broadcast) TEST(constant, uint64_string) { Shape shape{4}; - op::Constant c(element::u64, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::u64, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -809,7 +809,7 @@ TEST(constant, uint64_string) TEST(constant, 
uint64_string_broadcast) { Shape shape{4}; - op::Constant c(element::u64, shape, vector{"1"}); + op::Constant c(element::Type_t::u64, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -827,7 +827,7 @@ TEST(constant, uint64_string_broadcast) TEST(constant, uint64_vector) { Shape shape{4}; - op::Constant c(element::u64, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::u64, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -845,7 +845,7 @@ TEST(constant, uint64_vector) TEST(constant, uint64_vector_broadcast) { Shape shape{4}; - op::Constant c(element::u64, shape, vector{1}); + op::Constant c(element::Type_t::u64, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], 1); @@ -867,7 +867,7 @@ TEST(constant, uint64_vector_broadcast) TEST(constant, bfloat16_string) { Shape shape{4}; - op::Constant c(element::bf16, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::bf16, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], bfloat16(1)); @@ -885,7 +885,7 @@ TEST(constant, bfloat16_string) TEST(constant, bfloat16_string_broadcast) { Shape shape{4}; - op::Constant c(element::bf16, shape, vector{"1"}); + op::Constant c(element::Type_t::bf16, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], bfloat16(1)); @@ -903,7 +903,7 @@ TEST(constant, bfloat16_string_broadcast) TEST(constant, bfloat16_vector) { Shape shape{4}; - op::Constant c(element::bf16, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::bf16, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], bfloat16(1)); @@ -921,7 +921,7 @@ TEST(constant, bfloat16_vector) TEST(constant, bfloat16_vector_broadcast) { Shape shape{4}; - op::Constant c(element::bf16, shape, vector{1}); + op::Constant c(element::Type_t::bf16, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], bfloat16(1)); @@ -943,7 +943,7 @@ TEST(constant, bfloat16_vector_broadcast) TEST(constant, float16_string) { Shape shape{4}; - op::Constant c(element::f16, shape, vector{"1", "0", "1", "0"}); + op::Constant c(element::Type_t::f16, shape, vector{"1", "0", "1", "0"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], float16(1)); @@ -961,7 +961,7 @@ TEST(constant, float16_string) TEST(constant, float16_string_broadcast) { Shape shape{4}; - op::Constant c(element::f16, shape, vector{"1"}); + op::Constant c(element::Type_t::f16, shape, vector{"1"}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], float16(1)); @@ -979,7 +979,7 @@ TEST(constant, float16_string_broadcast) TEST(constant, float16_vector) { Shape shape{4}; - op::Constant c(element::f16, shape, vector{1, 0, 1, 0}); + op::Constant c(element::Type_t::f16, shape, vector{1, 0, 1, 0}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], float16(1)); @@ -997,7 +997,7 @@ TEST(constant, float16_vector) TEST(constant, float16_vector_broadcast) { Shape shape{4}; - op::Constant c(element::f16, shape, vector{1}); + op::Constant c(element::Type_t::f16, shape, vector{1}); auto v = c.get_vector(); ASSERT_EQ(v.size(), shape_size(shape)); EXPECT_EQ(v[0], float16(1)); @@ -1015,7 +1015,7 @@ TEST(constant, float16_vector_broadcast) 
TEST(constant, shared_data) { Shape shape{100, 200}; - auto c1 = make_shared(element::f16, shape, vector{123}); + auto c1 = make_shared(element::Type_t::f16, shape, vector{123}); auto c2 = static_pointer_cast(c1->clone_with_new_inputs({})); const int16_t* p1 = c1->get_data_ptr(); const int16_t* p2 = c2->get_data_ptr(); @@ -1368,7 +1368,7 @@ TEST(constant, construct_uniform) TEST(constant, bad_get_data_ptr) { - op::Constant c(element::f32, Shape{}, vector{1.0}); + op::Constant c(element::Type_t::f32, Shape{}, vector{1.0}); EXPECT_EQ(*c.get_data_ptr(), 1.0); try { diff --git a/ngraph/test/constant_folding.cpp b/ngraph/test/constant_folding.cpp index be87409a08a..a7b635aa20b 100644 --- a/ngraph/test/constant_folding.cpp +++ b/ngraph/test/constant_folding.cpp @@ -62,7 +62,7 @@ TEST(constant_folding, acosh) { expected.push_back(std::acosh(f)); } - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); auto acosh = make_shared(constant); acosh->set_friendly_name("test"); auto f = make_shared(acosh, ParameterVector{}); @@ -94,7 +94,7 @@ TEST(constant_folding, asinh) { expected.push_back(std::asinh(f)); } - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); auto asinh = make_shared(constant); asinh->set_friendly_name("test"); auto f = make_shared(asinh, ParameterVector{}); @@ -126,7 +126,7 @@ TEST(constant_folding, atanh) { expected.push_back(std::atanh(f)); } - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); auto atanh = make_shared(constant); atanh->set_friendly_name("test"); auto f = make_shared(atanh, ParameterVector{}); @@ -155,9 +155,9 @@ TEST(constant_folding, constant_squeeze) Shape axes_shape{1}; vector values_in{0, 1, 2, 3, 4, 5, 6, 7}; - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); vector values_axes{2}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto squeeze = make_shared(constant, constant_axes); squeeze->set_friendly_name("test"); auto f = make_shared(squeeze, ParameterVector{}); @@ -186,9 +186,9 @@ TEST(constant_folding, constant_unsqueeze) Shape axes_shape{2}; vector values_in{0, 1, 2, 3, 4, 5, 6, 7}; - auto constant = make_shared(element::f32, shape_in, values_in); + auto constant = make_shared(element::Type_t::f32, shape_in, values_in); vector values_axes{2, 3}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto unsqueeze = make_shared(constant, constant_axes); unsqueeze->set_friendly_name("test"); auto f = make_shared(unsqueeze, ParameterVector{}); @@ -213,11 +213,11 @@ TEST(constant_folding, constant_unsqueeze) TEST(constant_folding, constant_broadcast_v1) { vector values_in{0, 1}; - auto constant_in = make_shared(element::i32, Shape{2}, values_in); + auto constant_in = make_shared(element::Type_t::i32, Shape{2}, values_in); vector shape_in{2, 4}; - auto constant_shape = make_shared(element::i64, Shape{2}, shape_in); + auto constant_shape = make_shared(element::Type_t::i64, Shape{2}, shape_in); vector axes_in{0}; - auto constant_axes = make_shared(element::i64, Shape{1}, 
axes_in); + auto constant_axes = make_shared(element::Type_t::i64, Shape{1}, axes_in); auto broadcast_v1 = make_shared(constant_in, constant_shape, constant_axes); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); @@ -242,9 +242,10 @@ TEST(constant_folding, constant_broadcast_v1) TEST(constant_folding, constant_broadcast_v1_with_target_shape) { vector values_in{1}; - auto constant_in = make_shared(element::i32, Shape{1, 1, 1, 1}, values_in); + auto constant_in = + make_shared(element::Type_t::i32, Shape{1, 1, 1, 1}, values_in); vector shape_in{1, 3, 1, 1}; - auto target_shape = make_shared(element::i64, Shape{4}, shape_in); + auto target_shape = make_shared(element::Type_t::i64, Shape{4}, shape_in); auto broadcast_v1 = make_shared(constant_in, target_shape); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); @@ -269,9 +270,9 @@ TEST(constant_folding, constant_broadcast_v1_with_target_shape) TEST(constant_folding, constant_broadcast_v1_numpy) { vector values_in{0, 1}; - auto constant_in = make_shared(element::i32, Shape{2}, values_in); + auto constant_in = make_shared(element::Type_t::i32, Shape{2}, values_in); vector shape_in{4, 2}; - auto constant_shape = make_shared(element::i64, Shape{2}, shape_in); + auto constant_shape = make_shared(element::Type_t::i64, Shape{2}, shape_in); auto broadcast_v1 = make_shared(constant_in, constant_shape); broadcast_v1->set_friendly_name("test"); auto f = make_shared(broadcast_v1, ParameterVector{}); @@ -304,15 +305,15 @@ TEST(constant_folding, constant_unary_binary) vector values_g{1, 4}; vector values_h{0, 0, 1, 1}; vector values_i{0, 1}; - auto a = make_shared(element::i32, Shape{2, 2}, values_a); - auto b = make_shared(element::i32, Shape{2, 2}, values_b); - auto c = make_shared(element::i32, Shape{2, 2}, values_c); - auto d = make_shared(element::i32, Shape{2, 2}, values_d); - auto e = make_shared(element::i32, Shape{2}, values_e); - auto f = make_shared(element::i32, Shape{2}, values_f); - auto g = make_shared(element::i32, Shape{2}, values_g); - auto h = make_shared(element::boolean, Shape{2, 2}, values_h); - auto i = make_shared(element::boolean, Shape{2}, values_i); + auto a = make_shared(element::Type_t::i32, Shape{2, 2}, values_a); + auto b = make_shared(element::Type_t::i32, Shape{2, 2}, values_b); + auto c = make_shared(element::Type_t::i32, Shape{2, 2}, values_c); + auto d = make_shared(element::Type_t::i32, Shape{2, 2}, values_d); + auto e = make_shared(element::Type_t::i32, Shape{2}, values_e); + auto f = make_shared(element::Type_t::i32, Shape{2}, values_f); + auto g = make_shared(element::Type_t::i32, Shape{2}, values_g); + auto h = make_shared(element::Type_t::boolean, Shape{2, 2}, values_h); + auto i = make_shared(element::Type_t::boolean, Shape{2}, values_i); auto add = a + b; auto sub = a - b; @@ -434,8 +435,8 @@ TEST(constant_folding, const_convert) Shape input_shape{3, 4}; vector values_in{1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7}; - auto constant = op::Constant::create(element::f32, input_shape, values_in); - auto convert = make_shared(constant, element::u64); + auto constant = op::Constant::create(element::Type_t::f32, input_shape, values_in); + auto convert = make_shared(constant, element::Type_t::u64); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -450,7 +451,7 @@ TEST(constant_folding, const_convert) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); 
ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(new_const->get_output_element_type(0), element::u64); + ASSERT_EQ(new_const->get_output_element_type(0), element::Type_t::u64); auto values_out = new_const->get_vector(); vector values_expected{1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7}; @@ -461,7 +462,7 @@ TEST(constant_folding, shape_of_v0) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -477,7 +478,7 @@ TEST(constant_folding, shape_of_v0) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(new_const->get_output_element_type(0), element::i64); + ASSERT_EQ(new_const->get_output_element_type(0), element::Type_t::i64); auto values_out = new_const->get_vector(); ASSERT_EQ((vector{3, 4, 0, 22, 608, 909, 3}), values_out); @@ -487,7 +488,7 @@ TEST(constant_folding, shape_of_v3) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -503,7 +504,7 @@ TEST(constant_folding, shape_of_v3) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(new_const->get_output_element_type(0), element::i64); + ASSERT_EQ(new_const->get_output_element_type(0), element::Type_t::i64); auto values_out = new_const->get_vector(); ASSERT_EQ((vector{3, 4, 0, 22, 608, 909, 3}), values_out); @@ -513,8 +514,8 @@ TEST(constant_folding, shape_of_i32_v3) { Shape input_shape{3, 4, 0, 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); - auto shape_of = make_shared(param, element::i32); + auto param = make_shared(element::Type_t::boolean, input_shape); + auto shape_of = make_shared(param, element::Type_t::i32); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -529,7 +530,7 @@ TEST(constant_folding, shape_of_i32_v3) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(new_const->get_output_element_type(0), element::i32); + ASSERT_EQ(new_const->get_output_element_type(0), element::Type_t::i32); auto values_out = new_const->get_vector(); ASSERT_EQ((vector{3, 4, 0, 22, 608, 909, 3}), values_out); @@ -539,7 +540,7 @@ TEST(constant_folding, shape_of_dynamic_v0) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -564,7 +565,7 @@ TEST(constant_folding, shape_of_dynamic_v3) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, 
ParameterVector{param}); @@ -583,15 +584,15 @@ TEST(constant_folding, shape_of_dynamic_v3) ASSERT_TRUE(result_as_concat); ASSERT_EQ(result_as_concat->get_friendly_name(), "test"); ASSERT_EQ(result_as_concat->get_output_shape(0), Shape{7}); - ASSERT_EQ(result_as_concat->get_output_element_type(0), element::i64); + ASSERT_EQ(result_as_concat->get_output_element_type(0), element::Type_t::i64); } TEST(constant_folding, shape_of_dynamic_i32_v3) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); - auto shape_of = make_shared(param, element::i32); + auto param = make_shared(element::Type_t::boolean, input_shape); + auto shape_of = make_shared(param, element::Type_t::i32); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -609,7 +610,7 @@ TEST(constant_folding, shape_of_dynamic_i32_v3) ASSERT_TRUE(result_as_concat); ASSERT_EQ(result_as_concat->get_friendly_name(), "test"); ASSERT_EQ(result_as_concat->get_output_shape(0), Shape{7}); - ASSERT_EQ(result_as_concat->get_output_element_type(0), element::i32); + ASSERT_EQ(result_as_concat->get_output_element_type(0), element::Type_t::i32); } // We need to be sure that constant folding won't be calculated endlessly. @@ -617,7 +618,7 @@ TEST(constant_folding, shape_of_dynamic_double_folding_v0) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -643,7 +644,7 @@ TEST(constant_folding, shape_of_dynamic_double_folding_v3) { PartialShape input_shape{3, 4, Dimension::dynamic(), 22, 608, 909, 3}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -671,7 +672,7 @@ TEST(constant_folding, shape_of_rank_dynamic_v0) { PartialShape input_shape{PartialShape::dynamic()}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -692,7 +693,7 @@ TEST(constant_folding, shape_of_rank_dynamic_v3) { PartialShape input_shape{PartialShape::dynamic()}; - auto param = make_shared(element::boolean, input_shape); + auto param = make_shared(element::Type_t::boolean, input_shape); auto shape_of = make_shared(param); shape_of->set_friendly_name("test"); auto f = make_shared(shape_of, ParameterVector{param}); @@ -714,7 +715,7 @@ void const_reverse(const element::Type& axes_elem_type) Shape input_shape{3, 3}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); auto axes = op::Constant::create(axes_elem_type, {1}, {1}); auto convert = make_shared(constant, axes, op::v1::Reverse::Mode::INDEX); convert->set_friendly_name("test"); @@ -739,14 +740,14 @@ void const_reverse(const element::Type& axes_elem_type) TEST(constant_folding, const_reverse) { - for (auto&& axes_elem_type : {element::i8, - element::u8, - element::i16, - element::u16, 
- element::i32, - element::u32, - element::i64, - element::u64}) + for (auto&& axes_elem_type : {element::Type_t::i8, + element::Type_t::u8, + element::Type_t::i16, + element::Type_t::u16, + element::Type_t::i32, + element::Type_t::u32, + element::Type_t::i64, + element::Type_t::u64}) { const_reverse(axes_elem_type); } @@ -758,10 +759,10 @@ TEST(constant_folding, const_reduceprod) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -792,10 +793,10 @@ TEST(constant_folding, const_reduceprod_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -826,10 +827,10 @@ TEST(constant_folding, const_reducesum) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -860,10 +861,10 @@ TEST(constant_folding, const_reducesum_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -894,10 +895,10 @@ TEST(constant_folding, const_reducemax) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); 
convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -928,10 +929,10 @@ TEST(constant_folding, const_reducemax_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -962,10 +963,10 @@ TEST(constant_folding, const_reducemin) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -996,10 +997,10 @@ TEST(constant_folding, const_reducemin_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1030,10 +1031,10 @@ TEST(constant_folding, const_reducemean) Shape output_shape{3}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1064,10 +1065,10 @@ TEST(constant_folding, const_reducemean_keepdims) Shape output_shape{3, 1}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9}; - auto constant = op::Constant::create(element::i32, input_shape, values_in); + auto constant = op::Constant::create(element::Type_t::i32, input_shape, values_in); Shape axes_shape{1}; vector values_axes{1}; - auto constant_axes = op::Constant::create(element::i64, axes_shape, values_axes); + auto constant_axes = op::Constant::create(element::Type_t::i64, axes_shape, values_axes); auto convert = make_shared(constant, constant_axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1097,8 +1098,8 @@ TEST(constant_folding, const_reduce_logical_and__no_keepdims) const Shape input_shape{3, 3}; const vector values_in{0, 1, 1, 0, 1, 0, 1, 1, 1}; - 
const auto data = op::Constant::create(element::boolean, input_shape, values_in); - const auto axes = op::Constant::create(element::i64, {1}, {1}); + const auto data = op::Constant::create(element::Type_t::boolean, input_shape, values_in); + const auto axes = op::Constant::create(element::Type_t::i64, {1}, {1}); const auto convert = make_shared(data, axes, false); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1130,8 +1131,8 @@ TEST(constant_folding, const_reduce_logical_and__keepdims) const Shape input_shape{3, 3}; const vector values_in{0, 1, 1, 0, 1, 0, 1, 1, 1}; - const auto data = op::Constant::create(element::boolean, input_shape, values_in); - const auto axes = op::Constant::create(element::i64, {1}, {1}); + const auto data = op::Constant::create(element::Type_t::boolean, input_shape, values_in); + const auto axes = op::Constant::create(element::Type_t::i64, {1}, {1}); const auto convert = make_shared(data, axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1165,8 +1166,8 @@ TEST(constant_folding, const_reduce_logical_and__keepdims_3d) const Shape input_shape{2, 2, 2}; const vector values_in{1, 1, 0, 0, 1, 0, 0, 1}; - const auto data = op::Constant::create(element::boolean, input_shape, values_in); - const auto axes = op::Constant::create(element::i64, {2}, {0, 2}); + const auto data = op::Constant::create(element::Type_t::boolean, input_shape, values_in); + const auto axes = op::Constant::create(element::Type_t::i64, {2}, {0, 2}); const auto convert = make_shared(data, axes, true); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1198,8 +1199,8 @@ TEST(constant_folding, const_reduce_logical_or__no_keepdims) const Shape input_shape{3, 3}; const vector values_in{1, 0, 0, 1, 0, 1, 0, 0, 0}; - const auto data = op::Constant::create(element::boolean, input_shape, values_in); - const auto axes = op::Constant::create(element::i64, {1}, {1}); + const auto data = op::Constant::create(element::Type_t::boolean, input_shape, values_in); + const auto axes = op::Constant::create(element::Type_t::i64, {1}, {1}); const auto convert = make_shared(data, axes, false); convert->set_friendly_name("test"); auto f = make_shared(convert, ParameterVector{}); @@ -1229,8 +1230,8 @@ TEST(constant_folding, const_reduce_logical_or__no_keepdims) TEST(constant_folding, const_concat) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); - auto constant1 = op::Constant::create(element::i32, Shape{2, 1}, vector{7, 8}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + auto constant1 = op::Constant::create(element::Type_t::i32, Shape{2, 1}, vector{7, 8}); auto concat = make_shared(NodeVector{constant0, constant1}, 1); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); @@ -1255,8 +1256,10 @@ TEST(constant_folding, const_concat) TEST(constant_folding, const_concat_3d_single_elem) { - auto constant_1 = op::Constant::create(element::i32, Shape{1, 1, 1}, vector{1}); - auto constant_2 = op::Constant::create(element::i32, Shape{1, 1, 1}, vector{2}); + auto constant_1 = + op::Constant::create(element::Type_t::i32, Shape{1, 1, 1}, vector{1}); + auto constant_2 = + op::Constant::create(element::Type_t::i32, Shape{1, 1, 1}, vector{2}); auto concat = make_shared(NodeVector{constant_1, constant_2}, 0); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); 
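Every hunk in this changeset applies the same mechanical substitution: an `element::Type` constant such as `element::i64` becomes the scoped enum value `element::Type_t::i64`, and nothing else at the call site changes, because `element::Type` is implicitly constructible from `element::Type_t`. A minimal before/after sketch of the pattern, assuming the nGraph headers these tests already use (header paths are illustrative, and the `<...>` template arguments that the surrounding hunks have lost to extraction are written out explicitly):

```C++
#include <cstdint>
#include <memory>
#include <vector>

#include "ngraph/op/constant.hpp"
#include "ngraph/shape.hpp"

using namespace ngraph;

std::shared_ptr<op::Constant> make_i64_axes(const std::vector<int64_t>& axes)
{
    // Old spelling:
    //   op::Constant::create(element::i64, Shape{axes.size()}, axes);
    // New spelling: pass the scoped enum value; the implicit
    // element::Type(element::Type_t) conversion yields the same type object,
    // so the rest of the call is untouched.
    return op::Constant::create(element::Type_t::i64, Shape{axes.size()}, axes);
}
```

The same one-token edit is what turns, e.g., `op::Constant::create(element::boolean, ...)` into `op::Constant::create(element::Type_t::boolean, ...)` in the reduce-logical hunks above; only the longer lines get rewrapped by clang-format.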
@@ -1282,10 +1285,12 @@ TEST(constant_folding, const_concat_3d_single_elem) TEST(constant_folding, const_concat_axis_2) { - auto constant_1 = - op::Constant::create(element::i32, Shape{3, 1, 2}, vector{1, 2, 3, 4, 5, 6}); - auto constant_2 = op::Constant::create( - element::i32, Shape{3, 1, 4}, vector{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); + auto constant_1 = op::Constant::create( + element::Type_t::i32, Shape{3, 1, 2}, vector{1, 2, 3, 4, 5, 6}); + auto constant_2 = + op::Constant::create(element::Type_t::i32, + Shape{3, 1, 4}, + vector{7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}); auto concat = make_shared(NodeVector{constant_1, constant_2}, 2); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); @@ -1312,11 +1317,12 @@ TEST(constant_folding, const_concat_axis_2) TEST(constant_folding, const_concat_axis_1_bool_type) { auto constant_1 = - op::Constant::create(element::boolean, Shape{1, 1, 2}, vector{true, true}); + op::Constant::create(element::Type_t::boolean, Shape{1, 1, 2}, vector{true, true}); auto constant_2 = op::Constant::create( - element::boolean, Shape{1, 2, 2}, vector{true, false, true, false}); - auto constant_3 = op::Constant::create( - element::boolean, Shape{1, 3, 2}, vector{true, false, true, false, true, false}); + element::Type_t::boolean, Shape{1, 2, 2}, vector{true, false, true, false}); + auto constant_3 = op::Constant::create(element::Type_t::boolean, + Shape{1, 3, 2}, + vector{true, false, true, false, true, false}); auto concat = make_shared(NodeVector{constant_1, constant_2, constant_3}, 1); concat->set_friendly_name("test"); auto f = make_shared(concat, ParameterVector{}); @@ -1344,7 +1350,7 @@ TEST(constant_folding, const_concat_axis_1_bool_type) TEST(constant_folding, const_logical_not) { auto constant = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 0, 0, 1, 1}); + op::Constant::create(element::Type_t::boolean, Shape{2, 3}, vector{0, 1, 0, 0, 1, 1}); auto logical_not = make_shared(constant); logical_not->set_friendly_name("test"); auto f = make_shared(logical_not, ParameterVector{}); @@ -1370,9 +1376,9 @@ TEST(constant_folding, const_logical_not) TEST(constant_folding, const_equal) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1398,9 +1404,9 @@ TEST(constant_folding, const_equal) TEST(constant_folding, const_not_equal) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 2, 3, 5, 6}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1426,9 +1432,9 @@ TEST(constant_folding, const_not_equal) TEST(constant_folding, const_greater) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto 
constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1454,9 +1460,9 @@ TEST(constant_folding, const_greater) TEST(constant_folding, const_greater_eq) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1482,9 +1488,9 @@ TEST(constant_folding, const_greater_eq) TEST(constant_folding, const_less) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1510,9 +1516,9 @@ TEST(constant_folding, const_less) TEST(constant_folding, const_less_eq) { auto constant0 = - op::Constant::create(element::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{1, 2, 3, 4, 5, 6}); auto constant1 = - op::Constant::create(element::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); + op::Constant::create(element::Type_t::i32, Shape{2, 3}, vector{2, 2, 2, 5, 5, 5}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1537,10 +1543,10 @@ TEST(constant_folding, const_less_eq) TEST(constant_folding, const_or) { - auto constant0 = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); - auto constant1 = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); + auto constant0 = op::Constant::create( + element::Type_t::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); + auto constant1 = op::Constant::create( + element::Type_t::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1565,10 +1571,10 @@ TEST(constant_folding, const_or) TEST(constant_folding, const_xor) { - auto constant0 = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); - auto constant1 = - op::Constant::create(element::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); + auto constant0 = op::Constant::create( + element::Type_t::boolean, Shape{2, 3}, vector{0, 0, 1, 0, 1, 1}); + auto constant1 = op::Constant::create( + element::Type_t::boolean, Shape{2, 3}, vector{0, 1, 1, 1, 0, 1}); auto eq = make_shared(constant0, constant1); eq->set_friendly_name("test"); auto f = make_shared(eq, ParameterVector{}); @@ -1594,7 +1600,7 @@ TEST(constant_folding, const_xor) TEST(constant_folding, const_ceiling) { auto constant = op::Constant::create( - element::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); + element::Type_t::f32, Shape{2, 3}, 
vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); auto ceil = make_shared(constant); ceil->set_friendly_name("test"); auto f = make_shared(ceil, ParameterVector{}); @@ -1620,7 +1626,7 @@ TEST(constant_folding, const_ceiling) TEST(constant_folding, const_floor) { auto constant = op::Constant::create( - element::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); + element::Type_t::f32, Shape{2, 3}, vector{0.0f, 0.1f, -0.1f, -2.5f, 2.5f, 3.0f}); auto floor = make_shared(constant); floor->set_friendly_name("test"); auto f = make_shared(floor, ParameterVector{}); @@ -1646,12 +1652,12 @@ TEST(constant_folding, const_floor) TEST(constant_folding, const_gather_v1) { auto constant_data = op::Constant::create( - element::f32, + element::Type_t::f32, Shape{2, 5}, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}); auto constant_indices = - op::Constant::create(element::i64, Shape{4}, vector{0, 3, 2, 2}); - auto constant_axis = op::Constant::create(element::i64, Shape{1}, vector{1}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{0, 3, 2, 2}); + auto constant_axis = op::Constant::create(element::Type_t::i64, Shape{1}, vector{1}); auto gather = make_shared(constant_data, constant_indices, constant_axis); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{}); @@ -1677,12 +1683,12 @@ TEST(constant_folding, const_gather_v1) TEST(constant_folding, const_gather_v1_scalar) { auto constant_data = op::Constant::create( - element::f32, + element::Type_t::f32, Shape{2, 5}, vector{1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f}); auto constant_indices = - op::Constant::create(element::i64, Shape{4}, vector{0, 3, 2, 2}); - auto constant_axis = op::Constant::create(element::i64, Shape{}, vector{1}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{0, 3, 2, 2}); + auto constant_axis = op::Constant::create(element::Type_t::i64, Shape{}, vector{1}); auto gather = make_shared(constant_data, constant_indices, constant_axis); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{}); @@ -1707,17 +1713,18 @@ TEST(constant_folding, const_gather_v1_scalar) TEST(constant_folding, const_gather_v1_subgraph) { - const auto A = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); const float b_value = 3.21f; - const auto B_const = op::Constant::create(element::f32, {1}, {b_value}); - const auto C = make_shared(element::f32, Shape{1}); + const auto B_const = op::Constant::create(element::Type_t::f32, {1}, {b_value}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B_const, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, C}); @@ -1741,17 +1748,18 @@ TEST(constant_folding, const_gather_v1_subgraph) TEST(constant_folding, const_gather_v1_subgraph_neg_axis) { - const auto A = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); const float b_value = 1.23f; - 
const auto B = make_shared(element::f32, Shape{1}); - const auto C_const = op::Constant::create(element::f32, {1}, {b_value}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C_const = op::Constant::create(element::Type_t::f32, {1}, {b_value}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C_const}, axis); const vector indices{-1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, B}); @@ -1775,16 +1783,17 @@ TEST(constant_folding, const_gather_v1_subgraph_neg_axis) TEST(constant_folding, const_gather_v1_subgraph_no_constant_input) { - const auto A = make_shared(element::f32, Shape{1}); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); gather->set_friendly_name("test"); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1799,16 +1808,16 @@ TEST(constant_folding, const_gather_v1_subgraph_no_constant_input) TEST(constant_folding, const_gather_v1_subgraph_no_constant_input_scalar) { - const auto A = make_shared(element::f32, Shape{1}); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {}, indices); + const auto indices_const = op::Constant::create(element::Type_t::i64, {}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1823,16 +1832,17 @@ TEST(constant_folding, const_gather_v1_subgraph_no_constant_input_scalar) TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_zero_axis) { - const auto A = make_shared(element::f32, Shape{2, 2}); - const auto B = make_shared(element::f32, Shape{2, 2}); - const auto C = make_shared(element::f32, Shape{2, 2}); + const auto A = make_shared(element::Type_t::f32, Shape{2, 2}); + const auto B = make_shared(element::Type_t::f32, Shape{2, 2}); + 
const auto C = make_shared(element::Type_t::f32, Shape{2, 2}); const int64_t axis = 1; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1846,16 +1856,17 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_zero_axis) TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_single_indices) { - const auto A = make_shared(element::f32, Shape{1}); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{1}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{0, 1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1869,16 +1880,17 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_non_single_indices) TEST(constant_folding, const_gather_v1_subgraph_skip_if_concat_output_shape_dynamic) { - const auto A = make_shared(element::f32, PartialShape::dynamic()); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1892,16 +1904,17 @@ TEST(constant_folding, const_gather_v1_subgraph_skip_if_concat_output_shape_dyna TEST(constant_folding, const_gather_v1_subgraph_skip_if_not_single_input) { - const auto A = make_shared(element::f32, Shape{2}); - const auto B = make_shared(element::f32, Shape{1}); - const auto C = make_shared(element::f32, Shape{1}); + const auto A = make_shared(element::Type_t::f32, Shape{2}); + const auto B = make_shared(element::Type_t::f32, Shape{1}); + const auto C = make_shared(element::Type_t::f32, Shape{1}); const int64_t axis = 0; - const auto axis_const = op::Constant::create(element::i64, {}, {axis}); + const auto axis_const = 
op::Constant::create(element::Type_t::i64, {}, {axis}); const auto concat = make_shared(NodeVector{A, B, C}, axis); const vector indices{1}; - const auto indices_const = op::Constant::create(element::i64, {indices.size()}, indices); + const auto indices_const = + op::Constant::create(element::Type_t::i64, {indices.size()}, indices); const auto gather = make_shared(concat, indices_const, axis_const); auto f = make_shared(gather, ParameterVector{A, B, C}); @@ -1918,10 +1931,10 @@ TEST(constant_folding, const_strided_slice) Shape shape_in{16}; vector values_in{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}; - auto constant = make_shared(element::i32, shape_in, values_in); - auto begin = op::Constant::create(element::i64, {1}, {2}); - auto end = op::Constant::create(element::i64, {1}, {15}); - auto stride = op::Constant::create(element::i64, {1}, {3}); + auto constant = make_shared(element::Type_t::i32, shape_in, values_in); + auto begin = op::Constant::create(element::Type_t::i64, {1}, {2}); + auto end = op::Constant::create(element::Type_t::i64, {1}, {15}); + auto stride = op::Constant::create(element::Type_t::i64, {1}, {3}); auto slice = make_shared( constant, begin, end, stride, std::vector{0}, std::vector{0}); slice->set_friendly_name("test"); @@ -1953,8 +1966,9 @@ TEST(constant_folding, constant_dyn_reshape) Shape shape_shape{3}; vector values_shape{2, 4, 1}; - auto constant_in = make_shared(element::f32, shape_in, values_in); - auto constant_shape = make_shared(element::i64, shape_shape, values_shape); + auto constant_in = make_shared(element::Type_t::f32, shape_in, values_in); + auto constant_shape = + make_shared(element::Type_t::i64, shape_shape, values_shape); auto dyn_reshape = make_shared(constant_in, constant_shape, false); dyn_reshape->set_friendly_name("test"); auto f = make_shared(dyn_reshape, ParameterVector{}); @@ -1988,9 +2002,11 @@ TEST(constant_folding, constant_dyn_reshape_shape_not_originally_constant) vector values_shape_a{1, 3, 0}; vector values_shape_b{1, 1, 1}; - auto constant_in = make_shared(element::f32, shape_in, values_in); - auto constant_shape_a = make_shared(element::i64, shape_shape, values_shape_a); - auto constant_shape_b = make_shared(element::i64, shape_shape, values_shape_b); + auto constant_in = make_shared(element::Type_t::f32, shape_in, values_in); + auto constant_shape_a = + make_shared(element::Type_t::i64, shape_shape, values_shape_a); + auto constant_shape_b = + make_shared(element::Type_t::i64, shape_shape, values_shape_b); auto dyn_reshape = make_shared( constant_in, std::make_shared(constant_shape_a, constant_shape_b), false); dyn_reshape->set_friendly_name("test"); @@ -2022,8 +2038,8 @@ TEST(constant_folding, constant_transpose) Shape shape_perm{2}; vector values_perm{1, 0}; - auto constant_in = make_shared(element::f64, shape_in, values_in); - auto constant_perm = make_shared(element::i64, shape_perm, values_perm); + auto constant_in = make_shared(element::Type_t::f64, shape_in, values_in); + auto constant_perm = make_shared(element::Type_t::i64, shape_perm, values_perm); auto transpose = make_shared(constant_in, constant_perm); transpose->set_friendly_name("test"); auto f = make_shared(transpose, ParameterVector{}); @@ -2097,9 +2113,9 @@ TEST(constant_folding, constant_v1_select) vector values_f{11, 12, 13, 14, 15, 16, 17, 18}; auto constant_selection = - make_shared(element::boolean, Shape{4}, values_selection); - auto constant_t = make_shared(element::i64, Shape{4}, values_t); - auto constant_f = make_shared(element::i64, 
Shape{2, 4}, values_f); + make_shared(element::Type_t::boolean, Shape{4}, values_selection); + auto constant_t = make_shared(element::Type_t::i64, Shape{4}, values_t); + auto constant_f = make_shared(element::Type_t::i64, Shape{2, 4}, values_f); auto select = make_shared(constant_selection, constant_t, constant_f); select->set_friendly_name("test"); auto f = make_shared(select, ParameterVector{}); @@ -2124,8 +2140,8 @@ TEST(constant_folding, constant_v1_select) TEST(constant_folding, constant_v1_split) { vector data{.1f, .2f, .3f, .4f, .5f, .6f}; - const auto const_data = op::Constant::create(element::f32, Shape{data.size()}, data); - const auto const_axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto const_data = op::Constant::create(element::Type_t::f32, Shape{data.size()}, data); + const auto const_axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto num_splits = 3; auto split_v1 = make_shared(const_data, const_axis, num_splits); @@ -2159,8 +2175,8 @@ TEST(constant_folding, constant_v1_split) TEST(constant_folding, constant_v1_split_specialized) { vector data{.1f, .2f, .3f, .4f, .5f, .6f}; - const auto const_data = op::Constant::create(element::f32, Shape{data.size()}, data); - const auto const_axis = op::Constant::create(element::i64, Shape{}, {0}); + const auto const_data = op::Constant::create(element::Type_t::f32, Shape{data.size()}, data); + const auto const_axis = op::Constant::create(element::Type_t::i64, Shape{}, {0}); const auto num_splits = 3; auto split_v1 = make_shared(const_data, const_axis, num_splits); @@ -2201,8 +2217,8 @@ TEST(constant_folding, constant_v1_split_axis_1_4_splits) 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; - const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); - const auto const_axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto const_data = op::Constant::create(element::Type_t::i64, Shape{4, 4, 4}, data); + const auto const_axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto num_splits = 4; auto split_v1 = make_shared(const_data, const_axis, num_splits); @@ -2257,8 +2273,8 @@ TEST(constant_folding, constant_v1_split_axis_1_2_splits) 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; - const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); - const auto const_axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto const_data = op::Constant::create(element::Type_t::i64, Shape{4, 4, 4}, data); + const auto const_axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto num_splits = 2; auto split_v1 = make_shared(const_data, const_axis, num_splits); @@ -2298,11 +2314,11 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_2_splits) 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; - const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); - const auto const_axis = op::Constant::create(element::i16, Shape{}, {1}); + const auto const_data = op::Constant::create(element::Type_t::i64, Shape{4, 4, 4}, data); + const auto const_axis = op::Constant::create(element::Type_t::i16, Shape{}, {1}); vector values_lengths{3, 1}; - auto constant_lengths = - make_shared(element::i64, Shape{values_lengths.size()}, values_lengths); + auto constant_lengths = make_shared( + element::Type_t::i64, Shape{values_lengths.size()}, values_lengths); auto variadic_split_v1 = make_shared(const_data, const_axis, constant_lengths); @@ 
-2342,11 +2358,11 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_3_splits_neg_length) 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; - const auto const_data = op::Constant::create(element::i64, Shape{4, 4, 4}, data); - const auto const_axis = op::Constant::create(element::i32, Shape{}, {1}); + const auto const_data = op::Constant::create(element::Type_t::i64, Shape{4, 4, 4}, data); + const auto const_axis = op::Constant::create(element::Type_t::i32, Shape{}, {1}); vector values_lengths{1, 1, -1}; - auto constant_lengths = - make_shared(element::i64, Shape{values_lengths.size()}, values_lengths); + auto constant_lengths = make_shared( + element::Type_t::i64, Shape{values_lengths.size()}, values_lengths); auto variadic_split_v1 = make_shared(const_data, const_axis, constant_lengths); @@ -2387,10 +2403,10 @@ TEST(constant_folding, constant_v1_one_hot) const float on_value = 1.123f; const float off_value = 0.321f; - const auto indices_const = op::Constant::create(element::i64, Shape{3}, indices); - const auto depth_const = op::Constant::create(element::i64, Shape{}, {3}); - const auto on_const = op::Constant::create(element::f32, Shape{}, {on_value}); - const auto off_const = op::Constant::create(element::f32, Shape{}, {off_value}); + const auto indices_const = op::Constant::create(element::Type_t::i64, Shape{3}, indices); + const auto depth_const = op::Constant::create(element::Type_t::i64, Shape{}, {3}); + const auto on_const = op::Constant::create(element::Type_t::f32, Shape{}, {on_value}); + const auto off_const = op::Constant::create(element::Type_t::f32, Shape{}, {off_value}); int64_t axis = 1; auto one_hot_v1 = @@ -2427,10 +2443,10 @@ TEST(constant_folding, constant_v1_one_hot_negative_axes) const int32_t on_value = 4; const int32_t off_value = 1; - const auto indices_const = op::Constant::create(element::i64, Shape{4}, indices); - const auto depth_const = op::Constant::create(element::i64, Shape{}, {3}); - const auto on_const = op::Constant::create(element::i32, Shape{}, {on_value}); - const auto off_const = op::Constant::create(element::i32, Shape{}, {off_value}); + const auto indices_const = op::Constant::create(element::Type_t::i64, Shape{4}, indices); + const auto depth_const = op::Constant::create(element::Type_t::i64, Shape{}, {3}); + const auto on_const = op::Constant::create(element::Type_t::i32, Shape{}, {on_value}); + const auto off_const = op::Constant::create(element::Type_t::i32, Shape{}, {off_value}); int64_t axis = -1; auto one_hot_v1 = @@ -2470,10 +2486,10 @@ TEST(constant_folding, constant_v1_one_hot_negative_axes_2) auto on_value = true; auto off_value = false; - const auto indices_const = op::Constant::create(element::i64, Shape{2, 2}, indices); - const auto depth_const = op::Constant::create(element::i64, Shape{}, {3}); - const auto on_const = op::Constant::create(element::boolean, Shape{}, {on_value}); - const auto off_const = op::Constant::create(element::boolean, Shape{}, {off_value}); + const auto indices_const = op::Constant::create(element::Type_t::i64, Shape{2, 2}, indices); + const auto depth_const = op::Constant::create(element::Type_t::i64, Shape{}, {3}); + const auto on_const = op::Constant::create(element::Type_t::boolean, Shape{}, {on_value}); + const auto off_const = op::Constant::create(element::Type_t::boolean, Shape{}, {off_value}); int64_t axis = -1; auto one_hot_v1 = @@ -2516,9 +2532,9 @@ TEST(constant_folding, constant_tile_1d) Shape shape_out{4}; vector values_in{0, 1}; - auto data = make_shared(element::i32, 
shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{2}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2547,9 +2563,9 @@ TEST(constant_folding, constant_tile_3d_small_data_rank) Shape shape_out{2, 2, 4}; vector values_in{0, 1}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{2, 2, 2}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2578,9 +2594,9 @@ TEST(constant_folding, constant_tile_3d_few_repeats) Shape shape_out{2, 2, 3}; vector values_in{1, 2, 3, 4, 5, 6}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{2, 1}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2609,9 +2625,9 @@ TEST(constant_folding, constant_tile_1d_0_repeats) Shape shape_out{}; vector values_in{0, 1}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{0}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2640,9 +2656,9 @@ TEST(constant_folding, constant_tile_0_rank_data) Shape shape_out{4}; vector values_in{1}; - auto data = make_shared(element::i32, shape_in, values_in); + auto data = make_shared(element::Type_t::i32, shape_in, values_in); vector values_repeats{4}; - auto repeats = make_shared(element::i64, shape_repeats, values_repeats); + auto repeats = make_shared(element::Type_t::i64, shape_repeats, values_repeats); auto tile = make_shared(data, repeats); tile->set_friendly_name("test"); auto f = make_shared(tile, ParameterVector{}); @@ -2666,7 +2682,7 @@ TEST(constant_folding, constant_tile_0_rank_data) TEST(constant_folding, constant_non_zero_0D) { - auto data = op::Constant::create(element::i32, Shape{}, {1}); + auto data = op::Constant::create(element::Type_t::i32, Shape{}, {1}); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2694,7 +2710,7 @@ TEST(constant_folding, constant_non_zero_0D) TEST(constant_folding, constant_non_zero_1D) { vector values_in{0, 1, 0, 1}; - auto data = make_shared(element::i32, Shape{4}, values_in); + auto data = make_shared(element::Type_t::i32, Shape{4}, values_in); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2720,8 +2736,8 @@ TEST(constant_folding, constant_non_zero_1D) TEST(constant_folding, constant_non_zero_int32_output_type) { 
vector values_in{0, 1, 0, 1}; - auto data = make_shared(element::i32, Shape{4}, values_in); - auto non_zero = make_shared(data, element::i32); + auto data = make_shared(element::Type_t::i32, Shape{4}, values_in); + auto non_zero = make_shared(data, element::Type_t::i32); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2736,7 +2752,7 @@ TEST(constant_folding, constant_non_zero_int32_output_type) as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); - ASSERT_EQ(element::i32, new_const->get_element_type()); + ASSERT_EQ(element::Type_t::i32, new_const->get_element_type()); const auto values_out = new_const->get_vector(); const vector values_expected{1, 3}; @@ -2747,7 +2763,8 @@ TEST(constant_folding, constant_non_zero_int32_output_type) TEST(constant_folding, constant_non_zero_1D_all_indices) { const vector values_in{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}; - const auto data = make_shared(element::f32, Shape{values_in.size()}, values_in); + const auto data = + make_shared(element::Type_t::f32, Shape{values_in.size()}, values_in); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2773,7 +2790,7 @@ TEST(constant_folding, constant_non_zero_1D_all_indices) TEST(constant_folding, constant_non_zero_2D) { vector values_in{1, 0, 0, 0, 1, 0, 1, 1, 0}; - auto data = make_shared(element::i32, Shape{3, 3}, values_in); + auto data = make_shared(element::Type_t::i32, Shape{3, 3}, values_in); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2799,7 +2816,7 @@ TEST(constant_folding, constant_non_zero_2D) TEST(constant_folding, DISABLED_constant_non_zero_2D_all_indices) { const vector values_in{1, 1, 1, 1, 1, 1, 1, 1, 1}; - const auto data = make_shared(element::i8, Shape{3, 3}, values_in); + const auto data = make_shared(element::Type_t::i8, Shape{3, 3}, values_in); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2825,7 +2842,7 @@ TEST(constant_folding, DISABLED_constant_non_zero_2D_all_indices) TEST(constant_folding, DISABLED_constant_non_zero_2D_all_zeros) { const vector values_in{0, 0, 0, 0, 0, 0}; - const auto data = make_shared(element::u8, Shape{2, 3}, values_in); + const auto data = make_shared(element::Type_t::u8, Shape{2, 3}, values_in); const auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2848,7 +2865,7 @@ TEST(constant_folding, DISABLED_constant_non_zero_2D_all_zeros) TEST(constant_folding, constant_non_zero_3D) { vector values_in{1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0}; - auto data = make_shared(element::i32, Shape{2, 3, 3}, values_in); + auto data = make_shared(element::Type_t::i32, Shape{2, 3, 3}, values_in); auto non_zero = make_shared(data); non_zero->set_friendly_name("test"); auto f = make_shared(non_zero, ParameterVector{}); @@ -2878,12 +2895,12 @@ TEST(constant_folding, constant_scatter_elements_update_basic) const Shape indices_shape{2, 3}; const auto data_const = op::Constant::create( - element::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); + element::Type_t::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); const auto indices_const = - op::Constant::create(element::i32, indices_shape, 
{1, 0, 2, 0, 2, 1}); - const auto updates_const = - op::Constant::create(element::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); - const auto axis_const = op::Constant::create(element::i64, Shape{}, {0}); + op::Constant::create(element::Type_t::i32, indices_shape, {1, 0, 2, 0, 2, 1}); + const auto updates_const = op::Constant::create( + element::Type_t::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); + const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto scatter_elem_updt = make_shared( data_const, indices_const, updates_const, axis_const); @@ -2912,12 +2929,12 @@ TEST(constant_folding, constant_scatter_elements_update_negative_axis) const Shape indices_shape{2, 3}; const auto data_const = op::Constant::create( - element::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); + element::Type_t::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); const auto indices_const = - op::Constant::create(element::i32, indices_shape, {1, 0, 2, 0, 2, 1}); - const auto updates_const = - op::Constant::create(element::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); - const auto axis_const = op::Constant::create(element::i64, Shape{}, {-1}); + op::Constant::create(element::Type_t::i32, indices_shape, {1, 0, 2, 0, 2, 1}); + const auto updates_const = op::Constant::create( + element::Type_t::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); + const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{}, {-1}); auto scatter_elem_updt = make_shared( data_const, indices_const, updates_const, axis_const); @@ -2944,12 +2961,12 @@ TEST(constant_folding, constant_scatter_elements_update_1d_axis) const Shape indices_shape{2, 3}; const auto data_const = op::Constant::create( - element::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); + element::Type_t::f32, data_shape, std::vector(shape_size(data_shape), 0.f)); const auto indices_const = - op::Constant::create(element::i32, indices_shape, {1, 0, 2, 0, 2, 1}); - const auto updates_const = - op::Constant::create(element::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); - const auto axis_const = op::Constant::create(element::i64, Shape{1}, {0}); + op::Constant::create(element::Type_t::i32, indices_shape, {1, 0, 2, 0, 2, 1}); + const auto updates_const = op::Constant::create( + element::Type_t::f32, indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}); + const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{1}, {0}); auto scatter_elem_updt = make_shared( data_const, indices_const, updates_const, axis_const); @@ -2976,12 +2993,12 @@ TEST(constant_folding, constant_scatter_elements_update_3d_i16) const Shape indices_shape{2, 2, 3}; const auto data_const = op::Constant::create( - element::i16, data_shape, std::vector(shape_size(data_shape), 0)); - const auto indices_const = - op::Constant::create(element::i16, indices_shape, {1, 0, 2, 0, 2, 1, 2, 2, 2, 0, 1, 0}); - const auto updates_const = - op::Constant::create(element::i16, indices_shape, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); - const auto axis_const = op::Constant::create(element::i64, Shape{}, {1}); + element::Type_t::i16, data_shape, std::vector(shape_size(data_shape), 0)); + const auto indices_const = op::Constant::create( + element::Type_t::i16, indices_shape, {1, 0, 2, 0, 2, 1, 2, 2, 2, 0, 1, 0}); + const auto updates_const = op::Constant::create( + element::Type_t::i16, indices_shape, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}); + const auto axis_const = 
     auto scatter_elem_updt = make_shared<op::v3::ScatterElementsUpdate>(
         data_const, indices_const, updates_const, axis_const);
@@ -3009,10 +3026,10 @@ TEST(constant_folding, constant_scatter_elements_update_one_elem)
     const Shape indices_shape{1, 1, 1};
     const auto input_data = std::vector<int32_t>(shape_size(data_shape), 0);
-    const auto data_const = op::Constant::create(element::i32, data_shape, input_data);
-    const auto indices_const = op::Constant::create(element::i32, indices_shape, {1});
-    const auto updates_const = op::Constant::create(element::i32, indices_shape, {2});
-    const auto axis_const = op::Constant::create(element::i64, Shape{}, {0});
+    const auto data_const = op::Constant::create(element::Type_t::i32, data_shape, input_data);
+    const auto indices_const = op::Constant::create(element::Type_t::i32, indices_shape, {1});
+    const auto updates_const = op::Constant::create(element::Type_t::i32, indices_shape, {2});
+    const auto axis_const = op::Constant::create(element::Type_t::i64, Shape{}, {0});
     auto scatter_elem_updt = make_shared<op::v3::ScatterElementsUpdate>(
         data_const, indices_const, updates_const, axis_const);
@@ -3041,8 +3058,9 @@ void test_constant_folding_reshape_v1(Shape& shape_in,
                                       vector<int32_t> values_shape,
                                       bool zero_flag = false)
 {
-    auto constant_in = make_shared<op::Constant>(element::f32, shape_in, values_in);
-    auto constant_shape = make_shared<op::Constant>(element::i64, shape_shape, values_shape);
+    auto constant_in = make_shared<op::Constant>(element::Type_t::f32, shape_in, values_in);
+    auto constant_shape =
+        make_shared<op::Constant>(element::Type_t::i64, shape_shape, values_shape);
     auto dyn_reshape = make_shared<op::v1::Reshape>(constant_in, constant_shape, zero_flag);
     dyn_reshape->set_friendly_name("test");
     auto f = make_shared<Function>(dyn_reshape, ParameterVector{});
@@ -3094,8 +3112,8 @@ TEST(constant_folding, constant_dyn_reshape_v1_pattern_with_zero_dims)
 TEST(constant_folding, disable_constant_folding)
 {
-    auto input = make_shared<op::Parameter>(element::f32, Shape{1, 3});
-    auto constant_shape = op::Constant::create(element::i64, Shape{1}, {3});
+    auto input = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 3});
+    auto constant_shape = op::Constant::create(element::Type_t::i64, Shape{1}, {3});
     auto dyn_reshape = make_shared<op::v1::Reshape>(input, constant_shape, true);
     auto& rt_info = dyn_reshape->get_rt_info();
     rt_info["DISABLED_CONSTANT_FOLDING"];
diff --git a/ngraph/test/control_dependencies.cpp b/ngraph/test/control_dependencies.cpp
index 370df36e5db..7d6e66da874 100644
--- a/ngraph/test/control_dependencies.cpp
+++ b/ngraph/test/control_dependencies.cpp
@@ -80,8 +80,8 @@ constexpr NodeTypeInfo ControlDependencyOp::type_info;
 TEST(control_dependencies, cdep_ops)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto absn = make_shared<op::Abs>(A);
     auto cdop =
         make_shared<ControlDependencyOp>(OutputVector{A}, std::set<std::shared_ptr<Node>>{absn});
@@ -92,10 +92,10 @@ TEST(control_dependencies, cdep_ops)
 TEST(control_dependencies, two_cdep_ops)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto absn = make_shared<op::Abs>(A);
-    auto C = make_shared<op::Parameter>(element::f32, Shape{});
+    auto C = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto absn_c = make_shared<op::Abs>(C);
     auto cdop = make_shared<ControlDependencyOp>(OutputVector{A},
                                                  std::set<std::shared_ptr<Node>>{absn, absn_c});
@@ -106,9 +106,9 @@ TEST(control_dependencies, two_cdep_ops_op_on_top)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto absn = make_shared<op::Abs>(A);
-    auto B = make_shared<op::Parameter>(element::f32, Shape{});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto absn_b = make_shared<op::Abs>(B);
     auto cdop = make_shared<ControlDependencyOp>(OutputVector{A},
                                                  std::set<std::shared_ptr<Node>>{absn, absn_b});
@@ -120,7 +120,7 @@ TEST(control_dependencies, two_cdep_ops_op_on_top)
 TEST(control_dependencies, clone_function_cdop)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto absn = make_shared<op::Abs>(A);
     auto cdop =
         make_shared<ControlDependencyOp>(OutputVector{A}, std::set<std::shared_ptr<Node>>{absn});
@@ -139,9 +139,9 @@ TEST(control_dependencies, clone_function_cdop)
 TEST(control_dependencies, clone_function_cdop_abs)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto absn = make_shared<op::Abs>(A);
-    auto B = make_shared<op::Parameter>(element::f32, Shape{});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{});
     auto absn_b = make_shared<op::Abs>(B);
     auto cdop = make_shared<ControlDependencyOp>(OutputVector{A},
                                                  std::set<std::shared_ptr<Node>>{absn, absn_b});
@@ -175,8 +175,8 @@ static size_t count_control_dependencies(const shared_ptr<Node>& node,
 TEST(control_dependencies, replace_node)
 {
     Shape shape{2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, shape);
-    auto B = make_shared<op::Parameter>(element::f32, shape);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto MUL_AB = A * B;
     auto MUL_BA = B * A;
     auto ADD = A + B;
diff --git a/ngraph/test/convert_u1_to_string.cpp b/ngraph/test/convert_u1_to_string.cpp
index fd12304831a..a994a73cbd5 100644
--- a/ngraph/test/convert_u1_to_string.cpp
+++ b/ngraph/test/convert_u1_to_string.cpp
@@ -25,7 +25,7 @@ using namespace std;
 TEST(convert_u1_to_string, convert_u1_to_string)
 {
     vector<uint8_t> values{171, 16};
-    auto constant = make_shared<op::Constant>(element::u1, Shape{12}, &values[0]);
+    auto constant = make_shared<op::Constant>(element::Type_t::u1, Shape{12}, &values[0]);
     vector<string> ref{"1", "0", "1", "0", "1", "0", "1", "1", "0", "0", "0", "1"};
     for (size_t i = 0; i < 12; ++i)
diff --git a/ngraph/test/copy.cpp b/ngraph/test/copy.cpp
index dfc7bac2674..f1c97ec4837 100644
--- a/ngraph/test/copy.cpp
+++ b/ngraph/test/copy.cpp
@@ -33,8 +33,8 @@ template <typename OP>
 bool check_unary()
 {
     Shape shape{1};
-    auto arg0 = make_shared<op::Parameter>(element::f32, shape);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape)};
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    OutputVector new_args{make_shared<op::Parameter>(element::Type_t::f32, shape)};
     auto node = make_shared<OP>(arg0);
     auto new_node = node->copy_with_new_inputs(new_args);
@@ -46,10 +46,10 @@ template <typename OP>
 bool check_binary()
 {
     Shape shape{1};
-    auto arg0 = make_shared<op::Parameter>(element::f32, shape);
-    auto arg1 = make_shared<op::Parameter>(element::f32, shape);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape),
-                          make_shared<op::Parameter>(element::f32, shape)};
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto arg1 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    OutputVector new_args{make_shared<op::Parameter>(element::Type_t::f32, shape),
+                          make_shared<op::Parameter>(element::Type_t::f32, shape)};
     auto node = make_shared<OP>(arg0, arg1);
     auto new_node = node->copy_with_new_inputs(new_args);
@@ -87,15 +87,16 @@ TEST(copy, broadcast)
     Shape shape{1, 3};
     Shape new_shape{4, 1, 3};
     AxisSet axes{1, 2};
-    auto arg0 = make_shared<op::Parameter>(element::f32, shape);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape),
-                          op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape),
-                          op::Constant::create(element::i64, Shape{axes.size()}, axes.to_vector())};
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    OutputVector new_args{
+        make_shared<op::Parameter>(element::Type_t::f32, shape),
+        op::Constant::create(element::Type_t::u64, Shape{new_shape.size()}, new_shape),
+        op::Constant::create(element::Type_t::i64, Shape{axes.size()}, axes.to_vector())};
     auto node = make_shared<op::v1::Broadcast>(
         arg0,
-        op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape),
-        op::Constant::create(element::i64, Shape{axes.size()}, axes.to_vector()));
+        op::Constant::create(element::Type_t::u64, Shape{new_shape.size()}, new_shape),
+        op::Constant::create(element::Type_t::i64, Shape{axes.size()}, axes.to_vector()));
     auto new_node = node->copy_with_new_inputs(new_args);
     auto node_cast = as_type_ptr<op::v1::Broadcast>(new_node);
     ASSERT_NE(node_cast, nullptr);
@@ -117,10 +118,10 @@ TEST(copy, ceiling)
 TEST(copy, concat)
 {
     Shape shape{1};
-    auto arg0 = make_shared<op::Parameter>(element::f32, shape);
-    auto arg1 = make_shared<op::Parameter>(element::f32, shape);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape),
-                          make_shared<op::Parameter>(element::f32, shape)};
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto arg1 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    OutputVector new_args{make_shared<op::Parameter>(element::Type_t::f32, shape),
+                          make_shared<op::Parameter>(element::Type_t::f32, shape)};
     size_t axis = 0;
     auto node = make_shared<op::Concat>(NodeVector{arg0, arg1}, axis);
     auto new_node = node->clone_with_new_inputs(new_args);
@@ -136,7 +137,7 @@ TEST(copy, constant)
 {
     Shape shape{};
     vector<float> c{2.4f};
-    auto& et = element::f32;
+    element::Type et = element::Type_t::f32;
     auto node = op::Constant::create(et, shape, c);
     auto new_node = node->clone_with_new_inputs(OutputVector{});
     auto node_cast = as_type_ptr<op::Constant>(new_node);
@@ -151,9 +152,9 @@ TEST(copy, constant)
 TEST(copy, convert)
 {
     Shape shape;
-    auto& et = element::f64;
-    auto arg0 = make_shared<op::Parameter>(element::f32, shape);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape)};
+    element::Type et = element::Type_t::f64;
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    OutputVector new_args{make_shared<op::Parameter>(element::Type_t::f32, shape)};
     auto node = make_shared<op::Convert>(arg0, et);
     auto new_node = node->clone_with_new_inputs(new_args);
@@ -248,7 +249,7 @@ TEST(copy, not_equal)
 TEST(copy, parameter)
 {
     Shape shape{1};
-    auto node = make_shared<op::Parameter>(element::f32, shape);
+    auto node = make_shared<op::Parameter>(element::Type_t::f32, shape);
     auto new_node = node->clone_with_new_inputs({});
     auto node_cast = as_type_ptr<op::Parameter>(new_node);
     ASSERT_NE(node_cast, nullptr);
@@ -267,12 +268,13 @@ TEST(copy, reduce_sum)
 {
     Shape shape{4, 3};
     AxisSet axes{1};
-    auto arg0 = make_shared<op::Parameter>(element::f32, shape);
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::f32, shape);
-    auto axes_node = op::Constant::create(element::i64, {axes.size()}, axes.to_vector());
+    auto axes_node = op::Constant::create(element::Type_t::i64, {axes.size()}, axes.to_vector());
     auto node = make_shared<op::v1::ReduceSum>(arg0, axes_node, true);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape),
-                          op::Constant::create(element::i64, {axes.size()}, axes.to_vector())};
+    OutputVector new_args{
+        make_shared<op::Parameter>(element::Type_t::f32, shape),
+        op::Constant::create(element::Type_t::i64, {axes.size()}, axes.to_vector())};
     auto new_node = node->clone_with_new_inputs(new_args);
     auto node_cast = as_type_ptr<op::v1::ReduceSum>(new_node);
     ASSERT_NE(node_cast, nullptr);
@@ -288,11 +290,12 @@ TEST(copy, reshape)
     Shape shape_in{2, 3, 4};
     Shape shape_out{6, 4};
-    auto arg0 = make_shared<op::Parameter>(element::f32, shape_in);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape_in),
-                          op::Constant::create(element::u64, {shape_out.size()}, shape_out)};
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::f32, shape_in);
+    OutputVector new_args{
+        make_shared<op::Parameter>(element::Type_t::f32, shape_in),
+        op::Constant::create(element::Type_t::u64, {shape_out.size()}, shape_out)};
-    auto shape_pattern = op::Constant::create(element::u64, {shape_out.size()}, shape_out);
+    auto shape_pattern = op::Constant::create(element::Type_t::u64, {shape_out.size()}, shape_out);
     auto node = make_shared<op::v1::Reshape>(arg0, shape_pattern, false);
     auto new_node = node->clone_with_new_inputs(new_args);
     auto node_cast = as_type_ptr<op::v1::Reshape>(new_node);
@@ -306,12 +309,12 @@ TEST(copy, reshape)
 TEST(copy, select)
 {
     Shape shape{1};
-    auto arg0 = make_shared<op::Parameter>(element::boolean, shape);
-    auto arg1 = make_shared<op::Parameter>(element::f32, shape);
-    auto arg2 = make_shared<op::Parameter>(element::f32, shape);
-    OutputVector new_args{make_shared<op::Parameter>(element::boolean, shape),
-                          make_shared<op::Parameter>(element::f32, shape),
-                          make_shared<op::Parameter>(element::f32, shape)};
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::boolean, shape);
+    auto arg1 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    auto arg2 = make_shared<op::Parameter>(element::Type_t::f32, shape);
+    OutputVector new_args{make_shared<op::Parameter>(element::Type_t::boolean, shape),
+                          make_shared<op::Parameter>(element::Type_t::f32, shape),
+                          make_shared<op::Parameter>(element::Type_t::f32, shape)};
     auto node = make_shared<op::v1::Select>(arg0, arg1, arg2);
     auto new_node = node->clone_with_new_inputs(new_args);
@@ -344,15 +347,15 @@ TEST(copy, strided_slice)
     Coordinate upper{2, 3, 4};
     Strides strides{1, 1, 1};
-    auto arg0 = make_shared<op::Parameter>(element::f32, shape_in);
-    OutputVector new_args{make_shared<op::Parameter>(element::f32, shape_in),
-                          op::Constant::create(element::u64, {lower.size()}, lower),
-                          op::Constant::create(element::u64, {upper.size()}, upper),
-                          op::Constant::create(element::i64, {strides.size()}, strides)};
+    auto arg0 = make_shared<op::Parameter>(element::Type_t::f32, shape_in);
+    OutputVector new_args{make_shared<op::Parameter>(element::Type_t::f32, shape_in),
+                          op::Constant::create(element::Type_t::u64, {lower.size()}, lower),
+                          op::Constant::create(element::Type_t::u64, {upper.size()}, upper),
+                          op::Constant::create(element::Type_t::i64, {strides.size()}, strides)};
-    auto begin_node = op::Constant::create(element::i64, {lower.size()}, lower);
-    auto end_node = op::Constant::create(element::i64, {upper.size()}, upper);
-    auto strides_node = op::Constant::create(element::i64, {strides.size()}, strides);
+    auto begin_node = op::Constant::create(element::Type_t::i64, {lower.size()}, lower);
+    auto end_node = op::Constant::create(element::Type_t::i64, {upper.size()}, upper);
+    auto strides_node = op::Constant::create(element::Type_t::i64, {strides.size()}, strides);
     auto node = make_shared<op::v1::StridedSlice>(arg0,
                                                   begin_node,
                                                   end_node,
@@ -398,23 +401,23 @@ TEST(copy, tanh)
 TEST(copy, loop)
 {
     // That which we iterate over
-    auto X = make_shared<op::Parameter>(element::f32, Shape{32, 1, 10});
-    auto Y = make_shared<op::Parameter>(element::f32, Shape{32, 1, 10});
-    auto M = make_shared<op::Parameter>(element::f32, Shape{32, 1, 10});
+    auto X = make_shared<op::Parameter>(element::Type_t::f32, Shape{32, 1, 10});
+    auto Y = make_shared<op::Parameter>(element::Type_t::f32, Shape{32, 1, 10});
+    auto M = make_shared<op::Parameter>(element::Type_t::f32, Shape{32, 1, 10});
     // Set up the cell body, a function from (Xi, Yi) -> (Zo)
     // Body parameters
-    auto current_iteration = make_shared<op::Parameter>(element::i64, Shape{});
-    auto Xi = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto Yi = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto M_body = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
-    auto body_condition =
-        std::make_shared<ngraph::opset5::Constant>(ngraph::element::boolean, ngraph::Shape{}, true);
+    auto current_iteration = make_shared<op::Parameter>(element::Type_t::i64, Shape{});
+    auto Xi = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto Yi = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto M_body = make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic());
+    auto body_condition = std::make_shared<ngraph::opset5::Constant>(
+        ngraph::element::Type_t::boolean, ngraph::Shape{}, true);
-    auto trip_count =
-        std::make_shared<ngraph::opset5::Constant>(ngraph::element::i64, ngraph::Shape{}, 10);
-    auto exec_condition =
-        std::make_shared<ngraph::opset5::Constant>(ngraph::element::boolean, ngraph::Shape{}, true);
+    auto trip_count = std::make_shared<ngraph::opset5::Constant>(
+        ngraph::element::Type_t::i64, ngraph::Shape{}, 10);
+    auto exec_condition = std::make_shared<ngraph::opset5::Constant>(
+        ngraph::element::Type_t::boolean, ngraph::Shape{}, true);
     // Body
     auto sum = make_shared<ngraph::opset5::Add>(Xi, Yi);
     auto Zo = make_shared<ngraph::opset5::Multiply>(sum, M_body);
@@ -437,9 +440,9 @@ TEST(copy, loop)
     auto out2 = loop->get_concatenated_slices(Zo, 0, 1, 1, -1, 1);
     loop->validate_and_infer_types();
     // That which we iterate over
-    auto X_new = make_shared<op::Parameter>(element::f32, Shape{3, 2, 5});
-    auto Y_new = make_shared<op::Parameter>(element::f32, Shape{3, 2, 5});
-    auto M_new = make_shared<op::Parameter>(element::f32, Shape{3, 2, 5});
+    auto X_new = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 2, 5});
+    auto Y_new = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 2, 5});
+    auto M_new = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 2, 5});
     OutputVector new_args = {trip_count, exec_condition, X_new, Y_new, M_new};
     auto loop_copy = loop->clone_with_new_inputs(new_args);
diff --git a/ngraph/test/dyn_elimination.cpp b/ngraph/test/dyn_elimination.cpp
index a3474cabccb..dc18dec85b1 100644
--- a/ngraph/test/dyn_elimination.cpp
+++ b/ngraph/test/dyn_elimination.cpp
@@ -30,10 +30,10 @@ using namespace std;
 TEST(dyn_elimination, transpose)
 {
     Shape shape_in{2, 4, 6, 8};
-    auto param = make_shared<op::Parameter>(element::boolean, shape_in);
+    auto param = make_shared<op::Parameter>(element::Type_t::boolean, shape_in);
     auto constant_perm =
-        make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{2, 3, 1, 0});
+        make_shared<op::Constant>(element::Type_t::i64, Shape{4}, vector<int64_t>{2, 3, 1, 0});
     auto transpose = make_shared<op::Transpose>(param, constant_perm);
@@ -52,7 +52,7 @@ TEST(dyn_elimination, transpose)
     ASSERT_EQ(new_reshape->get_input_order(), (AxisVector{2, 3, 1, 0}));
     ASSERT_EQ(new_reshape->get_output_shape(0), (Shape{6, 8, 4, 2}));
-    ASSERT_EQ(new_reshape->get_output_element_type(0), element::boolean);
+    ASSERT_EQ(new_reshape->get_output_element_type(0), element::Type_t::boolean);
 }
 // For now, we can't handle the case where the input has dynamic shapes,
@@ -63,10 +63,10 @@ TEST(dyn_elimination, transpose_dyn_shape)
 {
     PartialShape shape_in{2, 4, Dimension::dynamic(), 8};
-    auto param = make_shared<op::Parameter>(element::boolean, shape_in);
+    auto param = make_shared<op::Parameter>(element::Type_t::boolean, shape_in);
     auto constant_perm =
-        make_shared<op::Constant>(element::i64, Shape{4}, vector<int64_t>{2, 3, 1, 0});
+        make_shared<op::Constant>(element::Type_t::i64, Shape{4}, vector<int64_t>{2, 3, 1, 0});
     auto transpose = make_shared<op::Transpose>(param, constant_perm);
@@ -83,20 +83,23 @@ TEST(dyn_elimination, transpose_dyn_shape)
         as_type_ptr<op::Transpose>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
     ASSERT_TRUE(new_transpose);
-    ASSERT_EQ(new_transpose->get_output_element_type(0), element::boolean);
+    ASSERT_EQ(new_transpose->get_output_element_type(0), element::Type_t::boolean);
     ASSERT_TRUE(new_transpose->get_output_partial_shape(0).relaxes(
         PartialShape{Dimension::dynamic(), 8, 4, 2}));
 }
 TEST(dyn_elimination, range)
 {
-    auto constant_start = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{0});
-    auto constant_stop = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{5});
-    auto constant_step = make_shared<op::Constant>(element::i64, Shape{}, vector<int64_t>{2});
+    auto constant_start =
+        make_shared<op::Constant>(element::Type_t::i64, Shape{}, vector<int64_t>{0});
+    auto constant_stop =
+        make_shared<op::Constant>(element::Type_t::i64, Shape{}, vector<int64_t>{5});
+    auto constant_step =
+        make_shared<op::Constant>(element::Type_t::i64, Shape{}, vector<int64_t>{2});
     auto range = make_shared<op::Range>(constant_start, constant_stop, constant_step);
-    ASSERT_EQ(range->get_element_type(), element::i64);
+    ASSERT_EQ(range->get_element_type(), element::Type_t::i64);
     ASSERT_EQ(range->get_shape(), (Shape{3}));
     auto f = make_shared<Function>(range, ParameterVector{});
@@ -112,7 +115,7 @@ TEST(dyn_elimination, range)
         as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
     ASSERT_NE(replacement, nullptr);
-    ASSERT_EQ(replacement->get_element_type(), element::i64);
+    ASSERT_EQ(replacement->get_element_type(), element::Type_t::i64);
     ASSERT_EQ(replacement->get_shape(), (Shape{3}));
     auto vals = replacement->get_vector<int64_t>();
@@ -122,13 +125,16 @@ TEST(dyn_elimination, range)
 TEST(dyn_elimination, range_f64)
 {
-    auto constant_start = make_shared<op::Constant>(element::f64, Shape{}, vector<double>{-0.5});
-    auto constant_stop = make_shared<op::Constant>(element::f64, Shape{}, vector<double>{2});
-    auto constant_step = make_shared<op::Constant>(element::f64, Shape{}, vector<double>{0.25});
+    auto constant_start =
+        make_shared<op::Constant>(element::Type_t::f64, Shape{}, vector<double>{-0.5});
+    auto constant_stop =
+        make_shared<op::Constant>(element::Type_t::f64, Shape{}, vector<double>{2});
+    auto constant_step =
+        make_shared<op::Constant>(element::Type_t::f64, Shape{}, vector<double>{0.25});
     auto range = make_shared<op::Range>(constant_start, constant_stop, constant_step);
-    ASSERT_EQ(range->get_element_type(), element::f64);
+    ASSERT_EQ(range->get_element_type(), element::Type_t::f64);
     ASSERT_EQ(range->get_shape(), (Shape{10}));
     auto f = make_shared<Function>(range, ParameterVector{});
@@ -144,7 +150,7 @@ TEST(dyn_elimination, range_f64)
         as_type_ptr<op::Constant>(f->get_results().at(0)->input_value(0).get_node_shared_ptr());
     ASSERT_NE(replacement, nullptr);
-    ASSERT_EQ(replacement->get_element_type(), element::f64);
+    ASSERT_EQ(replacement->get_element_type(), element::Type_t::f64);
     ASSERT_EQ(replacement->get_shape(), (Shape{10}));
     auto vals = replacement->get_vector<double>();
diff --git a/ngraph/test/element_type.cpp b/ngraph/test/element_type.cpp
index 625679f553e..767a2939887 100644
--- a/ngraph/test/element_type.cpp
+++ b/ngraph/test/element_type.cpp
@@ -24,62 +24,62 @@ using namespace ngraph;
 TEST(element_type, from)
 {
-    EXPECT_EQ(element::from<char>(), element::boolean);
-    EXPECT_EQ(element::from<bool>(), element::boolean);
-    EXPECT_EQ(element::from<float>(), element::f32);
-    EXPECT_EQ(element::from<double>(), element::f64);
-    EXPECT_EQ(element::from<int8_t>(), element::i8);
-    EXPECT_EQ(element::from<int16_t>(), element::i16);
-    EXPECT_EQ(element::from<int32_t>(), element::i32);
-    EXPECT_EQ(element::from<int64_t>(), element::i64);
-    EXPECT_EQ(element::from<uint8_t>(), element::u8);
-    EXPECT_EQ(element::from<uint16_t>(), element::u16);
-    EXPECT_EQ(element::from<uint32_t>(), element::u32);
-    EXPECT_EQ(element::from<uint64_t>(), element::u64);
+    EXPECT_EQ(element::from<char>(), element::Type_t::boolean);
+    EXPECT_EQ(element::from<bool>(), element::Type_t::boolean);
+    EXPECT_EQ(element::from<float>(), element::Type_t::f32);
+    EXPECT_EQ(element::from<double>(), element::Type_t::f64);
+    EXPECT_EQ(element::from<int8_t>(), element::Type_t::i8);
+    EXPECT_EQ(element::from<int16_t>(), element::Type_t::i16);
+    EXPECT_EQ(element::from<int32_t>(), element::Type_t::i32);
+    EXPECT_EQ(element::from<int64_t>(), element::Type_t::i64);
+    EXPECT_EQ(element::from<uint8_t>(), element::Type_t::u8);
+    EXPECT_EQ(element::from<uint16_t>(), element::Type_t::u16);
+    EXPECT_EQ(element::from<uint32_t>(), element::Type_t::u32);
+    EXPECT_EQ(element::from<uint64_t>(), element::Type_t::u64);
 }
 TEST(element_type, mapable)
 {
     std::map<element::Type, std::string> test_map;
-    test_map.insert({element::f32, "float"});
+    test_map.insert({element::Type_t::f32, "float"});
 }
 TEST(element_type, merge_both_dynamic)
 {
     element::Type t;
-    ASSERT_TRUE(element::Type::merge(t, element::dynamic, element::dynamic));
+    ASSERT_TRUE(element::Type::merge(t, element::Type_t::dynamic, element::Type_t::dynamic));
     ASSERT_TRUE(t.is_dynamic());
 }
 TEST(element_type, merge_left_dynamic)
 {
     element::Type t;
-    ASSERT_TRUE(element::Type::merge(t, element::dynamic, element::u64));
+    ASSERT_TRUE(element::Type::merge(t, element::Type_t::dynamic, element::Type_t::u64));
     ASSERT_TRUE(t.is_static());
-    ASSERT_EQ(t, element::u64);
+    ASSERT_EQ(t, element::Type_t::u64);
 }
 TEST(element_type, merge_right_dynamic)
 {
     element::Type t;
-    ASSERT_TRUE(element::Type::merge(t, element::i16, element::dynamic));
+    ASSERT_TRUE(element::Type::merge(t, element::Type_t::i16, element::Type_t::dynamic));
     ASSERT_TRUE(t.is_static());
-    ASSERT_EQ(t, element::i16);
+    ASSERT_EQ(t, element::Type_t::i16);
 }
 TEST(element_type, merge_both_static_equal)
 {
     element::Type t;
-    ASSERT_TRUE(element::Type::merge(t, element::f64, element::f64));
+    ASSERT_TRUE(element::Type::merge(t, element::Type_t::f64, element::Type_t::f64));
     ASSERT_TRUE(t.is_static());
-    ASSERT_EQ(t, element::f64);
+    ASSERT_EQ(t, element::Type_t::f64);
 }
 TEST(element_type, merge_both_static_unequal)
 {
-    element::Type t = element::f32;
-    ASSERT_FALSE(element::Type::merge(t, element::i8, element::i16));
+    element::Type t = element::Type_t::f32;
+    ASSERT_FALSE(element::Type::merge(t, element::Type_t::i8, element::Type_t::i16));
     ASSERT_TRUE(t.is_static());
-    ASSERT_EQ(t, element::f32);
+    ASSERT_EQ(t, element::Type_t::f32);
 }
diff --git a/ngraph/test/eval.cpp b/ngraph/test/eval.cpp
index b0d4b670b8d..f551e398800 100644
--- a/ngraph/test/eval.cpp
+++ b/ngraph/test/eval.cpp
@@ -88,7 +88,7 @@ using namespace ngraph;
 TEST(eval, bad_get_data_ptr)
 {
-    HostTensor c(element::f32, Shape{});
+    HostTensor c(element::Type_t::f32, Shape{});
     *c.get_data_ptr<float>() = 1.0;
     EXPECT_EQ(*c.get_data_ptr<float>(), 1.0);
     try
@@ -113,7 +113,7 @@ TEST(eval, bad_get_data_ptr)
 TEST(eval, max_eval_parameter)
 {
-    auto p = make_shared<op::Parameter>(element::i64, Shape{});
+    auto p = make_shared<op::Parameter>(element::Type_t::i64, Shape{});
     auto result = maximum_value(p);
     EXPECT_FALSE(result.first);
@@ -122,7 +122,7 @@ TEST(eval, max_eval_parameter)
 TEST(eval, max_eval_constant)
 {
-    auto c = op::Constant::create(element::i64, Shape{}, {27});
+    auto c = op::Constant::create(element::Type_t::i64, Shape{}, {27});
     auto result = maximum_value(c);
     ASSERT_TRUE(result.first);
     EXPECT_EQ(result.second, 27);
@@ -130,8 +130,8 @@ TEST(eval, max_eval_constant)
 TEST(eval, max_eval_minimum_constant)
 {
-    auto c = op::Constant::create(element::i64, Shape{}, {27});
-    auto p = make_shared<op::Parameter>(element::i64, Shape{});
+    auto c = op::Constant::create(element::Type_t::i64, Shape{}, {27});
+    auto p = make_shared<op::Parameter>(element::Type_t::i64, Shape{});
     auto m = make_shared<op::v1::Minimum>(c, p);
     auto result = maximum_value(m);
     ASSERT_TRUE(result.first);
@@ -142,31 +142,31 @@ TEST(eval, max_eval_reduce_min)
 {
     auto concat = make_shared<op::v0::Convert>(
         make_shared<op::v0::Concat>(
-            OutputVector{make_shared<op::v0::Parameter>(element::i64, Shape{4}),
-                         make_shared<op::v0::Constant>(element::i64, Shape{4}, 37)},
+            OutputVector{make_shared<op::v0::Parameter>(element::Type_t::i64, Shape{4}),
+                         make_shared<op::v0::Constant>(element::Type_t::i64, Shape{4}, 37)},
             0),
-        element::i32);
+        element::Type_t::i32);
     auto reduce = make_shared<op::v0::Convert>(
-        make_shared<op::v1::ReduceMin>(concat,
-                                       make_shared<op::v0::Constant>(element::i32, Shape{1}, 0)),
-        element::i64);
+        make_shared<op::v1::ReduceMin>(
+            concat, make_shared<op::v0::Constant>(element::Type_t::i32, Shape{1}, 0)),
+        element::Type_t::i64);
     auto squeezes = make_shared<op::v0::Squeeze>(
-        make_shared<op::v0::Squeeze>(reduce,
-                                     make_shared<op::v0::Constant>(element::i32, Shape{1}, 0)),
-        make_shared<op::v0::Constant>(element::i64, Shape{1}, 0));
+        make_shared<op::v0::Squeeze>(
+            reduce, make_shared<op::v0::Constant>(element::Type_t::i32, Shape{1}, 0)),
+        make_shared<op::v0::Constant>(element::Type_t::i64, Shape{1}, 0));
     EXPECT_EQ(maximum_value(squeezes).second, 37);
 }
 TEST(eval, evaluate_shape_of)
 {
-    auto p = make_shared<op::Parameter>(element::f32, PartialShape{-1, -1});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{-1, -1});
     auto so = make_shared<op::v0::ShapeOf>(p);
     auto fun = make_shared<Function>(OutputVector{so}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::f32>(
                                   Shape{2, 3}, {0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::i64);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::i64);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2}));
     auto result_shape = read_vector<int64_t>(result);
     vector<int64_t> arg_shape{2, 3};
@@ -175,10 +175,10 @@ TEST(eval, evaluate_shape_of)
 TEST(eval, evaluate_dynamic_range_sum)
 {
-    auto p_start = make_shared<op::Parameter>(element::f32, PartialShape{});
-    auto p_stop = make_shared<op::Parameter>(element::f32, PartialShape{});
-    auto p_step = make_shared<op::Parameter>(element::f32, PartialShape{});
-    auto p1 = make_shared<op::Parameter>(element::f32, PartialShape{});
+    auto p_start = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{});
+    auto p_stop = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{});
+    auto p_step = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{});
+    auto p1 = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{});
     auto range = make_shared<op::Range>(p_start, p_stop, p_step);
     auto add = make_shared<op::v1::Add>(range, p1);
     auto fun =
         make_shared<Function>(OutputVector{add}, ParameterVector{p_start, p_stop, p_step, p1});
@@ -189,7 +189,7 @@ TEST(eval, evaluate_dynamic_range_sum)
                                make_host_tensor<element::Type_t::f32>({}, {10.0f}),
                                make_host_tensor<element::Type_t::f32>({}, {3.0f}),
                                make_host_tensor<element::Type_t::f32>({}, {7.0f})}));
-    EXPECT_EQ(result_tensor->get_element_type(), element::f32);
+    EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3}));
     auto cval = read_vector<float>(result_tensor);
     vector<float> seq{8.0f, 11.0f, 14.0f};
@@ -199,27 +199,27 @@ TEST(eval, evaluate_dynamic_range_sum)
 #ifdef NGRAPH_INTERPRETER_ENABLE
 TEST(eval, interpret_dynamic_range_sum)
 {
-    auto p_start = make_shared<op::Parameter>(element::f32, PartialShape{});
-    auto p_stop = make_shared<op::Parameter>(element::f32, PartialShape{});
-    auto p_step = make_shared<op::Parameter>(element::f32, PartialShape{});
-    auto p1 = make_shared<op::Parameter>(element::f32, PartialShape{});
+    auto p_start = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{});
+    auto p_stop = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{});
+    auto p_step = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{});
+    auto p1 = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{});
     auto range = make_shared<op::Range>(p_start, p_stop, p_step);
     auto add = make_shared<op::v1::Add>(range, p1);
     auto fun =
         make_shared<Function>(OutputVector{add}, ParameterVector{p_start, p_stop, p_step, p1});
     auto backend = runtime::Backend::create("INTERPRETER");
-    auto p_start_val = backend->create_tensor(element::f32, Shape{});
+    auto p_start_val = backend->create_tensor(element::Type_t::f32, Shape{});
     copy_data(p_start_val, vector<float>{1.0f});
-    auto p_stop_val = backend->create_tensor(element::f32, Shape{});
+    auto p_stop_val = backend->create_tensor(element::Type_t::f32, Shape{});
     copy_data(p_stop_val, vector<float>{10.0f});
-    auto p_step_val = backend->create_tensor(element::f32, Shape{});
+    auto p_step_val = backend->create_tensor(element::Type_t::f32, Shape{});
     copy_data(p_step_val, vector<float>{3.0f});
-    auto p1_val = backend->create_tensor(element::f32, Shape{});
+    auto p1_val = backend->create_tensor(element::Type_t::f32, Shape{});
     copy_data(p1_val, vector<float>{7.0f});
     auto result = backend->create_tensor();
     auto cfun = backend->compile(fun);
     cfun->call({result}, {p_start_val, p_stop_val, p_step_val, p1_val});
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{3}));
     auto result_val = read_vector<float>(result);
     vector<float> seq{8.0f, 11.0f, 14.0f};
@@ -230,8 +230,8 @@ TEST(eval, interpret_dynamic_range_sum)
 TEST(eval, evaluate_broadcast_v3_bidirectional)
 {
     Shape shape_a{4, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = op::Constant::create(element::i32, Shape{3}, {2, 1, 4});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = op::Constant::create(element::Type_t::i32, Shape{3}, {2, 1, 4});
     auto bcast_v3 =
         make_shared<op::v3::Broadcast>(A, target_shape, op::BroadcastType::BIDIRECTIONAL);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
@@ -239,7 +239,7 @@ TEST(eval, evaluate_broadcast_v3_bidirectional)
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
        {result}, {make_host_tensor<element::Type_t::f32>(Shape{4, 1}, {1.0f, 2.0f, 3.0f, 4.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 4, 4}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
@@ -250,15 +250,15 @@ TEST(eval, evaluate_broadcast_v3_bidirectional)
 TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input)
 {
     Shape shape_a{1, 1, 1, 1, 1, 1, 1, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 3, 1, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{4}, {1, 3, 1, 1});
     auto bcast_v3 =
         make_shared<op::v3::Broadcast>(A, target_shape, op::BroadcastType::BIDIRECTIONAL);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor<element::Type_t::f32>(shape_a, {1.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 1, 1, 1, 1, 3, 1, 1}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1.0f, 1.0f, 1.0f};
@@ -268,8 +268,8 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input)
 TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input_2)
 {
     Shape shape_a{1, 3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = op::Constant::create(element::i32, Shape{2}, {3, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = op::Constant::create(element::Type_t::i32, Shape{2}, {3, 1});
     auto bcast_v3 =
         make_shared<op::v3::Broadcast>(A, target_shape, op::BroadcastType::BIDIRECTIONAL);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
@@ -277,7 +277,7 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input_2)
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(Shape{1, 3, 1}, {1.0f, 2.0f, 3.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 3, 1}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1.0f, 2.0f, 3.0f};
@@ -287,8 +287,8 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_target_rank_smaller_than_input_2)
 TEST(eval, evaluate_broadcast_v3_bidirectional_dyn)
 {
     Shape shape_a{4, 1};
-    auto A = make_shared<op::Parameter>(element::i32, shape_a);
-    auto target_shape = make_shared<op::Parameter>(element::i32, Shape{3});
+    auto A = make_shared<op::Parameter>(element::Type_t::i32, shape_a);
+    auto target_shape = make_shared<op::Parameter>(element::Type_t::i32, Shape{3});
     auto bcast_v3 =
         make_shared<op::v3::Broadcast>(A, target_shape, op::BroadcastType::BIDIRECTIONAL);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A, target_shape});
@@ -297,7 +297,7 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_dyn)
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::i32>(Shape{4, 1}, {1, 2, 3, 4}),
                                make_host_tensor<element::Type_t::i32>(Shape{3}, {2, 1, 4})}));
-    EXPECT_EQ(result->get_element_type(), element::i32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::i32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 4, 4}));
     auto result_val = read_vector<int32_t>(result);
     vector<int32_t> expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4,
@@ -308,15 +308,15 @@ TEST(eval, evaluate_broadcast_v3_bidirectional_dyn)
 TEST(eval, evaluate_broadcast_v3_numpy)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6});
     auto bcast_v3 = make_shared<op::v3::Broadcast>(A, target_shape);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{
@@ -329,8 +329,8 @@ TEST(eval, evaluate_broadcast_v3_numpy)
 TEST(eval, evaluate_broadcast_v3_numpy_dyn)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = make_shared<op::Parameter>(element::i32, Shape{3});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = make_shared<op::Parameter>(element::Type_t::i32, Shape{3});
     auto bcast_v3 = make_shared<op::v3::Broadcast>(A, target_shape);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A, target_shape});
@@ -339,7 +339,7 @@ TEST(eval, evaluate_broadcast_v3_numpy_dyn)
         fun->evaluate({result},
                       {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f}),
                        make_host_tensor<element::Type_t::i32>(Shape{3}, {2, 3, 6})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{
@@ -353,21 +353,21 @@ TEST(eval, evaluate_broadcast_v3_numpy_vs_bidi)
 {
     Shape in_shape{1, 4, 1};
-    auto A = make_shared<op::Parameter>(element::f32, in_shape);
-    auto target_shape = op::Constant::create(element::i64, Shape{3}, {1, 4, 4});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, in_shape);
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {1, 4, 4});
     auto bcast_v3_num = make_shared<op::v3::Broadcast>(A, target_shape, op::BroadcastType::NUMPY);
     auto fun_num = make_shared<Function>(OutputVector{bcast_v3_num}, ParameterVector{A});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun_num->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 4}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4};
     ASSERT_EQ(expec, result_val);
-    auto target_shape2 = op::Constant::create(element::i64, Shape{2}, {1, 4});
+    auto target_shape2 = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 4});
     auto bcast_v3 =
         make_shared<op::v3::Broadcast>(A, target_shape2, op::BroadcastType::BIDIRECTIONAL);
     auto fun_bidi = make_shared<Function>(OutputVector{bcast_v3_num}, ParameterVector{A});
@@ -375,7 +375,7 @@ TEST(eval, evaluate_broadcast_v3_numpy_vs_bidi)
     auto result2 = make_shared<HostTensor>();
     ASSERT_TRUE(fun_bidi->evaluate(
         {result2}, {make_host_tensor<element::Type_t::f32>(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})}));
-    EXPECT_EQ(result2->get_element_type(), element::f32);
+    EXPECT_EQ(result2->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result2->get_partial_shape(), (PartialShape{1, 4, 4}));
     auto result_val2 = read_vector<float>(result2);
     vector<float> expec2{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4};
@@ -386,8 +386,8 @@ TEST(eval, evaluate_broadcast_v3_bidi_3d)
 {
     Shape in_shape{1, 4, 1};
-    auto A = make_shared<op::Parameter>(element::f32, in_shape);
-    auto target_shape = op::Constant::create(element::i64, Shape{3}, {1, 1, 3});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, in_shape);
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {1, 1, 3});
     auto bcast_v3_num =
         make_shared<op::v3::Broadcast>(A, target_shape, op::BroadcastType::BIDIRECTIONAL);
     auto fun_num = make_shared<Function>(OutputVector{bcast_v3_num}, ParameterVector{A});
@@ -395,7 +395,7 @@ TEST(eval, evaluate_broadcast_v3_bidi_3d)
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun_num->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 3}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f, 3.0f, 3.0f, 3.0f, 4.0f, 4.0f, 4.0f};
@@ -407,8 +407,8 @@ TEST(eval, evaluate_broadcast_v3_bidi_4d)
     Shape in_shape{4, 1, 1};
     Shape expec_shape{1, 4, 2, 2};
-    auto A = make_shared<op::Parameter>(element::f32, in_shape);
-    auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 1, 2, 2});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, in_shape);
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{4}, {1, 1, 2, 2});
     auto bcast_v3 =
         make_shared<op::v3::Broadcast>(A, target_shape, op::BroadcastType::BIDIRECTIONAL);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
@@ -416,7 +416,7 @@ TEST(eval, evaluate_broadcast_v3_bidi_4d)
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(in_shape, {1.0f, 2.0f, 3.0f, 4.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{1, 4, 2, 2}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4};
@@ -426,8 +426,8 @@ TEST(eval, evaluate_broadcast_v3_bidi_4d)
 TEST(eval, evaluate_broadcast_v3_pdpd)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
-    auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6});
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6});
     auto bcast_v3 = make_shared<op::v3::Broadcast>(
         A, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1));
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
@@ -435,7 +435,7 @@ TEST(eval, evaluate_broadcast_v3_pdpd)
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{
@@ -448,8 +448,8 @@ TEST(eval, evaluate_broadcast_v3_pdpd)
 TEST(eval, evaluate_broadcast_v3_pdpd_dyn)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = make_shared<op::Parameter>(element::i32, Shape{3});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = make_shared<op::Parameter>(element::Type_t::i32, Shape{3});
     auto bcast_v3 = make_shared<op::v3::Broadcast>(
         A, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1));
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A, target_shape});
@@ -459,7 +459,7 @@ TEST(eval, evaluate_broadcast_v3_pdpd_dyn)
         fun->evaluate({result},
                       {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f}),
                        make_host_tensor<element::Type_t::i32>(Shape{3}, {2, 3, 6})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{
@@ -472,15 +472,15 @@ TEST(eval, evaluate_broadcast_v3_pdpd_dyn)
 TEST(eval, evaluate_broadcast_v1_numpy)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6});
     auto bcast_v3 = make_shared<op::v1::Broadcast>(A, target_shape);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{
@@ -493,8 +493,8 @@ TEST(eval, evaluate_broadcast_v1_numpy)
 TEST(eval, evaluate_broadcast_v1_numpy_dyn)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = make_shared<op::Parameter>(element::i64, Shape{3});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = make_shared<op::Parameter>(element::Type_t::i64, Shape{3});
     auto bcast_v3 = make_shared<op::v1::Broadcast>(A, target_shape);
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A, target_shape});
@@ -503,7 +503,7 @@ TEST(eval, evaluate_broadcast_v1_numpy_dyn)
         fun->evaluate({result},
                       {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f}),
                        make_host_tensor<element::Type_t::i64>(Shape{3}, {2, 3, 6})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{
@@ -516,8 +516,8 @@ TEST(eval, evaluate_broadcast_v1_numpy_dyn)
 TEST(eval, evaluate_broadcast_v1_pdpd)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6});
     auto bcast_v3 = make_shared<op::v1::Broadcast>(
         A, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1));
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
@@ -525,7 +525,7 @@ TEST(eval, evaluate_broadcast_v1_pdpd)
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{
@@ -538,8 +538,8 @@ TEST(eval, evaluate_broadcast_v1_pdpd)
 TEST(eval, evaluate_broadcast_v1_pdpd_dyn)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = make_shared<op::Parameter>(element::i64, Shape{3});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = make_shared<op::Parameter>(element::Type_t::i64, Shape{3});
     auto bcast_v3 = make_shared<op::v1::Broadcast>(
         A, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1));
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A, target_shape});
@@ -549,7 +549,7 @@ TEST(eval, evaluate_broadcast_v1_pdpd_dyn)
         fun->evaluate({result},
                       {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f}),
                        make_host_tensor<element::Type_t::i64>(Shape{3}, {2, 3, 6})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 6}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{
@@ -562,9 +562,9 @@ TEST(eval, evaluate_broadcast_v1_pdpd_dyn)
 TEST(eval, evaluate_broadcast_v1_explicit)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1});
-    auto axes_mapping = op::Constant::create(element::i32, Shape{2}, {1, 2});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 1});
+    auto axes_mapping = op::Constant::create(element::Type_t::i32, Shape{2}, {1, 2});
     auto bcast_v3 = make_shared<op::v1::Broadcast>(
         A, target_shape, axes_mapping, op::AutoBroadcastSpec(op::AutoBroadcastType::EXPLICIT));
     auto fun = make_shared<Function>(OutputVector{bcast_v3}, ParameterVector{A});
@@ -572,7 +572,7 @@ TEST(eval, evaluate_broadcast_v1_explicit)
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
         {result}, {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1, 2, 3, 1, 2, 3};
@@ -582,9 +582,9 @@ TEST(eval, evaluate_broadcast_v1_explicit)
 TEST(eval, evaluate_broadcast_v1_explicit_dyn)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = make_shared<op::Parameter>(element::i64, Shape{3});
-    auto axes_mapping = make_shared<op::Parameter>(element::i32, Shape{2});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = make_shared<op::Parameter>(element::Type_t::i64, Shape{3});
+    auto axes_mapping = make_shared<op::Parameter>(element::Type_t::i32, Shape{2});
     auto bcast_v1 = make_shared<op::v1::Broadcast>(
         A, target_shape, axes_mapping, op::AutoBroadcastSpec(op::AutoBroadcastType::EXPLICIT));
@@ -597,7 +597,7 @@ TEST(eval, evaluate_broadcast_v1_explicit_dyn)
                       {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f}),
                        make_host_tensor<element::Type_t::i64>(Shape{3}, {2, 3, 1}),
                        make_host_tensor<element::Type_t::i32>(Shape{2}, {1, 2})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1, 2, 3, 1, 2, 3};
@@ -607,9 +607,9 @@ TEST(eval, evaluate_broadcast_v1_explicit_dyn)
 TEST(eval, evaluate_broadcast_v3_explicit_dyn)
 {
     Shape shape_a{3, 1};
-    auto A = make_shared<op::Parameter>(element::f32, shape_a);
-    auto target_shape = make_shared<op::Parameter>(element::i64, Shape{3});
-    auto axes_mapping = make_shared<op::Parameter>(element::i32, Shape{2});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, shape_a);
+    auto target_shape = make_shared<op::Parameter>(element::Type_t::i64, Shape{3});
+    auto axes_mapping = make_shared<op::Parameter>(element::Type_t::i32, Shape{2});
     auto bcast_v3 = make_shared<op::v3::Broadcast>(
         A, target_shape, axes_mapping, op::BroadcastModeSpec(op::BroadcastType::EXPLICIT));
@@ -622,7 +622,7 @@ TEST(eval, evaluate_broadcast_v3_explicit_dyn)
                       {make_host_tensor<element::Type_t::f32>(Shape{3, 1}, {1.0f, 2.0f, 3.0f}),
                        make_host_tensor<element::Type_t::i64>(Shape{3}, {2, 3, 1}),
                        make_host_tensor<element::Type_t::i32>(Shape{2}, {1, 2})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3, 1}));
     auto result_val = read_vector<float>(result);
     vector<float> expec{1, 2, 3, 1, 2, 3};
@@ -631,8 +631,8 @@ TEST(eval, evaluate_broadcast_v3_explicit_dyn)
 TEST(eval, test_op_multi_out)
 {
-    auto p = make_shared<op::Parameter>(element::f32, PartialShape{2, 3});
-    auto p2 = make_shared<op::Parameter>(element::f64, PartialShape{2, 2});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{2, 3});
+    auto p2 = make_shared<op::Parameter>(element::Type_t::f64, PartialShape{2, 2});
     auto so = make_shared<TestOpMultiOut>(p, p2);
     auto fun =
         make_shared<Function>(OutputVector{so->output(0), so->output(1)}, ParameterVector{p, p2});
@@ -641,12 +641,12 @@ TEST(eval, test_op_multi_out)
     HostTensorVector ins{make_host_tensor<element::Type_t::f32>(Shape{2, 3}),
                          make_host_tensor<element::Type_t::f64>(Shape{2, 2})};
     ASSERT_TRUE(fun->evaluate({result, result2}, ins));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result->get_partial_shape(), (PartialShape{2, 3}));
     auto result_val = read_vector<float>(result);
     auto arg_val = read_vector<float>(ins[0]);
     ASSERT_EQ(result_val, arg_val);
-    EXPECT_EQ(result2->get_element_type(), element::f64);
+    EXPECT_EQ(result2->get_element_type(), element::Type_t::f64);
     EXPECT_EQ(result2->get_partial_shape(), (PartialShape{2, 2}));
     auto result_val2 = read_vector<double>(result2);
     auto arg_val2 = read_vector<double>(ins[1]);
@@ -655,8 +655,8 @@ TEST(eval, test_op_multi_out)
 TEST(eval, evaluate_reshape_v1)
 {
-    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 5});
-    auto pattern = make_shared<op::Parameter>(element::i64, Shape{2});
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 5});
+    auto pattern = make_shared<op::Parameter>(element::Type_t::i64, Shape{2});
     auto dyn_reshape = make_shared<op::v1::Reshape>(data, pattern, false);
     auto func = make_shared<Function>(OutputVector{dyn_reshape}, ParameterVector{data, pattern});
     auto result_tensor = make_shared<HostTensor>();
@@ -664,7 +664,7 @@ TEST(eval, evaluate_reshape_v1)
         {result_tensor},
         {make_host_tensor<element::Type_t::f32>({2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}),
         make_host_tensor<element::Type_t::i64>({2}, {5, 2})}));
-    EXPECT_EQ(result_tensor->get_element_type(), element::f32);
+    EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{5, 2}));
     auto computed_val = read_vector<float>(result_tensor);
     vector<float> expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -673,8 +673,8 @@ TEST(eval, evaluate_reshape_v1)
 TEST(eval, evaluate_reshape_v1_negative_index)
 {
-    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 5});
-    auto pattern = make_shared<op::Parameter>(element::i64, Shape{2});
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 5});
+    auto pattern = make_shared<op::Parameter>(element::Type_t::i64, Shape{2});
     auto dyn_reshape = make_shared<op::v1::Reshape>(data, pattern, false);
     auto func = make_shared<Function>(OutputVector{dyn_reshape}, ParameterVector{data, pattern});
     auto result_tensor = make_shared<HostTensor>();
@@ -682,7 +682,7 @@ TEST(eval, evaluate_reshape_v1_negative_index)
         {result_tensor},
        {make_host_tensor<element::Type_t::f32>({2, 5}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}),
         make_host_tensor<element::Type_t::i64>({2}, {2, -1})}));
-    EXPECT_EQ(result_tensor->get_element_type(), element::f32);
+    EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 5}));
     auto computed_val = read_vector<float>(result_tensor);
     vector<float> expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -691,8 +691,8 @@ TEST(eval, evaluate_reshape_v1_negative_index)
 TEST(eval, evaluate_reshape_v1_negative_index_zero_dim_zero_flag)
 {
-    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 2, 2, 2});
-    auto pattern = make_shared<op::Parameter>(element::i64, Shape{6});
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 2, 2, 2});
+    auto pattern = make_shared<op::Parameter>(element::Type_t::i64, Shape{6});
     auto dyn_reshape = make_shared<op::v1::Reshape>(data, pattern, true);
     auto func = make_shared<Function>(OutputVector{dyn_reshape}, ParameterVector{data, pattern});
     auto result_tensor = make_shared<HostTensor>();
@@ -701,7 +701,7 @@ TEST(eval, evaluate_reshape_v1_negative_index_zero_dim_zero_flag)
         {make_host_tensor<element::Type_t::f32>(
              {2, 2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}),
         make_host_tensor<element::Type_t::i64>({6}, {2, 0, 1, -1, 1, 2})}));
-    EXPECT_EQ(result_tensor->get_element_type(), element::f32);
+    EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 2, 1, 2, 1, 2}));
     auto computed_val = read_vector<float>(result_tensor);
     vector<float> expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
@@ -710,8 +710,8 @@ TEST(eval, evaluate_reshape_v1_negative_index_zero_dim_zero_flag)
 TEST(eval, evaluate_reshape_v1_pattern_int16)
 {
-    auto data = make_shared<op::Parameter>(element::f32, Shape{2, 2, 2, 2});
-    auto pattern = make_shared<op::Parameter>(element::i16, Shape{6});
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 2, 2, 2});
+    auto pattern = make_shared<op::Parameter>(element::Type_t::i16, Shape{6});
     auto dyn_reshape = make_shared<op::v1::Reshape>(data, pattern, true);
     auto func = make_shared<Function>(OutputVector{dyn_reshape}, ParameterVector{data, pattern});
     auto result_tensor = make_shared<HostTensor>();
@@ -720,7 +720,7 @@ TEST(eval, evaluate_reshape_v1_pattern_int16)
         {make_host_tensor<element::Type_t::f32>(
              {2, 2, 2, 2}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}),
         make_host_tensor<element::Type_t::i16>({6}, {2, 0, 1, -1, 1, 2})}));
-    EXPECT_EQ(result_tensor->get_element_type(), element::f32);
+    EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2, 2, 1, 2, 1, 2}));
     auto computed_val = read_vector<float>(result_tensor);
     vector<float> expected_val{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
@@ -729,8 +729,8 @@ TEST(eval, evaluate_reshape_v1_pattern_int16)
 TEST(eval, evaluate_convert)
 {
-    auto p = make_shared<op::Parameter>(element::f32, PartialShape{-1, -1});
-    auto convert = make_shared<op::Convert>(p, element::i64);
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, PartialShape{-1, -1});
+    auto convert = make_shared<op::Convert>(p, element::Type_t::i64);
     auto fun = make_shared<Function>(OutputVector{convert}, ParameterVector{p});
     std::vector<std::vector<float>> inputs{{-1, 1}};
@@ -740,7 +740,7 @@ TEST(eval, evaluate_convert)
         auto result = make_shared<HostTensor>();
         ASSERT_TRUE(fun->evaluate(
            {result}, {make_host_tensor<element::Type_t::f32>(Shape{1, 2}, inputs[i])}));
-        EXPECT_EQ(result->get_element_type(), element::i64);
+        EXPECT_EQ(result->get_element_type(), element::Type_t::i64);
         EXPECT_EQ(result->get_shape(), (Shape{1, 2}));
         auto result_data = read_vector<int64_t>(result);
         ASSERT_EQ(result_data, expected_result[i]);
@@ -749,14 +749,14 @@ TEST(eval, evaluate_convert)
 TEST(eval, evaluate_abs)
 {
-    auto p = make_shared<op::Parameter>(element::f32, Shape{2, 3});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 3});
     auto abs = make_shared<op::Abs>(p);
     auto fun = make_shared<Function>(OutputVector{abs}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::f32>(
                                   Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     auto result_val = read_vector<float>(result);
     vector<float> expec{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f};
     ASSERT_EQ(result_val, expec);
@@ -764,14 +764,14 @@ TEST(eval, evaluate_abs)
 TEST(eval, evaluate_erf)
 {
-    auto p = make_shared<op::Parameter>(element::f32, Shape{2, 3});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 3});
     auto erf = make_shared<op::Erf>(p);
     auto fun = make_shared<Function>(OutputVector{erf}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::f32>(
                                   Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     auto result_val = read_vector<float>(result);
     vector<float> expec{std::erf(0.0f),
                         std::erf(-1.0f),
@@ -784,14 +784,14 @@ TEST(eval, evaluate_erf)
 TEST(eval, evaluate_exp)
 {
-    auto p = make_shared<op::Parameter>(element::f32, Shape{2, 3});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 3});
     auto exp = make_shared<op::Exp>(p);
     auto fun = make_shared<Function>(OutputVector{exp}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::f32>(
                                   Shape{2, 3}, {0.0f, -1.0f, -2.0f, -3.0f, 4.0f, 5.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     auto result_val = read_vector<float>(result);
     vector<float> expec{std::exp(0.0f),
                         std::exp(-1.0f),
@@ -804,14 +804,14 @@ TEST(eval, evaluate_exp)
 TEST(eval, evaluate_floor)
 {
-    auto p = make_shared<op::Parameter>(element::f32, Shape{2, 2});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 2});
     auto floor = make_shared<op::Floor>(p);
     auto fun = make_shared<Function>(OutputVector{floor}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate(
         {result},
        {make_host_tensor<element::Type_t::f32>(Shape{2, 2}, {-2.5f, -2.0f, 0.3f, 4.8f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     auto result_val = read_vector<float>(result);
     vector<float> expec{-3.0f, -2.0f, 0.0f, 4.0f};
     ASSERT_EQ(result_val, expec);
@@ -819,14 +819,14 @@ TEST(eval, evaluate_floor)
 TEST(eval, evaluate_floor_int32)
 {
-    auto p = make_shared<op::Parameter>(element::i32, Shape{2, 2});
+    auto p = make_shared<op::Parameter>(element::Type_t::i32, Shape{2, 2});
     auto floor = make_shared<op::Floor>(p);
     auto fun = make_shared<Function>(OutputVector{floor}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::i32>(
                                   Shape{2, 2}, {-2, -136314888, 0x40000010, 0x40000001})}));
-    EXPECT_EQ(result->get_element_type(), element::i32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::i32);
     auto result_val = read_vector<int32_t>(result);
     vector<int32_t> expec{-2, -136314888, 0x40000010, 0x40000001};
     ASSERT_EQ(result_val, expec);
@@ -834,7 +834,7 @@ TEST(eval, evaluate_floor_int32)
 TEST(eval, evaluate_log)
 {
-    auto p = make_shared<op::Parameter>(element::f32, Shape{2, 2, 2});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 2, 2});
     auto log = make_shared<op::Log>(p);
     auto fun = make_shared<Function>(OutputVector{log}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
@@ -842,7 +842,7 @@ TEST(eval, evaluate_log)
         fun->evaluate({result},
                       {make_host_tensor<element::Type_t::f32>(
                           Shape{2, 2, 2}, {0.125f, 0.25f, 0.5f, 1.f, 2.f, 4.f, 8.f, 16.f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     auto result_val = read_vector<float>(result);
     vector<float> expec{std::log(0.125f),
                         std::log(0.25f),
@@ -857,7 +857,7 @@ TEST(eval, evaluate_log)
 TEST(eval, evaluate_negative_f32)
 {
-    auto p = make_shared<op::Parameter>(element::f32, Shape{2, 5});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 5});
     auto negate = make_shared<op::Negative>(p);
     auto fun = make_shared<Function>(OutputVector{negate}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
@@ -866,7 +866,7 @@ TEST(eval, evaluate_negative_f32)
         {make_host_tensor<element::Type_t::f32>(
             Shape{2, 5},
            {1.35f, 8.76f, -8.0f, 17.234f, -2.121f, 1.0f, 8.7f, -8.92f, 17.0f, -1.0f})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     auto result_val = read_vector<float>(result);
     vector<float> expec{-1.35f, -8.76f, 8.0f, -17.234f, 2.121f, -1.0f, -8.7f, 8.92f, -17.0f, 1.0f};
     ASSERT_EQ(result_val, expec);
@@ -874,14 +874,14 @@ TEST(eval, evaluate_negative_f32)
 TEST(eval, evaluate_negative_i32)
 {
-    auto p = make_shared<op::Parameter>(element::i32, Shape{2, 5});
+    auto p = make_shared<op::Parameter>(element::Type_t::i32, Shape{2, 5});
     auto negate = make_shared<op::Negative>(p);
     auto fun = make_shared<Function>(OutputVector{negate}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::i32>(
                                   Shape{2, 5}, {1, 8, -8, 17, -2, 1, 8, -8, 17, 0})}));
-    EXPECT_EQ(result->get_element_type(), element::i32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::i32);
     auto result_val = read_vector<int32_t>(result);
     vector<int32_t> expec{-1, -8, 8, -17, 2, -1, -8, 8, -17, 0};
     ASSERT_EQ(result_val, expec);
@@ -889,14 +889,14 @@ TEST(eval, evaluate_negative_i32)
 TEST(eval, evaluate_relu_2Ffprop_f32)
 {
-    auto p = make_shared<op::Parameter>(element::f32, Shape{2, 5});
+    auto p = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 5});
     auto relu = make_shared<op::Relu>(p);
     auto fun = make_shared<Function>(OutputVector{relu}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::f32>(
                                   Shape{2, 5}, {1, 8, -8, 17, -0.5, 0.1, 8.5, -8, 17, -0.5})}));
-    EXPECT_EQ(result->get_element_type(), element::f32);
+    EXPECT_EQ(result->get_element_type(), element::Type_t::f32);
     auto result_val = read_vector<float>(result);
     vector<float> expec{1, 8, 0, 17, 0, 0.1, 8.5, 0, 17, 0};
     ASSERT_EQ(result_val, expec);
@@ -904,14 +904,14 @@ TEST(eval, evaluate_relu_2Ffprop_f32)
 TEST(eval, evaluate_relu_2Ffprop_i32)
 {
-    auto p = make_shared<op::Parameter>(element::i32, Shape{2, 5});
+    auto p = make_shared<op::Parameter>(element::Type_t::i32, Shape{2, 5});
     auto relu = make_shared<op::Relu>(p);
     auto fun = make_shared<Function>(OutputVector{relu}, ParameterVector{p});
     auto result = make_shared<HostTensor>();
     ASSERT_TRUE(fun->evaluate({result},
                               {make_host_tensor<element::Type_t::i32>(
                                   Shape{2, 5}, {1, 8, -8, 17, -2, 1, 8, -8, 17, -1})}));
-    EXPECT_EQ(result->get_element_type(), element::i32);
EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); auto result_val = read_vector(result); vector expec{1, 8, 0, 17, 0, 1, 8, 0, 17, 0}; ASSERT_EQ(result_val, expec); @@ -919,14 +919,14 @@ TEST(eval, evaluate_relu_2Ffprop_i32) TEST(eval, evaluate_round) { - auto p = make_shared(element::f32, Shape{5}); + auto p = make_shared(element::Type_t::f32, Shape{5}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_TO_EVEN); auto fun = make_shared(OutputVector{round}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{5}, {0.9f, 2.5f, 2.3f, 1.5f, -4.5f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{1.0f, 2.0f, 2.0f, 2.0f, -4.0f}; ASSERT_EQ(result_val, expec); @@ -934,7 +934,7 @@ TEST(eval, evaluate_round) TEST(eval, evaluate_round_2D) { - auto p = make_shared(element::f32, Shape{3, 5}); + auto p = make_shared(element::Type_t::f32, Shape{3, 5}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_TO_EVEN); auto fun = make_shared(OutputVector{round}, ParameterVector{p}); auto result = make_shared(); @@ -955,7 +955,7 @@ TEST(eval, evaluate_round_2D) -2.2f, -2.5f, -2.8f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{ 0.f, 0.f, 1.f, 1.f, 2.f, 2.f, 2.f, 2.f, 3.f, -1.f, -2.f, -2.f, -2.f, -2.f, -3.f}; @@ -964,7 +964,7 @@ TEST(eval, evaluate_round_2D) TEST(eval, evaluate_sigmoid) { - auto p = make_shared(element::f32, Shape{1, 1, 2, 2}); + auto p = make_shared(element::Type_t::f32, Shape{1, 1, 2, 2}); auto sigmoid = make_shared(p); auto fun = make_shared(OutputVector{sigmoid}, ParameterVector{p}); auto result = make_shared(); @@ -975,7 +975,7 @@ TEST(eval, evaluate_sigmoid) float sigma2 = 1.0f / (1.0f + std::exp(-x2)); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{1, 1, 2, 2}, {x1, x2, x1, x2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{sigma1, sigma2, sigma1, sigma2}; EXPECT_EQ(result_val.size(), expec.size()); @@ -983,7 +983,7 @@ TEST(eval, evaluate_sigmoid) TEST(eval, evaluate_sign) { - auto p = make_shared(element::f32, Shape{2, 3}); + auto p = make_shared(element::Type_t::f32, Shape{2, 3}); auto sign = make_shared(p); auto fun = make_shared(OutputVector{sign}, ParameterVector{p}); auto result = make_shared(); @@ -991,7 +991,7 @@ TEST(eval, evaluate_sign) ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{2, 3}, {1, -2, 0, -4.8f, 4.8f, -0.0f})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{1, -1, 0, -1, 1, 0}; ASSERT_EQ(result_val, expec); @@ -999,7 +999,7 @@ TEST(eval, evaluate_sign) TEST(eval, evaluate_sin) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto sin = make_shared(p); auto fun = make_shared(OutputVector{sin}, ParameterVector{p}); auto result = make_shared(); @@ -1008,7 +1008,7 @@ TEST(eval, evaluate_sin) {result}, {make_host_tensor( Shape{11}, {0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f})})); - EXPECT_EQ(result->get_element_type(), 
element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{0.00000000f, 0.24740396f, @@ -1026,14 +1026,14 @@ TEST(eval, evaluate_sin) TEST(eval, evaluate_sinh) { - auto p = make_shared(element::f32, Shape{6}); + auto p = make_shared(element::Type_t::f32, Shape{6}); auto sinh = make_shared(p); auto fun = make_shared(OutputVector{sinh}, ParameterVector{p}); auto result = make_shared(); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return sinhf(x); }); @@ -1042,14 +1042,14 @@ TEST(eval, evaluate_sinh) TEST(eval, evaluate_sqrt) { - auto p = make_shared(element::f32, Shape{6}); + auto p = make_shared(element::Type_t::f32, Shape{6}); auto sqrt = make_shared(p); auto fun = make_shared(OutputVector{sqrt}, ParameterVector{p}); auto result = make_shared(); vector input{16, 4, 81, 100, 10000, 0}; ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{4, 2, 9, 10, 100, 0}; ASSERT_FLOAT_VECTORS_EQ(expec, result_val); @@ -1057,7 +1057,7 @@ TEST(eval, evaluate_sqrt) TEST(eval, evaluate_acos) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto acos = make_shared(p); auto fun = make_shared(OutputVector{acos}, ParameterVector{p}); auto result = make_shared(); @@ -1065,7 +1065,7 @@ TEST(eval, evaluate_acos) vector input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::acos(x); }); @@ -1074,7 +1074,7 @@ TEST(eval, evaluate_acos) TEST(eval, evaluate_asin) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto asin = make_shared(p); auto fun = make_shared(OutputVector{asin}, ParameterVector{p}); auto result = make_shared(); @@ -1082,7 +1082,7 @@ TEST(eval, evaluate_asin) vector input{-1.f, -0.75f, -0.5f, -0.25f, -0.125f, 0.f, 0.125f, 0.25f, 0.5f, 0.75f, 1.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::asin(x); }); @@ -1092,7 +1092,7 @@ TEST(eval, evaluate_asin) TEST(eval, evaluate_atan) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto atan = make_shared(p); auto fun = make_shared(OutputVector{atan}, ParameterVector{p}); auto result = make_shared(); @@ -1100,7 +1100,7 @@ TEST(eval, evaluate_atan) vector input{-4.f, -2.f, -1.f, -0.5f, -0.25f, 0.f, 0.25f, 0.5f, 1.f, 2.f, 4.f}; ASSERT_TRUE( fun->evaluate({result}, 
{make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::atan(x); }); @@ -1110,7 +1110,7 @@ TEST(eval, evaluate_atan) TEST(eval, evaluate_ceiling) { - auto p = make_shared(element::f32, Shape{2, 2}); + auto p = make_shared(element::Type_t::f32, Shape{2, 2}); auto ceil = make_shared(p); auto fun = make_shared(OutputVector{ceil}, ParameterVector{p}); auto result = make_shared(); @@ -1118,7 +1118,7 @@ TEST(eval, evaluate_ceiling) vector input{-2.5f, -2.0f, 0.3f, 4.8f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{2, 2}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); vector expec{-2.0f, -2.0f, 1.0f, 5.0f}; ASSERT_EQ(result_val, expec); @@ -1126,7 +1126,7 @@ TEST(eval, evaluate_ceiling) TEST(eval, evaluate_cos) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto cos = make_shared(p); auto fun = make_shared(OutputVector{cos}, ParameterVector{p}); auto result = make_shared(); @@ -1134,7 +1134,7 @@ TEST(eval, evaluate_cos) vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::cos(x); }); @@ -1144,14 +1144,14 @@ TEST(eval, evaluate_cos) TEST(eval, evaluate_cosh) { - auto p = make_shared(element::f32, Shape{6}); + auto p = make_shared(element::Type_t::f32, Shape{6}); auto cosh = make_shared(p); auto fun = make_shared(OutputVector{cosh}, ParameterVector{p}); auto result = make_shared(); vector input{1.0f, 0.0f, -0.0f, -1.0f, 5.0f, -5.0f}; ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::cosh(x); }); @@ -1161,7 +1161,7 @@ TEST(eval, evaluate_cosh) TEST(eval, evaluate_tan) { - auto p = make_shared(element::f32, Shape{11}); + auto p = make_shared(element::Type_t::f32, Shape{11}); auto tan = make_shared(p); auto fun = make_shared(OutputVector{tan}, ParameterVector{p}); auto result = make_shared(); @@ -1169,7 +1169,7 @@ TEST(eval, evaluate_tan) vector input{0.f, 0.25f, -0.25f, 0.5f, -0.5f, 1.f, -1.f, 2.f, -2.f, 4.f, -4.f}; ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{11}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::tan(x); }); @@ -1179,14 +1179,14 @@ TEST(eval, evaluate_tan) TEST(eval, evaluate_tanh) { - auto p = make_shared(element::f32, Shape{6}); + auto p = make_shared(element::Type_t::f32, Shape{6}); auto tanh = make_shared(p); auto fun = make_shared(OutputVector{tanh}, ParameterVector{p}); auto result = make_shared(); vector 
input{1.0f, 0.0f, -0.0f, -1.0f, 0.5f, -0.5f}; ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{6}, input)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); auto result_val = read_vector(result); std::transform( input.begin(), input.end(), input.begin(), [](float x) -> float { return std::tanh(x); }); @@ -1196,14 +1196,14 @@ TEST(eval, evaluate_tanh) TEST(eval, evaluate_logical_not) { - auto p = make_shared(element::boolean, Shape{2, 2}); + auto p = make_shared(element::Type_t::boolean, Shape{2, 2}); auto logical_not = make_shared(p); auto fun = make_shared(OutputVector{logical_not}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(Shape{2, 2}, {1, 0, 1, 0})})); - EXPECT_EQ(result->get_element_type(), element::boolean); + EXPECT_EQ(result->get_element_type(), element::Type_t::boolean); auto result_val = read_vector(result); vector expec{0, 1, 0, 1}; ASSERT_EQ(result_val, expec); @@ -1211,9 +1211,9 @@ TEST(eval, evaluate_logical_not) TEST(eval, evaluate_dynamic_gather) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i32, PartialShape::dynamic()); auto gather = make_shared(arg1, arg2, arg3); auto fun = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); auto result_tensor = make_shared(); @@ -1221,7 +1221,7 @@ TEST(eval, evaluate_dynamic_gather) {make_host_tensor({3}, {1.0f, 2.0f, 3.0f}), make_host_tensor({2}, {1, 0}), make_host_tensor({1}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{2})); auto cval = read_vector(result_tensor); vector out{2.0f, 1.0f}; @@ -1230,9 +1230,9 @@ TEST(eval, evaluate_dynamic_gather) TEST(eval, evaluate_dynamic_axis_gather) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto gather = make_shared(arg1, arg2, arg3); auto fun = make_shared(OutputVector{gather}, ParameterVector{arg1, arg2, arg3}); auto result_tensor = make_shared(); @@ -1241,7 +1241,7 @@ TEST(eval, evaluate_dynamic_axis_gather) {3, 3}, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f, 3.0f, 3.1f, 3.2f}), make_host_tensor({1, 2}, {0, 2}), make_host_tensor({}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 1, 2})); auto cval = read_vector(result_tensor); vector out{1.0f, 1.2f, 2.0f, 2.2f, 3.0f, 3.2f}; @@ -1250,15 +1250,15 @@ TEST(eval, evaluate_dynamic_axis_gather) TEST(eval, evaluate_dynamic_concat) { - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::f32, PartialShape::dynamic()); + auto arg1 = 
make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto concat = make_shared(NodeVector{arg1, arg2}, 1); auto fun = make_shared(OutputVector{concat}, ParameterVector{arg1, arg2}); auto result_tensor = make_shared(); ASSERT_TRUE(fun->evaluate({result_tensor}, {make_host_tensor({1, 1}, {1.0f}), make_host_tensor({1, 2}, {8.0f, 10.0f})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{1, 3})); auto cval = read_vector(result_tensor); vector out{1.0f, 8.0f, 10.0f}; @@ -1289,17 +1289,25 @@ void test_eval(shared_ptr fun, TEST(eval, eval_transpose) { - auto x = make_shared(element::f32, PartialShape::dynamic()); + auto x = make_shared(element::Type_t::f32, PartialShape::dynamic()); vector> axes; - axes.push_back(make_shared(element::i8, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::i16, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::i32, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::i64, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::i8, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::i16, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::u8, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::u16, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::u32, PartialShape{Dimension::dynamic()})); - axes.push_back(make_shared(element::u64, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::u8, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::u16, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::u32, PartialShape{Dimension::dynamic()})); + axes.push_back( + make_shared(element::Type_t::u64, PartialShape{Dimension::dynamic()})); std::vector x_shapes{Shape{2, 3}, Shape{2, 3}, Shape{2, 2, 3}}; @@ -1348,7 +1356,7 @@ TEST(eval, eval_transpose) TEST(eval, max_pool_v1_dynamic) { Shape window_shape{3}; - auto A = make_shared(element::f32, PartialShape::dynamic()); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto f = make_shared( make_shared( A, Strides(), Shape(), Shape(), window_shape, op::RoundingType::FLOOR), @@ -1359,7 +1367,7 @@ TEST(eval, max_pool_v1_dynamic) {make_host_tensor( {1, 1, 14}, {0, 1, 0, 2, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{1, 1, 12})); auto cval = read_vector(result_tensor); vector out{1, 2, 2, 2, 3, 3, 3, 2, 2, 2, 2, 0}; @@ -1369,10 +1377,10 @@ TEST(eval, evaluate_static_scatter_elements_update_basic) { const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, indices_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::Type_t::f32, 
data_shape); + auto arg2 = make_shared(element::Type_t::i32, indices_shape); + auto arg3 = make_shared(element::Type_t::f32, indices_shape); + auto arg4 = make_shared(element::Type_t::i64, Shape{}); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_elements_update}, @@ -1386,7 +1394,7 @@ TEST(eval, evaluate_static_scatter_elements_update_basic) make_host_tensor(indices_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; @@ -1398,10 +1406,10 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_basic) const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1417,7 +1425,7 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_basic) {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; @@ -1430,10 +1438,10 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_negative_axis) const Shape indices_shape{2, 3}; const Shape axis_shape{}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1449,7 +1457,7 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_negative_axis) {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor(axis_shape, {-1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{1.1f, 1.0f, 1.2f, 2.0f, 2.2f, 2.1f, 0.0f, 0.0f, 0.0f}; @@ -1461,10 +1469,10 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_1d_axis) const Shape data_shape{3, 3}; const Shape indices_shape{2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, 
PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1480,7 +1488,7 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_1d_axis) {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({1}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; @@ -1493,10 +1501,10 @@ TEST(eval, DISABLED_evaluate_dynamic_scatter_elements_update_3d_i16) const Shape data_shape{3, 3, 3}; const Shape indices_shape{2, 2, 3}; - auto arg1 = make_shared(element::i16, PartialShape::dynamic()); - auto arg2 = make_shared(element::i16, PartialShape::dynamic()); - auto arg3 = make_shared(element::i16, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i16, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i16, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i16, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1513,7 +1521,7 @@ TEST(eval, DISABLED_evaluate_dynamic_scatter_elements_update_3d_i16) indices_shape, {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}), make_host_tensor({}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i16); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::i16); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 3})); auto cval = read_vector(result_tensor); vector out{4, 2, 0, 1, 0, 6, 0, 5, 3, 10, 0, 12, 0, 11, @@ -1526,10 +1534,10 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_one_elem_i32) const Shape data_shape{3, 3, 3}; const Shape indices_shape{1, 1, 1}; - auto arg1 = make_shared(element::i32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_elements_update = make_shared(arg1, arg2, arg3, arg4); @@ -1544,7 +1552,7 @@ TEST(eval, evaluate_dynamic_scatter_elements_update_one_elem_i32) make_host_tensor(indices_shape, {2}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::i32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 3})); auto cval = read_vector(result_tensor); vector out{0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, @@ -1557,9 +1565,9 @@ TEST(eval, topk_v1) Shape shape{2, 3, 2}; 
Shape rshape{2, 2, 2}; - auto A = make_shared(element::f32, shape); - const auto k = op::Constant::create(element::i32, Shape{}, {2}); - auto B = make_shared(A, k, 1, "max", "index", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + const auto k = op::Constant::create(element::Type_t::i32, Shape{}, {2}); + auto B = make_shared(A, k, 1, "max", "index", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A}); @@ -1568,9 +1576,9 @@ TEST(eval, topk_v1) ASSERT_TRUE(fun->evaluate({result0, result1}, {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); @@ -1587,9 +1595,9 @@ TEST(eval, topk_v1_dyn) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "index", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "index", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1600,9 +1608,9 @@ TEST(eval, topk_v1_dyn) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1617,9 +1625,9 @@ TEST(eval, topk_v3_dyn) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "index", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "index", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1630,9 +1638,9 @@ TEST(eval, topk_v3_dyn) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1647,9 +1655,9 @@ TEST(eval, topk_v3_dyn_values) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "value", element::i32); + auto A = make_shared(element::Type_t::f32, 
shape); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "value", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1660,9 +1668,9 @@ TEST(eval, topk_v3_dyn_values) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1677,9 +1685,9 @@ TEST(eval, topk_v3_dyn_values_k0) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "value", element::i32); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "value", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1690,9 +1698,9 @@ TEST(eval, topk_v3_dyn_values_k0) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1707,10 +1715,10 @@ TEST(eval, topk_v1_dyn_k0) { Shape shape{2, 3, 2}; - auto A = make_shared(element::f32, shape); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::Type_t::f32, shape); + auto k = make_shared(element::Type_t::i64, Shape{}); - element::Type result_et{element::i32}; + element::Type result_et{element::Type_t::i32}; auto B = make_shared( A, k, 1, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); @@ -1723,9 +1731,9 @@ TEST(eval, topk_v1_dyn_k0) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1739,9 +1747,9 @@ TEST(eval, topk_v1_dyn_k0) TEST(eval, topk_v3_param_dyn_values_k0) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "value", element::i32); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "value", element::Type_t::i32); auto fun = 
make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1752,9 +1760,9 @@ TEST(eval, topk_v3_param_dyn_values_k0) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1767,9 +1775,9 @@ TEST(eval, topk_v3_param_dyn_values_k0) TEST(eval, topk_v3_param_dyn_values_k2) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::u32, Shape{}); - auto B = make_shared(A, k, 1, "max", "value", element::i32); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto k = make_shared(element::Type_t::u32, Shape{}); + auto B = make_shared(A, k, 1, "max", "value", element::Type_t::i32); auto fun = make_shared(OutputVector{B->output(0), B->output(1)}, ParameterVector{A, k}); @@ -1780,9 +1788,9 @@ TEST(eval, topk_v3_param_dyn_values_k2) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1795,11 +1803,11 @@ TEST(eval, topk_v3_param_dyn_values_k2) TEST(eval, topk_v1_param_dyn_k2) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto k = make_shared(element::Type_t::i64, Shape{}); auto axis = 1; - element::Type result_et{element::i32}; + element::Type result_et{element::Type_t::i32}; auto B = make_shared( A, k, axis, op::v1::TopK::Mode::MAX, op::v1::TopK::SortType::SORT_VALUES, result_et); @@ -1812,9 +1820,9 @@ TEST(eval, topk_v1_param_dyn_k2) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {2})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 2, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 2, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1828,10 +1836,10 @@ TEST(eval, topk_v1_param_dyn_k2) TEST(eval, topk_v1_param_dyn_k0) { - auto A = make_shared(element::f32, PartialShape::dynamic()); - auto k = make_shared(element::i64, Shape{}); + auto A = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto k = make_shared(element::Type_t::i64, Shape{}); - element::Type result_et{element::i32}; + element::Type result_et{element::Type_t::i32}; auto B = make_shared( A, k, 1, op::v1::TopK::Mode::MAX, 
op::v1::TopK::SortType::SORT_VALUES, result_et); @@ -1845,9 +1853,9 @@ TEST(eval, topk_v1_param_dyn_k0) {make_host_tensor( Shape{2, 3, 2}, {12, 2, 10, 9, 8, 4, 6, 1, 5, 3, 11, 7}), make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result0->get_element_type(), element::f32); + EXPECT_EQ(result0->get_element_type(), element::Type_t::f32); EXPECT_EQ(result0->get_partial_shape(), (PartialShape{2, 3, 2})); - EXPECT_EQ(result1->get_element_type(), element::i32); + EXPECT_EQ(result1->get_element_type(), element::Type_t::i32); EXPECT_EQ(result1->get_partial_shape(), (PartialShape{2, 3, 2})); auto result0_val = read_vector(result0); auto result1_val = read_vector(result1); @@ -1861,8 +1869,8 @@ TEST(eval, topk_v1_param_dyn_k0) TEST(eval, reduce_logical_and__neg_axis) { - const auto data = make_shared(element::boolean, Shape{2, 2, 2}); - const auto axes = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::boolean, Shape{2, 2, 2}); + const auto axes = make_shared(element::Type_t::i64, Shape{}); const auto op = make_shared(data, axes); @@ -1887,10 +1895,10 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i32) const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i32, indices_shape); - auto arg3 = make_shared(element::f32, updates_shape); - auto arg4 = make_shared(element::i32, Shape{}); + auto arg1 = make_shared(element::Type_t::f32, data_shape); + auto arg2 = make_shared(element::Type_t::i32, indices_shape); + auto arg3 = make_shared(element::Type_t::f32, updates_shape); + auto arg4 = make_shared(element::Type_t::i32, Shape{}); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); @@ -1902,7 +1910,7 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i32) make_host_tensor( updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}; @@ -1915,10 +1923,10 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i64) const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, data_shape); - auto arg2 = make_shared(element::i64, indices_shape); - auto arg3 = make_shared(element::f32, updates_shape); - auto arg4 = make_shared(element::i64, Shape{}); + auto arg1 = make_shared(element::Type_t::f32, data_shape); + auto arg2 = make_shared(element::Type_t::i64, indices_shape); + auto arg3 = make_shared(element::Type_t::f32, updates_shape); + auto arg4 = make_shared(element::Type_t::i64, Shape{}); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, ParameterVector{arg1, arg2, arg3, arg4}); @@ -1930,7 +1938,7 @@ TEST(eval, evaluate_static_scatter_update_basic_axes_indices_i64) make_host_tensor( updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_shape(), (Shape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 
1.2f, 2.0f, 2.1f, 2.2f}; @@ -1943,10 +1951,10 @@ TEST(eval, evaluate_dynamic_scatter_update_basic) const Shape indices_shape{1, 2}; const Shape updates_shape{1, 2, 3}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, @@ -1960,7 +1968,7 @@ TEST(eval, evaluate_dynamic_scatter_update_basic) updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 0.f, 0.f, 1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}; @@ -1974,10 +1982,10 @@ TEST(eval, evaluate_dynamic_scatter_update_negative_axis) const Shape updates_shape{3, 1, 2}; const Shape axis_shape{}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, @@ -1991,7 +1999,7 @@ TEST(eval, evaluate_dynamic_scatter_update_negative_axis) updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), make_host_tensor(axis_shape, {-1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 1.0f, 1.1f, 0.0f, 1.2f, 2.0f, 0.0f, 2.1f, 2.2f}; @@ -2004,10 +2012,10 @@ TEST(eval, evaluate_dynamic_scatter_update_1d_axis) const Shape indices_shape{1, 2}; const Shape updates_shape{3, 1, 2}; - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::f32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, @@ -2021,7 +2029,7 @@ TEST(eval, evaluate_dynamic_scatter_update_1d_axis) updates_shape, {1.0f, 1.1f, 1.2f, 2.0f, 2.1f, 2.2f}), 
make_host_tensor({1}, {1})})); - EXPECT_EQ(result_tensor->get_element_type(), element::f32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::f32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3})); auto cval = read_vector(result_tensor); vector out{0.f, 1.0f, 1.1f, 0.0f, 1.2f, 2.0f, 0.0f, 2.1f, 2.2f}; @@ -2034,10 +2042,10 @@ TEST(eval, evaluate_dynamic_scatter_update_one_elem_i32) const Shape indices_shape{1, 1}; const Shape updates_shape{1, 1, 3, 2}; - auto arg1 = make_shared(element::i32, PartialShape::dynamic()); - auto arg2 = make_shared(element::i32, PartialShape::dynamic()); - auto arg3 = make_shared(element::i32, PartialShape::dynamic()); - auto arg4 = make_shared(element::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg3 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg4 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto scatter_update = make_shared(arg1, arg2, arg3, arg4); auto fun = make_shared(OutputVector{scatter_update}, @@ -2051,7 +2059,7 @@ TEST(eval, evaluate_dynamic_scatter_update_one_elem_i32) make_host_tensor(updates_shape, {1, 2, 3, 4, 5, 6}), make_host_tensor({}, {0})})); - EXPECT_EQ(result_tensor->get_element_type(), element::i32); + EXPECT_EQ(result_tensor->get_element_type(), element::Type_t::i32); EXPECT_EQ(result_tensor->get_partial_shape(), (PartialShape{3, 3, 2})); auto cval = read_vector(result_tensor); vector out{0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0}; diff --git a/ngraph/test/graph_rewrite.cpp b/ngraph/test/graph_rewrite.cpp index 5cb3f5da222..5bfd26086b3 100644 --- a/ngraph/test/graph_rewrite.cpp +++ b/ngraph/test/graph_rewrite.cpp @@ -20,7 +20,7 @@ public: : MatcherPass() { auto divide = std::make_shared( - element::f32, Shape{}, pattern::has_class()); + element::Type_t::f32, Shape{}, pattern::has_class()); ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) { if (m_transformation_callback(m.get_match_root())) { @@ -52,10 +52,10 @@ NGRAPH_RTTI_DEFINITION(Anchor, "Anchor", 0); std::shared_ptr get_function() { - auto data = - std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1, 2}); + auto data = std::make_shared(ngraph::element::Type_t::f32, + ngraph::Shape{3, 1, 2}); auto divide_constant = - ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5}); + ngraph::opset3::Constant::create(ngraph::element::Type_t::f32, ngraph::Shape{1}, {1.5}); auto divide = std::make_shared(data, divide_constant); return std::make_shared(ngraph::NodeVector{divide}, ngraph::ParameterVector{data}); @@ -148,10 +148,10 @@ NGRAPH_RTTI_DEFINITION(PrivateDivide, "PrivateDivide", 0, ngraph::opset3::Divide std::shared_ptr get_derived_function() { - auto data = - std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1, 2}); + auto data = std::make_shared(ngraph::element::Type_t::f32, + ngraph::Shape{3, 1, 2}); auto divide_constant = - ngraph::opset3::Constant::create(ngraph::element::f32, ngraph::Shape{1}, {1.5}); + ngraph::opset3::Constant::create(ngraph::element::Type_t::f32, ngraph::Shape{1}, {1.5}); auto divide = std::make_shared(data, divide_constant); return std::make_shared(ngraph::NodeVector{divide}, ngraph::ParameterVector{data}); @@ -177,7 +177,7 @@ public: auto divide = std::make_shared( std::make_shared(), std::make_shared()); - // element::f32, Shape{}, pattern::has_class()); + // element::Type_t::f32, 
Shape{}, pattern::has_class()); ngraph::graph_rewrite_callback callback = [this](pattern::Matcher& m) { if (m_transformation_callback(m.get_match_root())) { @@ -384,4 +384,4 @@ TEST(PassConfigTest, Test1) manager.run_passes(f); ASSERT_EQ(count_ops_of_type(f), 1); } -} \ No newline at end of file +} diff --git a/ngraph/test/input_output_assign.cpp b/ngraph/test/input_output_assign.cpp index 4dac79ae7a0..61c125bf5f8 100644 --- a/ngraph/test/input_output_assign.cpp +++ b/ngraph/test/input_output_assign.cpp @@ -28,7 +28,7 @@ using namespace ngraph; TEST(input_output, param_tensor) { // Params have no arguments, so we can check that the value becomes a tensor output - auto& et = element::f32; + element::Type et = element::Type_t::f32; Shape shape{2, 4}; auto param = make_shared(et, shape); @@ -39,8 +39,8 @@ TEST(input_output, param_tensor) TEST(input_output, simple_output) { - auto param_0 = make_shared(element::f32, Shape{2, 4}); - auto param_1 = make_shared(element::f32, Shape{2, 4}); + auto param_0 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); auto add = make_shared(param_0, param_1); // Sort the ops diff --git a/ngraph/test/matcher_pass.cpp b/ngraph/test/matcher_pass.cpp index be2dadfed1d..1bba9331bb0 100644 --- a/ngraph/test/matcher_pass.cpp +++ b/ngraph/test/matcher_pass.cpp @@ -74,7 +74,7 @@ TEST(pattern, matcher_pass) { { TestMatcherPass test_matcher; - auto a = make_shared(element::f32, Shape{1}); + auto a = make_shared(element::Type_t::f32, Shape{1}); auto b = make_shared(a); auto c = make_shared(b); auto f = std::make_shared(ngraph::NodeVector{c}, ParameterVector{a}); @@ -92,7 +92,7 @@ TEST(pattern, matcher_pass) { TestMatcherPass test_matcher; - auto a = make_shared(element::f32, Shape{1}); + auto a = make_shared(element::Type_t::f32, Shape{1}); auto b = make_shared(a); auto c = make_shared(b); auto f = std::make_shared(ngraph::NodeVector{b, c}, ParameterVector{a}); @@ -103,7 +103,7 @@ TEST(pattern, matcher_pass) { std::shared_ptr f; { - auto a = make_shared(element::f32, Shape{1}); + auto a = make_shared(element::Type_t::f32, Shape{1}); auto b = make_shared(a); auto c = make_shared(b); auto d = make_shared(c); @@ -117,4 +117,4 @@ TEST(pattern, matcher_pass) // Parameter->Relu->Result ASSERT_TRUE(f->get_ops().size() == 3); } -} \ No newline at end of file +} diff --git a/ngraph/test/node_input_output.cpp b/ngraph/test/node_input_output.cpp index fdcc98d3ff5..4104e681667 100644 --- a/ngraph/test/node_input_output.cpp +++ b/ngraph/test/node_input_output.cpp @@ -30,8 +30,8 @@ using namespace std; TEST(node_input_output, input_create) { - auto x = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto y = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto x = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto y = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto add = make_shared(x, y); auto add_in_0 = add->input(0); @@ -39,14 +39,14 @@ TEST(node_input_output, input_create) EXPECT_EQ(add_in_0.get_node(), add.get()); EXPECT_EQ(add_in_0.get_index(), 0); - EXPECT_EQ(add_in_0.get_element_type(), element::f32); + EXPECT_EQ(add_in_0.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_in_0.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_in_0.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); EXPECT_EQ(add_in_0.get_source_output(), Output(x, 0)); EXPECT_EQ(add_in_1.get_node(), add.get()); EXPECT_EQ(add_in_1.get_index(), 1); - EXPECT_EQ(add_in_1.get_element_type(), element::f32); + 
EXPECT_EQ(add_in_1.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_in_1.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_in_1.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); EXPECT_EQ(add_in_1.get_source_output(), Output(y, 0)); @@ -56,8 +56,8 @@ TEST(node_input_output, input_create) TEST(node_input_output, input_create_const) { - auto x = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto y = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto x = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto y = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto add = make_shared(x, y); auto add_in_0 = add->input(0); @@ -65,14 +65,14 @@ TEST(node_input_output, input_create_const) EXPECT_EQ(add_in_0.get_node(), add.get()); EXPECT_EQ(add_in_0.get_index(), 0); - EXPECT_EQ(add_in_0.get_element_type(), element::f32); + EXPECT_EQ(add_in_0.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_in_0.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_in_0.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); EXPECT_EQ(add_in_0.get_source_output(), Output(x, 0)); EXPECT_EQ(add_in_1.get_node(), add.get()); EXPECT_EQ(add_in_1.get_index(), 1); - EXPECT_EQ(add_in_1.get_element_type(), element::f32); + EXPECT_EQ(add_in_1.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_in_1.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_in_1.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); EXPECT_EQ(add_in_1.get_source_output(), Output(y, 0)); @@ -82,15 +82,15 @@ TEST(node_input_output, input_create_const) TEST(node_input_output, output_create) { - auto x = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto y = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto x = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto y = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto add = make_shared(x, y); auto add_out_0 = add->output(0); EXPECT_EQ(add_out_0.get_node(), add.get()); EXPECT_EQ(add_out_0.get_index(), 0); - EXPECT_EQ(add_out_0.get_element_type(), element::f32); + EXPECT_EQ(add_out_0.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_out_0.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_out_0.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); @@ -99,15 +99,15 @@ TEST(node_input_output, output_create) TEST(node_input_output, output_create_const) { - auto x = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto y = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto x = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto y = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto add = make_shared(x, y); auto add_out_0 = add->output(0); EXPECT_EQ(add_out_0.get_node(), add.get()); EXPECT_EQ(add_out_0.get_index(), 0); - EXPECT_EQ(add_out_0.get_element_type(), element::f32); + EXPECT_EQ(add_out_0.get_element_type(), element::Type_t::f32); EXPECT_EQ(add_out_0.get_shape(), (Shape{1, 2, 3, 4})); EXPECT_TRUE(add_out_0.get_partial_shape().same_scheme(PartialShape{1, 2, 3, 4})); diff --git a/ngraph/test/onnx/onnx_import.in.cpp b/ngraph/test/onnx/onnx_import.in.cpp index c52b3717ae5..2d412e58acc 100644 --- a/ngraph/test/onnx/onnx_import.in.cpp +++ b/ngraph/test/onnx/onnx_import.in.cpp @@ -426,7 +426,7 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_model_missing_input) "TestMissingIn", 1, "com.intel.ai", [](const onnx_import::Node& node) -> OutputVector { OutputVector ng_inputs{node.get_ng_inputs()}; std::shared_ptr result = std::make_shared( - element::f32, ngraph::Shape{2, 2}, std::vector{1, 1, 1, 1}); + element::Type_t::f32, 
ngraph::Shape{2, 2}, std::vector{1, 1, 1, 1}); for (const auto& ng_input : ng_inputs) { diff --git a/ngraph/test/onnx/onnx_import_controlflow.in.cpp b/ngraph/test/onnx/onnx_import_controlflow.in.cpp index 827c5b4d716..be3168b40df 100644 --- a/ngraph/test/onnx/onnx_import_controlflow.in.cpp +++ b/ngraph/test/onnx/onnx_import_controlflow.in.cpp @@ -49,16 +49,16 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_add) // Shape inference tests const auto& parameters = function->get_parameters(); EXPECT_EQ(parameters.size(), 1); - EXPECT_EQ(parameters.at(0)->get_element_type(), ngraph::element::f32); + EXPECT_EQ(parameters.at(0)->get_element_type(), ngraph::element::Type_t::f32); EXPECT_TRUE(parameters.at(0)->get_partial_shape().is_static()); EXPECT_EQ(parameters.at(0)->get_partial_shape().to_shape(), (Shape{1, 2})); const auto& results = function->get_results(); EXPECT_EQ(results.size(), 2); - EXPECT_EQ(function->get_output_element_type(0), ngraph::element::f32); + EXPECT_EQ(function->get_output_element_type(0), ngraph::element::Type_t::f32); EXPECT_TRUE(function->get_output_partial_shape(0).is_static()); EXPECT_EQ(function->get_output_shape(0), (Shape{1, 2})); - EXPECT_EQ(function->get_output_element_type(1), ngraph::element::f32); + EXPECT_EQ(function->get_output_element_type(1), ngraph::element::Type_t::f32); EXPECT_TRUE(function->get_output_partial_shape(1).is_static()); EXPECT_EQ(function->get_output_shape(1), (Shape{3, 2})); @@ -375,10 +375,10 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_controlflow_loop_2d_trip_count_and_cond_skippe const auto& results = function->get_results(); EXPECT_EQ(results.size(), 2); - EXPECT_EQ(function->get_output_element_type(0), ngraph::element::f32); + EXPECT_EQ(function->get_output_element_type(0), ngraph::element::Type_t::f32); EXPECT_TRUE(function->get_output_partial_shape(0).is_static()); EXPECT_EQ(function->get_output_shape(0), (Shape{1, 2})); - EXPECT_EQ(function->get_output_element_type(1), ngraph::element::f32); + EXPECT_EQ(function->get_output_element_type(1), ngraph::element::Type_t::f32); // scan_outputs shape is not know if trip_count and termination condition is not determined EXPECT_TRUE(function->get_output_partial_shape(1).rank().is_dynamic()); } diff --git a/ngraph/test/op.cpp b/ngraph/test/op.cpp index 96e5e8d53da..380b177125d 100644 --- a/ngraph/test/op.cpp +++ b/ngraph/test/op.cpp @@ -33,14 +33,14 @@ using namespace ngraph; TEST(op, is_op) { - auto arg0 = make_shared(element::f32, Shape{1}); + auto arg0 = make_shared(element::Type_t::f32, Shape{1}); ASSERT_NE(nullptr, arg0); EXPECT_TRUE(op::is_parameter(arg0)); } TEST(op, is_parameter) { - auto arg0 = make_shared(element::f32, Shape{1}); + auto arg0 = make_shared(element::Type_t::f32, Shape{1}); ASSERT_NE(nullptr, arg0); auto t0 = make_shared(arg0, arg0); ASSERT_NE(nullptr, t0); @@ -49,7 +49,7 @@ TEST(op, is_parameter) TEST(op, provenance_tag) { - auto node = make_shared(element::f32, Shape{1}); + auto node = make_shared(element::Type_t::f32, Shape{1}); auto tag1 = "parameter node"; auto tag2 = "f32 node"; node->add_provenance_tag(tag1); @@ -104,7 +104,7 @@ TEST(op, variant) EXPECT_EQ(ship.x, 3); EXPECT_EQ(ship.y, 4); - auto node = make_shared(element::f32, Shape{1}); + auto node = make_shared(element::Type_t::f32, Shape{1}); node->get_rt_info()["A"] = var_ship; auto node_var_ship = node->get_rt_info().at("A"); ASSERT_TRUE((is_type>(node_var_ship))); diff --git a/ngraph/test/op_eval/floor_mod.cpp b/ngraph/test/op_eval/floor_mod.cpp index 2b2ad9a57df..8d1c3c765f9 100644 --- 
a/ngraph/test/op_eval/floor_mod.cpp +++ b/ngraph/test/op_eval/floor_mod.cpp @@ -30,8 +30,8 @@ using namespace ngraph; TEST(op_eval, floor_mod) { - auto a = make_shared(element::f32, Shape{4}); - auto b = make_shared(element::f32, Shape{4}); + auto a = make_shared(element::Type_t::f32, Shape{4}); + auto b = make_shared(element::Type_t::f32, Shape{4}); auto floor_mod = make_shared(a, b); auto fun = make_shared(OutputVector{floor_mod}, ParameterVector{a, b}); @@ -43,7 +43,7 @@ TEST(op_eval, floor_mod) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{4}, a_value), make_host_tensor(Shape{4}, b_value)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{4}); auto result_data = read_vector(result); for (size_t i = 0; i < expected_result.size(); i++) diff --git a/ngraph/test/op_eval/hsigmoid.cpp b/ngraph/test/op_eval/hsigmoid.cpp index 58e67e8baa3..17763841d13 100644 --- a/ngraph/test/op_eval/hsigmoid.cpp +++ b/ngraph/test/op_eval/hsigmoid.cpp @@ -30,7 +30,7 @@ using namespace ngraph; TEST(op_eval, hsigmoid) { - auto p = make_shared(element::f32, Shape{3}); + auto p = make_shared(element::Type_t::f32, Shape{3}); auto swish = make_shared(p); auto fun = make_shared(OutputVector{swish}, ParameterVector{p}); @@ -40,7 +40,7 @@ TEST(op_eval, hsigmoid) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/hswish.cpp b/ngraph/test/op_eval/hswish.cpp index 1de6087f5f4..3091b208a25 100644 --- a/ngraph/test/op_eval/hswish.cpp +++ b/ngraph/test/op_eval/hswish.cpp @@ -30,7 +30,7 @@ using namespace ngraph; TEST(op_eval, hswish) { - auto p = make_shared(element::f32, Shape{3}); + auto p = make_shared(element::Type_t::f32, Shape{3}); auto swish = make_shared(p); auto fun = make_shared(OutputVector{swish}, ParameterVector{p}); @@ -40,7 +40,7 @@ TEST(op_eval, hswish) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/interpolate.cpp b/ngraph/test/op_eval/interpolate.cpp index 239bcacbaaa..aaa637341f8 100644 --- a/ngraph/test/op_eval/interpolate.cpp +++ b/ngraph/test/op_eval/interpolate.cpp @@ -165,11 +165,11 @@ TEST(op_eval, interpolate_v4_cubic) std::size_t i = 0; for (const auto& s : shapes_and_attrs) { - auto image = std::make_shared(element::f32, data_shape); + auto image = std::make_shared(element::Type_t::f32, data_shape); auto target_spatial_shape = - op::Constant::create(element::i64, Shape{2}, s.spatial_shape); - auto scales = op::Constant::create(element::f32, Shape{2}, s.scales_data); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + op::Constant::create(element::Type_t::i64, Shape{2}, s.spatial_shape); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, s.scales_data); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::cubic; @@ 
-187,7 +187,7 @@ TEST(op_eval, interpolate_v4_cubic) auto result = std::make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(data_shape, input_data)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), s.out_shape); auto result_vector = read_vector(result); std::size_t num_of_elems = shape_size(s.out_shape); @@ -377,11 +377,11 @@ TEST(op_eval, interpolate_v4_nearest) std::size_t i = 0; for (const auto& s : shapes_and_attrs) { - auto image = std::make_shared(element::f32, s.input_data_shape); + auto image = std::make_shared(element::Type_t::f32, s.input_data_shape); auto target_spatial_shape = - op::Constant::create(element::i64, Shape{2}, s.spatial_shape); - auto scales = op::Constant::create(element::f32, Shape{2}, s.scales_data); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + op::Constant::create(element::Type_t::i64, Shape{2}, s.spatial_shape); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, s.scales_data); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -400,7 +400,7 @@ TEST(op_eval, interpolate_v4_nearest) ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(s.input_data_shape, input_data_list[i])})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), s.out_shape); auto result_vector = read_vector(result); std::size_t num_of_elems = shape_size(s.out_shape); @@ -523,11 +523,11 @@ TEST(op_eval, interpolate_v4_linear_onnx) std::size_t i = 0; for (const auto& s : shapes_and_attrs) { - auto image = std::make_shared(element::f32, s.input_data_shape); + auto image = std::make_shared(element::Type_t::f32, s.input_data_shape); auto target_spatial_shape = - op::Constant::create(element::i64, Shape{2}, s.spatial_shape); - auto scales = op::Constant::create(element::f32, Shape{2}, s.scales_data); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + op::Constant::create(element::Type_t::i64, Shape{2}, s.spatial_shape); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, s.scales_data); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::linear_onnx; @@ -546,7 +546,7 @@ TEST(op_eval, interpolate_v4_linear_onnx) ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(s.input_data_shape, input_data_list[i])})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), s.out_shape); auto result_vector = read_vector(result); std::size_t num_of_elems = shape_size(s.out_shape); diff --git a/ngraph/test/op_eval/matmul.cpp b/ngraph/test/op_eval/matmul.cpp index b74c02a8299..265fdc96cd2 100644 --- a/ngraph/test/op_eval/matmul.cpp +++ b/ngraph/test/op_eval/matmul.cpp @@ -28,8 +28,8 @@ using namespace ngraph; TEST(op_eval, matmul_dynamic_1D_arg) { - auto arg0 = make_shared(element::i32, PartialShape::dynamic()); - auto arg1 = make_shared(element::i32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -59,8 +59,8 
@@ TEST(op_eval, matmul_dynamic_1D_arg) TEST(op_eval, matmul_dynamic_0_elem_arg) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -85,8 +85,8 @@ TEST(op_eval, matmul_dynamic_0_elem_arg) TEST(op_eval, matmul_dynamic_2D_args) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -107,8 +107,8 @@ TEST(op_eval, matmul_dynamic_2D_args) TEST(op_eval, matmul_dynamic_2D_transpose0) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, true, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -128,8 +128,8 @@ TEST(op_eval, matmul_dynamic_2D_transpose0) TEST(op_eval, matmul_dynamic_2D_transpose1) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, true); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -149,8 +149,8 @@ TEST(op_eval, matmul_dynamic_2D_transpose1) TEST(op_eval, matmul_dynamic_same_batch_size) { - auto arg0 = make_shared(element::f32, PartialShape::dynamic()); - auto arg1 = make_shared(element::f32, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -177,8 +177,8 @@ TEST(op_eval, matmul_dynamic_same_batch_size) TEST(op_eval, matmul_dynamic_broadcast) { - auto arg0 = make_shared(element::i64, PartialShape::dynamic()); - auto arg1 = make_shared(element::i64, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, false); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); @@ -229,8 +229,8 @@ TEST(op_eval, matmul_dynamic_broadcast) TEST(op_eval, matmul_dynamic_broadcast_transpose0) { - auto arg0 = make_shared(element::i64, PartialShape::dynamic()); - auto arg1 = make_shared(element::i64, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, true, false); auto fun = make_shared(OutputVector{matmul}, 
ParameterVector{arg0, arg1}); @@ -265,8 +265,8 @@ TEST(op_eval, matmul_dynamic_broadcast_transpose0) TEST(op_eval, matmul_dynamic_broadcast_transpose1) { - auto arg0 = make_shared(element::i64, PartialShape::dynamic()); - auto arg1 = make_shared(element::i64, PartialShape::dynamic()); + auto arg0 = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto arg1 = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto matmul = make_shared(arg0, arg1, false, true); auto fun = make_shared(OutputVector{matmul}, ParameterVector{arg0, arg1}); diff --git a/ngraph/test/op_eval/mish.cpp b/ngraph/test/op_eval/mish.cpp index acc81f0e95f..2fb4251d155 100644 --- a/ngraph/test/op_eval/mish.cpp +++ b/ngraph/test/op_eval/mish.cpp @@ -31,7 +31,7 @@ using namespace ngraph; TEST(op_eval, mish_0D) { - auto p = make_shared(element::f32, Shape{}); + auto p = make_shared(element::Type_t::f32, Shape{}); auto mish = make_shared(p); auto fun = make_shared(OutputVector{mish}, ParameterVector{p}); @@ -43,7 +43,7 @@ TEST(op_eval, mish_0D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{}, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), (Shape{})); auto result_data = read_vector(result); EXPECT_NEAR(result_data[0], expected_result[i][0], 0.000001); diff --git a/ngraph/test/op_eval/non_zero.cpp b/ngraph/test/op_eval/non_zero.cpp index 52f6aa6b607..1d56e80bac0 100644 --- a/ngraph/test/op_eval/non_zero.cpp +++ b/ngraph/test/op_eval/non_zero.cpp @@ -31,8 +31,8 @@ using namespace ngraph; TEST(op_eval, non_zero_0D) { - auto p = make_shared(element::i32, Shape{}); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::i32, Shape{}); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{{-1}, {1}, {20}}; @@ -43,7 +43,7 @@ TEST(op_eval, non_zero_0D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{}, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), (Shape{1, 1})); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -52,13 +52,13 @@ TEST(op_eval, non_zero_0D) TEST(op_eval, non_zero_0D_0) { - auto p = make_shared(element::i32, Shape{}); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::i32, Shape{}); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{}, {0})})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), (Shape{0, 0})); auto result_data = read_vector(result); ASSERT_EQ(result_data.data(), nullptr); @@ -67,8 +67,8 @@ TEST(op_eval, non_zero_0D_0) TEST(op_eval, non_zero_1D) { Shape p_shape{5}; - auto p = make_shared(element::f32, p_shape); - auto non_zero = make_shared(p, element::i32); + auto p = make_shared(element::Type_t::f32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i32); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{ {1.0, 0, 3.0, 4.0, 0}, {0, 0, 0, 1.0, 3.2}, {1.0, 1.0, 1.0, 1.0, 
1.0}}; @@ -79,7 +79,7 @@ TEST(op_eval, non_zero_1D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(p_shape, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_shape(), expected_output_shape[i]); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -89,14 +89,14 @@ TEST(op_eval, non_zero_1D) TEST(op_eval, non_zero_1D_0s) { Shape p_shape{5}; - auto p = make_shared(element::f32, p_shape); - auto non_zero = make_shared(p, element::i64); + auto p = make_shared(element::Type_t::f32, p_shape); + auto non_zero = make_shared(p, element::Type_t::i64); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector input(shape_size(p_shape), 0); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(p_shape, input)})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), (Shape{1, 0})); auto result_data = read_vector(result); ASSERT_EQ(result_data.data(), nullptr); @@ -105,7 +105,7 @@ TEST(op_eval, non_zero_1D_0s) TEST(op_eval, non_zero_2D) { Shape p_shape{3, 2}; - auto p = make_shared(element::i32, p_shape); + auto p = make_shared(element::Type_t::i32, p_shape); auto non_zero = make_shared(p); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{ @@ -118,7 +118,7 @@ TEST(op_eval, non_zero_2D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(p_shape, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_output_shape[i]); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -128,8 +128,8 @@ TEST(op_eval, non_zero_2D) TEST(op_eval, non_zero_3D) { Shape p_shape{3, 2, 2}; - auto p = make_shared(element::i64, p_shape); - auto non_zero = make_shared(p, element::i32); + auto p = make_shared(element::Type_t::i64, p_shape); + auto non_zero = make_shared(p, element::Type_t::i32); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{{1, 0, 3, 4, 0, 1, 0, 0, 1, 3, 5, 0}, {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}}; @@ -143,7 +143,7 @@ TEST(op_eval, non_zero_3D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(p_shape, inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_shape(), expected_output_shape[i]); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); @@ -153,14 +153,14 @@ TEST(op_eval, non_zero_3D) TEST(op_eval, non_zero_3D_0s) { Shape p_shape{3, 2, 2}; - auto p = make_shared(element::i64, p_shape); - auto non_zero = make_shared(p, element::i32); + auto p = make_shared(element::Type_t::i64, p_shape); + auto non_zero = make_shared(p, element::Type_t::i32); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector input(shape_size(p_shape), 0); auto result = make_shared(); ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(p_shape, input)})); - EXPECT_EQ(result->get_element_type(), element::i32); + EXPECT_EQ(result->get_element_type(), element::Type_t::i32); EXPECT_EQ(result->get_shape(), (Shape{p_shape.size(), 0})); auto result_data = read_vector(result); 
ASSERT_EQ(result_data.data(), nullptr); @@ -169,7 +169,7 @@ TEST(op_eval, non_zero_3D_0s) TEST(op_eval, non_zero_dynamic) { PartialShape p_shape = PartialShape::dynamic(); - auto p = make_shared(element::i32, p_shape); + auto p = make_shared(element::Type_t::i32, p_shape); auto non_zero = make_shared(p); auto fun = make_shared(OutputVector{non_zero}, ParameterVector{p}); std::vector> inputs{ @@ -182,7 +182,7 @@ TEST(op_eval, non_zero_dynamic) auto result = make_shared(); ASSERT_TRUE(fun->evaluate( {result}, {make_host_tensor(input_shapes[i], inputs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_output_shape[i]); auto result_data = read_vector(result); ASSERT_EQ(result_data, expected_result[i]); diff --git a/ngraph/test/op_eval/reduce_l1.cpp b/ngraph/test/op_eval/reduce_l1.cpp index 544b31fc447..ed494971783 100644 --- a/ngraph/test/op_eval/reduce_l1.cpp +++ b/ngraph/test/op_eval/reduce_l1.cpp @@ -30,8 +30,8 @@ using namespace ngraph; TEST(op_eval, reduce_l1_one_axis_keep_dims) { - auto data = make_shared(element::f32, Shape{3, 2, 2}); - auto axes = opset4::Constant::create(element::i32, Shape{1}, {2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 2, 2}); + auto axes = opset4::Constant::create(element::Type_t::i32, Shape{1}, {2}); auto reduce = make_shared(data, axes, true); auto fun = make_shared(OutputVector{reduce}, ParameterVector{data}); @@ -42,7 +42,7 @@ TEST(op_eval, reduce_l1_one_axis_keep_dims) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 2, 2}, inputs), make_host_tensor(Shape{1}, {2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{std::vector({3, 2, 1})}); auto result_data = read_vector(result); for (auto i = 0; i < expected_result.size(); i++) @@ -51,8 +51,8 @@ TEST(op_eval, reduce_l1_one_axis_keep_dims) TEST(op_eval, reduce_l1_one_axis_do_not_keep_dims) { - auto data = make_shared(element::f32, Shape{3, 2, 2}); - auto axes = opset4::Constant::create(element::i32, Shape{1}, {2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 2, 2}); + auto axes = opset4::Constant::create(element::Type_t::i32, Shape{1}, {2}); auto reduce = make_shared(data, axes, false); auto fun = make_shared(OutputVector{reduce}, ParameterVector{data}); @@ -63,7 +63,7 @@ TEST(op_eval, reduce_l1_one_axis_do_not_keep_dims) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 2, 2}, inputs), make_host_tensor(Shape{1}, {2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{std::vector({3, 2})}); auto result_data = read_vector(result); for (auto i = 0; i < expected_result.size(); i++) diff --git a/ngraph/test/op_eval/reduce_l2.cpp b/ngraph/test/op_eval/reduce_l2.cpp index d79bf067b7d..d718e1a3685 100644 --- a/ngraph/test/op_eval/reduce_l2.cpp +++ b/ngraph/test/op_eval/reduce_l2.cpp @@ -30,8 +30,8 @@ using namespace ngraph; TEST(op_eval, reduce_l2_one_axis_keep_dims) { - auto data = make_shared(element::f32, Shape{3, 2, 2}); - auto axes = opset4::Constant::create(element::i32, Shape{1}, {2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 2, 2}); + auto axes = opset4::Constant::create(element::Type_t::i32, Shape{1}, {2}); auto reduce = make_shared(data, axes, true); auto fun = make_shared(OutputVector{reduce}, 
ParameterVector{data}); @@ -43,7 +43,7 @@ TEST(op_eval, reduce_l2_one_axis_keep_dims) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 2, 2}, inputs), make_host_tensor(Shape{1}, {2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{std::vector({3, 2, 1})}); auto result_data = read_vector(result); for (auto i = 0; i < expected_result.size(); i++) @@ -52,8 +52,8 @@ TEST(op_eval, reduce_l2_one_axis_keep_dims) TEST(op_eval, reduce_l2_one_axis_do_not_keep_dims) { - auto data = make_shared(element::f32, Shape{3, 2, 2}); - auto axes = opset4::Constant::create(element::i32, Shape{1}, {2}); + auto data = make_shared(element::Type_t::f32, Shape{3, 2, 2}); + auto axes = opset4::Constant::create(element::Type_t::i32, Shape{1}, {2}); auto reduce = make_shared(data, axes, false); auto fun = make_shared(OutputVector{reduce}, ParameterVector{data}); @@ -65,7 +65,7 @@ TEST(op_eval, reduce_l2_one_axis_do_not_keep_dims) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3, 2, 2}, inputs), make_host_tensor(Shape{1}, {2})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{std::vector({3, 2})}); auto result_data = read_vector(result); for (auto i = 0; i < expected_result.size(); i++) diff --git a/ngraph/test/op_eval/roi_align.cpp b/ngraph/test/op_eval/roi_align.cpp index 3e43e1f810d..9e556671a40 100644 --- a/ngraph/test/op_eval/roi_align.cpp +++ b/ngraph/test/op_eval/roi_align.cpp @@ -42,9 +42,9 @@ TEST(op_eval, roi_align_avg_pool) const auto data_shape = Shape{N, C, H, W}; const auto rois_shape = Shape{num_rois, 4}; - const auto data = make_shared(element::f32, data_shape); - const auto rois = make_shared(element::f32, rois_shape); - const auto batch_indices = make_shared(element::i32, Shape{num_rois}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{num_rois}); auto roi_align = make_shared( data, rois, batch_indices, pooled_height, pooled_width, 2, 1.0f / 16.0f, "avg"); @@ -93,7 +93,7 @@ TEST(op_eval, roi_align_avg_pool) 56.8021f, 58.4375f, 58.4375f, 58.4375f, 58.4688f, 60.1042f, 60.1042f, 60.1042f, 60.1354f}; const auto expected_shape = Shape{num_rois, C, pooled_height, pooled_width}; - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), expected_shape); ASSERT_TRUE(test::all_close_f(read_vector(result), expected_vec, 6, 0.001)); } @@ -109,9 +109,9 @@ TEST(op_eval, roi_align_max_pool) const auto data_shape = Shape{N, C, H, W}; const auto rois_shape = Shape{num_rois, 4}; - const auto data = make_shared(element::f32, data_shape); - const auto rois = make_shared(element::f32, rois_shape); - const auto batch_indices = make_shared(element::i32, Shape{num_rois}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{num_rois}); auto roi_align = make_shared( data, rois, batch_indices, pooled_height, pooled_width, 2, 1.0f / 16.0f, "max"); @@ -160,7 +160,7 @@ TEST(op_eval, roi_align_max_pool) 40.1042f, 46.25f, 46.25f, 46.25f, 46.25f, 56.25f, 56.25f, 56.25f, 56.25f}; const auto 
expected_shape = Shape{num_rois, C, pooled_height, pooled_width}; - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), expected_shape); ASSERT_TRUE(test::all_close_f(read_vector(result), expected_vec, 6, 0.001)); -} \ No newline at end of file +} diff --git a/ngraph/test/op_eval/roi_pooling.cpp b/ngraph/test/op_eval/roi_pooling.cpp index c7057e866c7..00ed56cc876 100644 --- a/ngraph/test/op_eval/roi_pooling.cpp +++ b/ngraph/test/op_eval/roi_pooling.cpp @@ -43,8 +43,8 @@ NGRAPH_TEST(op_eval, roi_pooling_invalid_roi_batch_id) Shape pooled_shape{pooled_h, pooled_w}; Shape output_shape{num_rois, channels, pooled_h, pooled_w}; - const auto feat_maps = make_shared(element::f32, feat_maps_shape); - const auto rois = make_shared(element::f32, rois_shape); + const auto feat_maps = make_shared(element::Type_t::f32, feat_maps_shape); + const auto rois = make_shared(element::Type_t::f32, rois_shape); const auto roi_pooling = make_shared(feat_maps, rois, pooled_shape, spatial_scale, "max"); const auto f = make_shared(roi_pooling, ParameterVector{feat_maps, rois}); diff --git a/ngraph/test/op_eval/round.cpp b/ngraph/test/op_eval/round.cpp index e9807aa8047..a933b74ce3e 100644 --- a/ngraph/test/op_eval/round.cpp +++ b/ngraph/test/op_eval/round.cpp @@ -30,7 +30,7 @@ using namespace ngraph; TEST(op_eval, rounding_to_even) { - auto p = make_shared(element::f32, Shape{9}); + auto p = make_shared(element::Type_t::f32, Shape{9}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_TO_EVEN); auto fun = make_shared(OutputVector{round}, ParameterVector{p}); @@ -40,7 +40,7 @@ TEST(op_eval, rounding_to_even) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{9}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{9}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) @@ -49,7 +49,7 @@ TEST(op_eval, rounding_to_even) TEST(op_eval, rounding_away) { - auto p = make_shared(element::f32, Shape{9}); + auto p = make_shared(element::Type_t::f32, Shape{9}); auto round = make_shared(p, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); auto fun = make_shared(OutputVector{round}, ParameterVector{p}); @@ -59,7 +59,7 @@ TEST(op_eval, rounding_away) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{9}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{9}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/softplus.cpp b/ngraph/test/op_eval/softplus.cpp index 5404e741056..3e8ec24c724 100644 --- a/ngraph/test/op_eval/softplus.cpp +++ b/ngraph/test/op_eval/softplus.cpp @@ -30,7 +30,7 @@ using namespace ngraph; TEST(op_eval, softplus_4D) { - auto p = make_shared(element::f32, Shape{4}); + auto p = make_shared(element::Type_t::f32, Shape{4}); auto softplus = make_shared(p); auto fun = make_shared(OutputVector{softplus}, ParameterVector{p}); @@ -40,7 +40,7 @@ TEST(op_eval, softplus_4D) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{4}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), 
Shape{4}); auto result_data = read_vector(result); for (size_t i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/split.cpp b/ngraph/test/op_eval/split.cpp index 7f806303cc6..0e538f76f34 100644 --- a/ngraph/test/op_eval/split.cpp +++ b/ngraph/test/op_eval/split.cpp @@ -32,8 +32,8 @@ using namespace ngraph; TEST(op_eval, split) { const auto data_shape = Shape{3, 8, 3}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); const size_t num_splits = 4; auto split = make_shared(data, axis, num_splits); @@ -61,7 +61,7 @@ TEST(op_eval, split) for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{3, 2, 3})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -70,8 +70,8 @@ TEST(op_eval, split) TEST(op_eval, split_neg_axis) { const auto data_shape = Shape{2, 1, 4, 1}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); const size_t num_splits = 4; auto split = make_shared(data, axis, num_splits); @@ -95,7 +95,7 @@ TEST(op_eval, split_neg_axis) for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, 1, 1})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -104,8 +104,8 @@ TEST(op_eval, split_neg_axis) TEST(op_eval, split_boolean_type) { const auto data_shape = Shape{2, 1, 2, 1, 2}; - const auto data = make_shared(element::boolean, data_shape); - const auto axis = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::boolean, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); const size_t num_splits = 2; auto split = make_shared(data, axis, num_splits); @@ -129,7 +129,7 @@ TEST(op_eval, split_boolean_type) for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::boolean); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::boolean); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, 1, 1, 2})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -138,8 +138,8 @@ TEST(op_eval, split_boolean_type) TEST(op_eval, split_1d) { const auto data_shape = Shape{8}; - const auto data = make_shared(element::f32, data_shape); - const auto axis = make_shared(element::i64, Shape{}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); const size_t num_splits = 4; auto split = make_shared(data, axis, num_splits); @@ -164,7 +164,7 @@ TEST(op_eval, split_1d) for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::f32); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::f32); EXPECT_EQ(results[i]->get_shape(), (Shape{2})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } diff --git a/ngraph/test/op_eval/strided_slice.cpp b/ngraph/test/op_eval/strided_slice.cpp index f9229a1dbaa..4d9f7c00c51 100644 --- a/ngraph/test/op_eval/strided_slice.cpp +++ 
b/ngraph/test/op_eval/strided_slice.cpp @@ -32,10 +32,10 @@ using namespace ngraph; TEST(op_eval, strided_slice1) { auto A_shape = Shape{3, 2, 3}; - auto A = make_shared(element::i64, A_shape); - auto begin = make_shared(element::i64, Shape{3}); - auto end = make_shared(element::i64, Shape{3}); - auto strides = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::i64, A_shape); + auto begin = make_shared(element::Type_t::i64, Shape{3}); + auto end = make_shared(element::Type_t::i64, Shape{3}); + auto strides = make_shared(element::Type_t::i64, Shape{3}); auto r = make_shared(A, begin, end, @@ -66,7 +66,7 @@ TEST(op_eval, strided_slice1) make_host_tensor(Shape{3}, begin_vecs[i]), make_host_tensor(Shape{3}, end_vecs[i]), make_host_tensor(Shape{3}, strides_vecs[i])})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_shape[i]); EXPECT_EQ(read_vector(result), expected_results[i]); } @@ -89,10 +89,10 @@ TEST(op_eval, strided_slice1) TEST(op_eval, strided_slice2) { auto A_shape = Shape{3, 2, 3}; - auto A = make_shared(element::i64, A_shape); - auto begin = make_shared(element::i64, Shape{3}); - auto end = make_shared(element::i64, Shape{3}); - auto strides = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::i64, A_shape); + auto begin = make_shared(element::Type_t::i64, Shape{3}); + auto end = make_shared(element::Type_t::i64, Shape{3}); + auto strides = make_shared(element::Type_t::i64, Shape{3}); std::vector begin_vec{1, 0, 0}; std::vector end_vec{0, 0, 0}; @@ -123,7 +123,7 @@ TEST(op_eval, strided_slice2) make_host_tensor(Shape{3}, begin_vec), make_host_tensor(Shape{3}, end_vec), make_host_tensor(Shape{3}, strides_vec)})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_shape); EXPECT_EQ(read_vector(result), expected); } @@ -136,10 +136,10 @@ TEST(op_eval, strided_slice2) TEST(op_eval, strided_slice3) { auto A_shape = Shape{3, 2, 3}; - auto A = make_shared(element::i64, A_shape); - auto begin = make_shared(element::i64, Shape{3}); - auto end = make_shared(element::i64, Shape{3}); - auto strides = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::i64, A_shape); + auto begin = make_shared(element::Type_t::i64, Shape{3}); + auto end = make_shared(element::Type_t::i64, Shape{3}); + auto strides = make_shared(element::Type_t::i64, Shape{3}); std::vector begin_vec{0, 1, 0}; std::vector end_vec{2, 0, 0}; @@ -170,7 +170,7 @@ TEST(op_eval, strided_slice3) make_host_tensor(Shape{3}, begin_vec), make_host_tensor(Shape{3}, end_vec), make_host_tensor(Shape{3}, strides_vec)})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_shape); EXPECT_EQ(read_vector(result), expected); } @@ -183,10 +183,10 @@ TEST(op_eval, strided_slice3) TEST(op_eval, strided_slice_reverse) { auto A_shape = Shape{3, 2, 3}; - auto A = make_shared(element::i64, A_shape); - auto begin = make_shared(element::i64, Shape{3}); - auto end = make_shared(element::i64, Shape{3}); - auto strides = make_shared(element::i64, Shape{3}); + auto A = make_shared(element::Type_t::i64, A_shape); + auto begin = make_shared(element::Type_t::i64, Shape{3}); + auto end = make_shared(element::Type_t::i64, Shape{3}); + auto strides = 
make_shared(element::Type_t::i64, Shape{3}); std::vector begin_vec{0, 0, 0}; std::vector end_vec{1, 0, 0}; @@ -217,7 +217,7 @@ TEST(op_eval, strided_slice_reverse) make_host_tensor(Shape{3}, begin_vec), make_host_tensor(Shape{3}, end_vec), make_host_tensor(Shape{3}, strides_vec)})); - EXPECT_EQ(result->get_element_type(), element::i64); + EXPECT_EQ(result->get_element_type(), element::Type_t::i64); EXPECT_EQ(result->get_shape(), expected_shape); EXPECT_EQ(read_vector(result), expected); } diff --git a/ngraph/test/op_eval/swish.cpp b/ngraph/test/op_eval/swish.cpp index 26997dfc0fb..9d57235b588 100644 --- a/ngraph/test/op_eval/swish.cpp +++ b/ngraph/test/op_eval/swish.cpp @@ -30,8 +30,8 @@ using namespace ngraph; TEST(op_eval, swish_with_beta1) { - auto p = make_shared(element::f32, Shape{3}); - auto beta = make_shared(element::f32, Shape{}); + auto p = make_shared(element::Type_t::f32, Shape{3}); + auto beta = make_shared(element::Type_t::f32, Shape{}); auto swish = make_shared(p, beta); auto fun = make_shared(OutputVector{swish}, ParameterVector{p, beta}); @@ -42,7 +42,7 @@ TEST(op_eval, swish_with_beta1) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs), make_host_tensor(Shape{}, {1.0})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) @@ -51,8 +51,8 @@ TEST(op_eval, swish_with_beta1) TEST(op_eval, swish_with_beta0_75) { - auto p = make_shared(element::f32, Shape{3}); - auto beta = make_shared(element::f32, Shape{}); + auto p = make_shared(element::Type_t::f32, Shape{3}); + auto beta = make_shared(element::Type_t::f32, Shape{}); auto swish = make_shared(p, beta); auto fun = make_shared(OutputVector{swish}, ParameterVector{p, beta}); @@ -63,7 +63,7 @@ TEST(op_eval, swish_with_beta0_75) ASSERT_TRUE(fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs), make_host_tensor(Shape{}, {0.75})})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) @@ -72,7 +72,7 @@ TEST(op_eval, swish_with_beta0_75) TEST(op_eval, swish_without_beta) { - auto p = make_shared(element::f32, Shape{3}); + auto p = make_shared(element::Type_t::f32, Shape{3}); auto swish = make_shared(p); auto fun = make_shared(OutputVector{swish}, ParameterVector{p}); @@ -82,7 +82,7 @@ TEST(op_eval, swish_without_beta) auto result = make_shared(); ASSERT_TRUE( fun->evaluate({result}, {make_host_tensor(Shape{3}, inputs)})); - EXPECT_EQ(result->get_element_type(), element::f32); + EXPECT_EQ(result->get_element_type(), element::Type_t::f32); EXPECT_EQ(result->get_shape(), Shape{3}); auto result_data = read_vector(result); for (auto i = 0; i < inputs.size(); i++) diff --git a/ngraph/test/op_eval/variadic_split.cpp b/ngraph/test/op_eval/variadic_split.cpp index ff8942dbd23..40b8ec9ad88 100644 --- a/ngraph/test/op_eval/variadic_split.cpp +++ b/ngraph/test/op_eval/variadic_split.cpp @@ -32,9 +32,9 @@ using namespace ngraph; TEST(op_eval, variadic_split_same_lengths) { const auto data_shape = Shape{3, 8, 3}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{4}); + const auto data = make_shared(element::Type_t::i64, 
data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{4}); auto var_split = make_shared(data, axis, split_lengths); @@ -62,7 +62,7 @@ TEST(op_eval, variadic_split_same_lengths) for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{3, static_cast(split_lengths_vec[i]), 3})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); @@ -72,9 +72,9 @@ TEST(op_eval, variadic_split_same_lengths) TEST(op_eval, variadic_split_different_lengths) { const auto data_shape = Shape{6, 2, 3}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{3}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -101,7 +101,7 @@ TEST(op_eval, variadic_split_different_lengths) for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{static_cast(split_lengths_vec[i]), 2, 3})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); @@ -111,9 +111,9 @@ TEST(op_eval, variadic_split_different_lengths) TEST(op_eval, variadic_split_neg_length) { const auto data_shape = Shape{2, 7, 1}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{3}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -139,7 +139,7 @@ TEST(op_eval, variadic_split_neg_length) const vector expected_lengths{3, 1, 3}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{2, expected_lengths[i], 1})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -148,9 +148,9 @@ TEST(op_eval, variadic_split_neg_length) TEST(op_eval, variadic_split_neg_length_neg_axis) { const auto data_shape = Shape{2, 1, 5, 2}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{3}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -176,7 +176,7 @@ TEST(op_eval, variadic_split_neg_length_neg_axis) const vector expected_lengths{1, 2, 2}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i], 2})); 
EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -185,9 +185,9 @@ TEST(op_eval, variadic_split_neg_length_neg_axis) TEST(op_eval, variadic_split_neg_length_bool_data_type) { const auto data_shape = Shape{2, 1, 5}; - const auto data = make_shared(element::boolean, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{3}); + const auto data = make_shared(element::Type_t::boolean, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -212,7 +212,7 @@ TEST(op_eval, variadic_split_neg_length_bool_data_type) const vector expected_lengths{1, 2, 2}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::boolean); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::boolean); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i]})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -221,9 +221,9 @@ TEST(op_eval, variadic_split_neg_length_bool_data_type) TEST(op_eval, variadic_split_neg_length_axis_ui64) { const auto data_shape = Shape{2, 1, 4, 2}; - const auto data = make_shared(element::i64, data_shape); - const auto axis = make_shared(element::u64, Shape{}); - const auto split_lengths = make_shared(element::i64, Shape{2}); + const auto data = make_shared(element::Type_t::i64, data_shape); + const auto axis = make_shared(element::Type_t::u64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i64, Shape{2}); auto var_split = make_shared(data, axis, split_lengths); @@ -250,7 +250,7 @@ TEST(op_eval, variadic_split_neg_length_axis_ui64) const vector expected_lengths{2, 2}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::i64); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::i64); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 1, expected_lengths[i], 2})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } @@ -259,9 +259,9 @@ TEST(op_eval, variadic_split_neg_length_axis_ui64) TEST(op_eval, variadic_split_data_float_length_i32) { const auto data_shape = Shape{2, 3, 3}; - const auto data = make_shared(element::f32, data_shape); - const auto axis = make_shared(element::i64, Shape{}); - const auto split_lengths = make_shared(element::i32, Shape{3}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto axis = make_shared(element::Type_t::i64, Shape{}); + const auto split_lengths = make_shared(element::Type_t::i32, Shape{3}); auto var_split = make_shared(data, axis, split_lengths); @@ -288,7 +288,7 @@ TEST(op_eval, variadic_split_data_float_length_i32) const vector expected_lengths{1, 1, 1}; for (int i = 0; i < split_lengths_vec.size(); ++i) { - EXPECT_EQ(results[i]->get_element_type(), element::f32); + EXPECT_EQ(results[i]->get_element_type(), element::Type_t::f32); EXPECT_EQ(results[i]->get_shape(), (Shape{2, 3, expected_lengths[i]})); EXPECT_EQ(read_vector(results[i]), expected_results[i]); } diff --git a/ngraph/test/partial_shape.cpp b/ngraph/test/partial_shape.cpp index 390f7a35201..e57ab91aacd 100644 --- a/ngraph/test/partial_shape.cpp +++ b/ngraph/test/partial_shape.cpp @@ -218,7 +218,7 @@ TEST(partial_shape, to_shape_rank_dynamic) TEST(partial_shape, tensor_descriptor_from_shape) { - descriptor::Tensor t{element::i32, Shape{1, 2, 3}, "Ankeny"}; + 
descriptor::Tensor t{element::Type_t::i32, Shape{1, 2, 3}, "Ankeny"}; ASSERT_EQ(t.get_shape(), (Shape{1, 2, 3})); ASSERT_EQ(t.get_partial_shape().rank().get_length(), 3); @@ -227,7 +227,7 @@ TEST(partial_shape, tensor_descriptor_from_shape) TEST(partial_shape, tensor_descriptor_from_static_partial_shape) { - descriptor::Tensor t{element::i32, PartialShape{1, 2, 3}, "Burnside"}; + descriptor::Tensor t{element::Type_t::i32, PartialShape{1, 2, 3}, "Burnside"}; ASSERT_EQ(t.get_shape(), (Shape{1, 2, 3})); ASSERT_EQ(t.get_partial_shape().rank().get_length(), 3); @@ -236,7 +236,7 @@ TEST(partial_shape, tensor_descriptor_from_static_partial_shape) TEST(partial_shape, tensor_descriptor_from_rank_static_dynamic_partial_shape) { - descriptor::Tensor t{element::i32, PartialShape{1, Dimension::dynamic(), 3}, "Couch"}; + descriptor::Tensor t{element::Type_t::i32, PartialShape{1, Dimension::dynamic(), 3}, "Couch"}; ASSERT_EQ(t.get_partial_shape().rank().get_length(), 3); ASSERT_THROW({ t.get_shape(); }, std::invalid_argument); @@ -245,7 +245,7 @@ TEST(partial_shape, tensor_descriptor_from_rank_static_dynamic_partial_shape) TEST(partial_shape, tensor_descriptor_from_rank_dynamic_partial_shape) { - descriptor::Tensor t{element::i32, PartialShape::dynamic(), "Davis"}; + descriptor::Tensor t{element::Type_t::i32, PartialShape::dynamic(), "Davis"}; ASSERT_TRUE(t.get_partial_shape().rank().is_dynamic()); ASSERT_THROW({ t.get_shape(); }, std::invalid_argument); @@ -877,7 +877,7 @@ TEST(partial_shape, changed_dimension_by_reference) TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -904,7 +904,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_ok) TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_data_dilation) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 0, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -931,7 +931,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_data TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_window_dilation) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -958,7 +958,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_wind TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_window_strides) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -985,7 +985,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_dynamic_zero_wind TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), 2, 3, 
Dimension::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1012,7 +1012,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_ok TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_zero_data_post_padding) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), 2, 3, Dimension::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, -1, 0, 0}; @@ -1039,7 +1039,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_neg_padding_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), 4, 3, Dimension::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, -1, 0, 0}; @@ -1064,7 +1064,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_dynamic_ne TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1090,7 +1090,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_ok TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_window_dim_zero) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1119,7 +1119,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_wi TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_window_dilated_dim_zero) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1148,7 +1148,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_window_all_in_padding_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 3, 0}; @@ -1175,7 +1175,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_window_all_in_padding_not_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 3, 0}; @@ -1204,7 +1204,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_dynamic_rank_static_dynamic_dilated_window_not_all_in_padding) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{PartialShape::dynamic()}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 3, 0}; 
@@ -1230,7 +1230,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1258,7 +1258,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dyn TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_with_padding_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 2, 0}; @@ -1286,7 +1286,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_with_padding_and_stride_ok) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 2, 0}; @@ -1313,7 +1313,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_window_too_big) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 0, 0}; @@ -1342,7 +1342,7 @@ TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dyn TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_window_not_too_big_padding) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 5, 0}; @@ -1370,7 +1370,7 @@ TEST(partial_shape, TEST(partial_shape, infer_windowed_reduction_rank_static_dynamic_rank_static_dynamic_window_dilated_too_big) { - auto node = std::make_shared(element::f32, Shape{}); + auto node = std::make_shared(element::Type_t::f32, Shape{}); PartialShape data_shape{Dimension::dynamic(), Dimension::dynamic(), 6, 4}; Strides data_dilation{1, 1, 1, 1}; CoordinateDiff data_padding_below{0, 0, 5, 0}; diff --git a/ngraph/test/pass_config.cpp b/ngraph/test/pass_config.cpp index f350c4a5658..264d5e90a71 100644 --- a/ngraph/test/pass_config.cpp +++ b/ngraph/test/pass_config.cpp @@ -90,8 +90,8 @@ NGRAPH_RTTI_DEFINITION(TestGraphRewritePass, "TestGraphRewritePass", 0); std::tuple, std::shared_ptr, std::shared_ptr> get_test_function() { - auto data = - std::make_shared(ngraph::element::f32, ngraph::Shape{3, 1, 2}); + auto data = std::make_shared(ngraph::element::Type_t::f32, + ngraph::Shape{3, 1, 2}); auto relu = std::make_shared(data); relu->set_friendly_name("relu"); auto sigmoid = std::make_shared(relu); @@ -378,4 +378,4 @@ TEST(PassConfig, EnableDisablePasses11) ASSERT_EQ(relu->get_friendly_name(), "renamed"); ASSERT_EQ(sigmoid->get_friendly_name(), "renamed"); -} \ No newline at end of file +} diff --git a/ngraph/test/pass_liveness.cpp b/ngraph/test/pass_liveness.cpp index 
63ef1126582..89433c2e12e 100644 --- a/ngraph/test/pass_liveness.cpp +++ b/ngraph/test/pass_liveness.cpp @@ -36,7 +36,7 @@ namespace ng = ngraph; TEST(liveness, constant) { Shape shape{1}; - auto c = op::Constant::create(element::i32, shape, {5}); + auto c = op::Constant::create(element::Type_t::i32, shape, {5}); auto f = make_shared(make_shared(c), ParameterVector{}); pass::Manager pass_manager; diff --git a/ngraph/test/pass_shape_relevance.cpp b/ngraph/test/pass_shape_relevance.cpp index 18470dc0511..18be6e268a3 100644 --- a/ngraph/test/pass_shape_relevance.cpp +++ b/ngraph/test/pass_shape_relevance.cpp @@ -32,8 +32,8 @@ using namespace std; TEST(shape_relevance, simple) { - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); + auto param1 = make_shared(element::Type_t::f32, Shape{4, 6}); auto x = make_shared(param0, param1); auto f = make_shared(x, ParameterVector{param0, param1}); @@ -48,8 +48,8 @@ TEST(shape_relevance, simple) TEST(shape_relevance, param_direct) { - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::i64, Shape{4}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); + auto param1 = make_shared(element::Type_t::i64, Shape{4}); auto x = make_shared(param0, param1, true); auto f = make_shared(x, ParameterVector{param0, param1}); @@ -64,9 +64,9 @@ TEST(shape_relevance, param_direct) TEST(shape_relevance, param_indirect) { - auto param0 = make_shared(element::f32, Shape{4, 6}); - auto param1 = make_shared(element::i64, Shape{4}); - auto param2 = make_shared(element::i64, Shape{2}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); + auto param1 = make_shared(element::Type_t::i64, Shape{4}); + auto param2 = make_shared(element::Type_t::i64, Shape{2}); auto c = make_shared(NodeVector{param1, param2}, 0); auto x = make_shared(param0, c, true); @@ -84,7 +84,7 @@ TEST(shape_relevance, param_indirect) TEST(shape_relevance, param_shape_of_direct_v0) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto x = make_shared(param0, make_shared(param0), true); @@ -99,7 +99,7 @@ TEST(shape_relevance, param_shape_of_direct_v0) TEST(shape_relevance, param_shape_of_direct_v3) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto x = make_shared(param0, make_shared(param0), true); @@ -114,10 +114,10 @@ TEST(shape_relevance, param_shape_of_direct_v3) TEST(shape_relevance, param_shape_of_direct_i32_v3) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto x = make_shared( - param0, make_shared(param0, element::i32), true); + param0, make_shared(param0, element::Type_t::i32), true); auto f = make_shared(x, ParameterVector{param0}); @@ -130,11 +130,11 @@ TEST(shape_relevance, param_shape_of_direct_i32_v3) TEST(shape_relevance, param_shape_of_indirect_v0) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto s = make_shared(param0); auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + s, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); auto x = make_shared(param0, r, true); auto f = make_shared(x, ParameterVector{param0}); @@ -148,11 
+148,11 @@ TEST(shape_relevance, param_shape_of_indirect_v0) TEST(shape_relevance, param_shape_of_indirect_v3) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); auto s = make_shared(param0); auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + s, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); auto x = make_shared(param0, r, true); auto f = make_shared(x, ParameterVector{param0}); @@ -166,11 +166,11 @@ TEST(shape_relevance, param_shape_of_indirect_v3) TEST(shape_relevance, param_shape_of_indirect_i32_v3) { - auto param0 = make_shared(element::f32, Shape{4, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{4, 6}); - auto s = make_shared(param0, element::i32); + auto s = make_shared(param0, element::Type_t::i32); auto r = make_shared( - s, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + s, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); auto x = make_shared(param0, r, true); auto f = make_shared(x, ParameterVector{param0}); diff --git a/ngraph/test/pattern.cpp b/ngraph/test/pattern.cpp index 97c2e63cbcb..0ee8871b3b2 100644 --- a/ngraph/test/pattern.cpp +++ b/ngraph/test/pattern.cpp @@ -52,20 +52,20 @@ using namespace std; static std::shared_ptr construct_constant_node(int n) { - return op::Constant::create(element::i32, Shape{}, {n}); + return op::Constant::create(element::Type_t::i32, Shape{}, {n}); } static std::shared_ptr construct_variance_graph() { // construct variance - auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2}); - auto input = std::make_shared(element::f32, Shape{2, 3}); + auto N = op::Constant::create(element::Type_t::f32, Shape{3}, {2, 2, 2}); + auto input = std::make_shared(element::Type_t::f32, Shape{2, 3}); auto input_sq = std::make_shared(input, input); - auto sum_input = - std::make_shared(input, op::Constant::create(element::i64, {1}, {0})); + auto sum_input = std::make_shared( + input, op::Constant::create(element::Type_t::i64, {1}, {0})); auto square_sumed_input = std::make_shared(sum_input, sum_input); - auto sum_squared_input = - std::make_shared(input_sq, op::Constant::create(element::i64, {1}, {0})); + auto sum_squared_input = std::make_shared( + input_sq, op::Constant::create(element::Type_t::i64, {1}, {0})); auto avg_input_sum_sq = std::make_shared(square_sumed_input, N); auto xmu = std::make_shared(sum_squared_input, avg_input_sum_sq); auto variance = std::make_shared(xmu, N); @@ -78,10 +78,10 @@ static std::shared_ptr construct_variance_graph() static std::shared_ptr construct_mean_graph() { // construct mean - auto input = std::make_shared(element::f32, Shape{2, 3}); - auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2}); - auto sum_input1 = - std::make_shared(input, op::Constant::create(element::i64, {1}, {0})); + auto input = std::make_shared(element::Type_t::f32, Shape{2, 3}); + auto N = op::Constant::create(element::Type_t::f32, Shape{3}, {2, 2, 2}); + auto sum_input1 = std::make_shared( + input, op::Constant::create(element::Type_t::i64, {1}, {0})); auto mean = std::make_shared(sum_input1, N); auto mean_label = std::make_shared(mean, nullptr, NodeVector{mean}); return mean_label; @@ -212,9 +212,9 @@ TEST(pattern, graph_rewrite) pass_manager.register_pass(); { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); - auto c = make_shared(element::i32,
shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); + auto c = make_shared(element::Type_t::i32, shape); auto iconst0 = construct_constant_node(0); auto graph_a = a + iconst0; auto graph_b = b + iconst0; @@ -231,8 +231,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst0 = construct_constant_node(0); auto sum = (a + iconst0); auto graph = b + sum; @@ -247,8 +247,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst1 = construct_constant_node(1); auto mul = (a * iconst1); auto graph = b + mul; @@ -263,8 +263,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst1 = construct_constant_node(1); auto graph = ((((a * iconst1) * iconst1) * iconst1) * iconst1) + b; run_passes(pass_manager, graph, {a, b}); @@ -275,8 +275,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst0 = construct_constant_node(0); auto iconst1 = construct_constant_node(1); auto graph = b + (iconst0 + ((a + iconst0) * iconst1)); @@ -288,8 +288,8 @@ TEST(pattern, graph_rewrite) } { - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto iconst1 = construct_constant_node(1); auto graph = b + (iconst1 * (iconst1 * (iconst1 * (iconst1 * a)))); run_passes(pass_manager, graph, {a, b}); @@ -303,7 +303,7 @@ TEST(pattern, graph_rewrite) TEST(pattern, matcher) { Shape shape{}; - auto a = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); TestMatcher n; ASSERT_TRUE(n.match(a, a)); ASSERT_EQ(n.get_matched_nodes(), (NodeVector{a})); @@ -327,7 +327,7 @@ TEST(pattern, matcher) ASSERT_FALSE(n.match(pattern_false, a)); ASSERT_EQ(n.get_matched_nodes(), (NodeVector{})); - auto b = make_shared(element::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto is_bea = [](std::shared_ptr node) -> bool { return op::is_binary_elementwise_arithmetic(node); @@ -363,7 +363,7 @@ TEST(pattern, matcher) ASSERT_TRUE(n.match(bea_label, ab)); ASSERT_EQ(n.get_pattern_map()[bea_label], ab); - auto d = make_shared(element::i32, shape); + auto d = make_shared(element::Type_t::i32, shape); ASSERT_FALSE(n.match(d, b)); ASSERT_FALSE(n.match(abs + b, b + b)); @@ -381,7 +381,7 @@ TEST(pattern, matcher) ASSERT_EQ(n.get_pattern_map()[pattern], abs); ASSERT_EQ(n.get_matched_nodes(), (NodeVector{add_absb, abs, b})); - auto c = make_shared(element::i32, shape); + auto c = make_shared(element::Type_t::i32, shape); auto mul_add_absb = c * (add_absb); ASSERT_TRUE(n.match(c * (b + pattern), mul_add_absb)); ASSERT_EQ(n.get_pattern_map()[pattern], abs); @@ -399,7 +399,7 @@ TEST(pattern, matcher) auto iconst1_1 = construct_constant_node(1); ASSERT_TRUE(n.match(pattern * 
iconst1_0, a * iconst1_1)); // different iconst ASSERT_EQ(n.get_pattern_map()[pattern], a); - auto fconst1_0 = op::Constant::create(element::f32, shape, {1}); + auto fconst1_0 = op::Constant::create(element::Type_t::f32, shape, {1}); auto patternf = std::make_shared(fconst1_0); ASSERT_TRUE(n.match(patternf * fconst1_0, a * iconst1_1)); // different iconst @@ -462,22 +462,22 @@ TEST(pattern, matcher) { TestMatcher sm(Output{}, "TestMatcher", true); // exact shape and type - auto scalar_param = make_shared(element::i32, Shape{}); + auto scalar_param = make_shared(element::Type_t::i32, Shape{}); auto label_dynamic_shape = - make_shared(element::i32, PartialShape::dynamic()); - auto param = make_shared(element::f32, Shape{}); + make_shared(element::Type_t::i32, PartialShape::dynamic()); + auto param = make_shared(element::Type_t::f32, Shape{}); ASSERT_TRUE(sm.match(label_dynamic_shape, scalar_param)); // wrong type - auto scalar_param_wrong_type = make_shared(element::f32, Shape{}); + auto scalar_param_wrong_type = make_shared(element::Type_t::f32, Shape{}); ASSERT_FALSE(sm.match(label, scalar_param_wrong_type)); // dynamic dimension - auto label_dynamic_dimension = - make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto vector_param = make_shared(element::i32, Shape{10}); + auto label_dynamic_dimension = make_shared( + element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto vector_param = make_shared(element::Type_t::i32, Shape{10}); ASSERT_TRUE(sm.match(label_dynamic_dimension, vector_param)); // dynamic type - auto label_dynamic_type = - make_shared(element::dynamic, PartialShape{Dimension::dynamic()}); + auto label_dynamic_type = make_shared( + element::Type_t::dynamic, PartialShape{Dimension::dynamic()}); ASSERT_TRUE(sm.match(label_dynamic_type, vector_param)); } } @@ -487,10 +487,10 @@ TEST(pattern, mean) // construct mean TestMatcher n; - auto input = std::make_shared(element::f32, Shape{2, 3}); - auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2}); - auto sum_input1 = - std::make_shared(input, op::Constant::create(element::i64, {1}, {0})); + auto input = std::make_shared(element::Type_t::f32, Shape{2, 3}); + auto N = op::Constant::create(element::Type_t::f32, Shape{3}, {2, 2, 2}); + auto sum_input1 = std::make_shared( + input, op::Constant::create(element::Type_t::i64, {1}, {0})); auto mean = std::make_shared(sum_input1, N); auto mean_graph = construct_mean_graph(); @@ -502,14 +502,14 @@ TEST(pattern, variance) { // construct variance TestMatcher n; - auto N = op::Constant::create(element::f32, Shape{3}, {2, 2, 2}); - auto input = std::make_shared(element::f32, Shape{2, 3}); + auto N = op::Constant::create(element::Type_t::f32, Shape{3}, {2, 2, 2}); + auto input = std::make_shared(element::Type_t::f32, Shape{2, 3}); auto input_sq = std::make_shared(input, input); - auto sum_input = - std::make_shared(input, op::Constant::create(element::i64, {1}, {0})); + auto sum_input = std::make_shared( + input, op::Constant::create(element::Type_t::i64, {1}, {0})); auto square_sumed_input = std::make_shared(sum_input, sum_input); - auto sum_squared_input = - std::make_shared(input_sq, op::Constant::create(element::i64, {1}, {0})); + auto sum_squared_input = std::make_shared( + input_sq, op::Constant::create(element::Type_t::i64, {1}, {0})); auto avg_input_sum_sq = std::make_shared(square_sumed_input, N); auto xmu = std::make_shared(sum_squared_input, avg_input_sum_sq); auto variance = std::make_shared(xmu, N); @@ -524,8 +524,8 @@ TEST(pattern, 
previous_matches) using ngraph::pattern::Matcher; Shape shape{}; Matcher::PatternMap previous_matches; - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto pattern = std::make_shared(b); auto abs = make_shared(a); auto add = abs + b; @@ -547,14 +547,14 @@ TEST(pattern, test_sort) using ngraph::pattern::Matcher; Shape shape{}; - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto abs1 = make_shared(a); auto abs2 = make_shared(b); auto add = abs1 + abs2; - auto pa = make_shared(element::i32, shape); - auto pb = make_shared(element::i32, shape); + auto pa = make_shared(element::Type_t::i32, shape); + auto pb = make_shared(element::Type_t::i32, shape); auto pabs1 = make_shared(pa); auto pabs1_label = std::make_shared(pabs1); auto pabs2 = make_shared(b); @@ -574,8 +574,8 @@ TEST(pattern, recurrent_pattern) using ngraph::pattern::RecurrentMatcher; Shape shape{}; ngraph::pattern::Matcher::PatternMap previous_matches; - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto rpattern = std::make_shared(b); auto iconst0 = construct_constant_node(0); auto abs = make_shared(a); @@ -643,7 +643,7 @@ public: auto iconst0 = construct_constant_node(0); auto iconst_label = std::make_shared(iconst0, nullptr, NodeVector{iconst0}); - auto rpattern = std::make_shared(element::i32, shape); + auto rpattern = std::make_shared(element::Type_t::i32, shape); auto padd = iconst_label + rpattern; auto callback = [iconst_label, rpattern](pattern::RecurrentMatcher& rm) { @@ -697,14 +697,14 @@ TEST(pattern, recurrent_graph_rewrite) pass_manager.register_pass(); { - auto a = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); auto iconst0 = construct_constant_node(0); auto add_a1 = a + iconst0; auto add_a2 = add_a1 + iconst0; auto add_a3 = add_a2 + iconst0; auto abs_add_a3 = std::make_shared(add_a3); - auto b = make_shared(element::i32, shape); + auto b = make_shared(element::Type_t::i32, shape); auto add_b1 = b + iconst0; auto add_b2 = add_b1 + iconst0; auto abs_add_b2 = std::make_shared(add_b2); @@ -727,9 +727,9 @@ TEST(pattern, recurrent_graph_rewrite) TEST(pattern, label_on_skip) { Shape shape{2, 2}; - auto a = make_shared(element::i32, shape); - auto b = make_shared(element::i32, Shape{}); - auto iconst = ngraph::make_zero(element::i32, Shape{}); + auto a = make_shared(element::Type_t::i32, shape); + auto b = make_shared(element::Type_t::i32, Shape{}); + auto iconst = ngraph::make_zero(element::Type_t::i32, Shape{}); auto label = std::make_shared(iconst); auto const_label = std::make_shared(iconst, ngraph::is_zero, NodeVector{iconst}); @@ -738,8 +738,8 @@ TEST(pattern, label_on_skip) return as_type_ptr(n) != nullptr; }; - auto shape_const = op::Constant::create(element::u64, Shape{shape.size()}, shape); - auto axes_const = op::Constant::create(element::u8, Shape{}, {0}); + auto shape_const = op::Constant::create(element::Type_t::u64, Shape{shape.size()}, shape); + auto axes_const = op::Constant::create(element::Type_t::u8, Shape{}, {0}); auto bcst = std::make_shared( OutputVector{const_label, shape_const, axes_const}, bcst_pred); auto bcst_label = 
std::make_shared(bcst, nullptr, NodeVector{bcst}); @@ -762,7 +762,7 @@ TEST(pattern, label_on_skip) TEST(pattern, is_contained_match) { Shape shape{}; - auto a = make_shared(element::i32, shape); + auto a = make_shared(element::Type_t::i32, shape); auto absn = make_shared(a); TestMatcher n; @@ -781,11 +781,13 @@ TEST(pattern, is_contained_match) TEST(pattern, wrap_type) { - auto a = make_shared(element::f32, Shape{1, 3, 64, 64}); + auto a = make_shared(element::Type_t::f32, Shape{1, 3, 64, 64}); auto b = make_shared(a); auto c = make_shared(a); - auto mul1 = make_shared(a, op::Constant::create(element::f32, Shape{}, {1})); - auto mul2 = make_shared(op::Constant::create(element::f32, Shape{}, {1}), a); + auto mul1 = + make_shared(a, op::Constant::create(element::Type_t::f32, Shape{}, {1})); + auto mul2 = + make_shared(op::Constant::create(element::Type_t::f32, Shape{}, {1}), a); { auto m = pattern::wrap_type(); diff --git a/ngraph/test/provenance.cpp b/ngraph/test/provenance.cpp index e9a28d3cec6..6ac66b39b68 100644 --- a/ngraph/test/provenance.cpp +++ b/ngraph/test/provenance.cpp @@ -69,8 +69,8 @@ TEST(provenance, provenance) // of the graph. // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -114,8 +114,8 @@ TEST(provenance, provenance) // of the graph. // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -152,8 +152,8 @@ TEST(provenance, provenance) // * D is the replacement root, and its insertion kills A, B, and C. // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -164,7 +164,7 @@ TEST(provenance, provenance) auto f = make_shared(c, ParameterVector{x, y}); - auto d = make_zero(element::i32, Shape{2, 3, 4}); + auto d = make_zero(element::Type_t::i32, Shape{2, 3, 4}); d->add_provenance_tag("tag_d"); replace_node(c, d); @@ -190,8 +190,8 @@ TEST(provenance, provenance) // * D is the replacement root, and its insertion kills A, B, and C. 
// { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -202,7 +202,7 @@ TEST(provenance, provenance) auto f = make_shared(c, ParameterVector{x, y}); - auto d = make_zero(element::i32, Shape{2, 3, 4}); + auto d = make_zero(element::Type_t::i32, Shape{2, 3, 4}); replace_node(c, d); EXPECT_EQ(d->get_provenance_tags(), (ProvSet{"tag_a", "tag_b", "tag_c"})); @@ -237,8 +237,8 @@ TEST(provenance, provenance) // * D is the replacement root replacing C and creating a new argument node E // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -288,8 +288,8 @@ TEST(provenance, provenance) // * D is the replacement root replacing C and creating a new argument node E // { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); a->add_provenance_tag("tag_a"); @@ -314,9 +314,9 @@ TEST(provenance, provenance) TEST(provenance, add_group_above) { - auto p1 = make_shared(element::i32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); p1->add_provenance_tag("P1"); - auto p2 = make_shared(element::i32, PartialShape{2, 3, 4}); + auto p2 = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); p2->add_provenance_tag("P2"); auto a1 = p1 + p2; auto m1 = (a1 * a1)->add_provenance_group_members_above({p1, p2}); @@ -329,8 +329,8 @@ TEST(provenance, add_group_above) TEST(provenance, add_tags_above) { - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); auto b = make_shared(x, y); @@ -379,9 +379,10 @@ TEST(provenance, add_tags_above) TEST(provenance, builder) { - auto p1 = make_shared(element::i32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); p1->add_provenance_tag("P1"); - auto norm = builder::opset1::lp_norm(p1, op::Constant::create(element::i64, {}, {0}), 1, 0); + auto norm = + builder::opset1::lp_norm(p1, op::Constant::create(element::Type_t::i64, {}, {0}), 1, 0); norm->add_provenance_tag("norm"); for (auto node : topological_sort(NodeVector{norm})) { @@ -400,7 +401,7 @@ TEST(provenance, fused_copy_origin_tags) { test::ProvenanceEnabler provenance_enabler; - auto p1 = make_shared(element::f32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::f32, PartialShape{2, 3, 4}); p1->add_provenance_tag("P1"); auto g = make_shared(p1); g->add_provenance_tag("G"); @@ -433,7 +434,7 @@ TEST(provenance, fused_decomposition_tag) { test::ProvenanceEnabler provenance_enabler; - auto p1 = make_shared(element::f32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::f32, PartialShape{2, 3, 4}); auto fused_op = 
make_shared(p1); auto result = make_shared(fused_op); auto f = make_shared(ResultVector{result}, ParameterVector{p1}); @@ -453,7 +454,7 @@ TEST(provenance, fused_decomposition_tag) TEST(provenance, empty_group) { - auto p1 = make_shared(element::i32, PartialShape{2, 3, 4}); + auto p1 = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); p1->add_provenance_tag("P1"); auto abs = make_shared(p1); // Make sure group is empty @@ -476,8 +477,8 @@ TEST(provenance, opset1_upgrade_pass_graph) { test::ProvenanceEnabler provenance_enabler; - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); auto b = make_shared(x, y); @@ -519,8 +520,8 @@ TEST(provenance, opset0_downgrade_pass_graph) { test::ProvenanceEnabler provenance_enabler; - auto x = make_shared(element::i32, PartialShape{2, 3, 4}); - auto y = make_shared(element::i32, PartialShape{2, 3, 4}); + auto x = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); + auto y = make_shared(element::Type_t::i32, PartialShape{2, 3, 4}); auto a = make_shared(x, y); auto b = make_shared(x, y); diff --git a/ngraph/test/replace_node.cpp b/ngraph/test/replace_node.cpp index 8564a1e1c99..816f1f83569 100644 --- a/ngraph/test/replace_node.cpp +++ b/ngraph/test/replace_node.cpp @@ -63,24 +63,26 @@ using namespace ngraph; // TEST(replace_node, replace_nodes) { - auto x = make_shared(element::f32, Shape{2}); - auto y = make_shared(element::f32, Shape{2}); - auto z = make_shared(element::f32, Shape{2}); + auto x = make_shared(element::Type_t::f32, Shape{2}); + auto y = make_shared(element::Type_t::f32, Shape{2}); + auto z = make_shared(element::Type_t::f32, Shape{2}); auto add = x + y; - auto k = make_shared(element::f32, Shape{2}, vector{1, 2}); + auto k = make_shared(element::Type_t::f32, Shape{2}, vector{1, 2}); auto mul = add * k; auto sub = mul - z; auto f = make_shared(NodeVector{sub}, ParameterVector{x, y, z}); unordered_map, shared_ptr> parameter_replacement_map; - auto x_replacement = make_shared(element::f32, Shape{2}); + auto x_replacement = make_shared(element::Type_t::f32, Shape{2}); parameter_replacement_map[x] = x_replacement; unordered_map, shared_ptr> body_replacement_map; - auto y_replacement = make_shared(element::f32, Shape{2}, vector{3, 4}); - auto k_replacement = make_shared(element::f32, Shape{2}, vector{5, 6}); + auto y_replacement = + make_shared(element::Type_t::f32, Shape{2}, vector{3, 4}); + auto k_replacement = + make_shared(element::Type_t::f32, Shape{2}, vector{5, 6}); auto z_replacement = x_replacement + mul; body_replacement_map[y] = y_replacement; body_replacement_map[k] = k_replacement; diff --git a/ngraph/test/runtime/interpreter/int_executable.cpp b/ngraph/test/runtime/interpreter/int_executable.cpp index cb450ae4534..88506e6117b 100644 --- a/ngraph/test/runtime/interpreter/int_executable.cpp +++ b/ngraph/test/runtime/interpreter/int_executable.cpp @@ -454,7 +454,7 @@ void runtime::interpreter::INTExecutable::perform_nan_check( for (const shared_ptr& tensor : tensors) { const element::Type& type = tensor->get_element_type(); - if (type == element::f32) + if (type == element::Type_t::f32) { const float* data = tensor->get_data_ptr(); for (size_t i = 0; i < tensor->get_element_count(); i++) @@ -473,7 +473,7 @@ void runtime::interpreter::INTExecutable::perform_nan_check( } } } - else if (type == 
element::f64) + else if (type == element::Type_t::f64) { const double* data = tensor->get_data_ptr(); for (size_t i = 0; i < tensor->get_element_count(); i++) diff --git a/ngraph/test/runtime/interpreter/int_executable.hpp b/ngraph/test/runtime/interpreter/int_executable.hpp index b63c32fbd83..8d01ec56477 100644 --- a/ngraph/test/runtime/interpreter/int_executable.hpp +++ b/ngraph/test/runtime/interpreter/int_executable.hpp @@ -439,7 +439,7 @@ protected: { const op::v4::CTCLoss* ctc_loss = static_cast(&node); auto t_int = node.get_input_element_type(1); - if (t_int == element::i32) + if (t_int == element::Type_t::i32) { reference::CTCLoss( args[0]->get_data_ptr(), @@ -453,7 +453,7 @@ protected: ctc_loss->get_unique(), out[0]->get_data_ptr()); } - else if (t_int == element::i64) + else if (t_int == element::Type_t::i64) { reference::CTCLoss( args[0]->get_data_ptr(), @@ -473,7 +473,7 @@ protected: { const op::CumSum* cumsum = static_cast(&node); auto axis_et = node.get_input_element_type(1); - if (axis_et == element::i32) + if (axis_et == element::Type_t::i32) { reference::cumsum(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -482,7 +482,7 @@ protected: cumsum->is_exclusive(), cumsum->is_reverse()); } - else if (axis_et == element::i64) + else if (axis_et == element::Type_t::i64) { reference::cumsum(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -500,7 +500,7 @@ protected: auto indicesType = embed->input(1).get_element_type(); size_t indices_num = shape_size(embed->get_input_shape(1)); - if (indicesType == element::u64 || indicesType == element::i64) + if (indicesType == element::Type_t::u64 || indicesType == element::Type_t::i64) { reference::embeddingBagOffsetsSum( args[0]->get_data_ptr(), @@ -512,7 +512,7 @@ protected: indices_num, embed->get_shape()); } - else if (indicesType == element::u32 || indicesType == element::i32) + else if (indicesType == element::Type_t::u32 || indicesType == element::Type_t::i32) { reference::embeddingBagOffsetsSum( args[0]->get_data_ptr(), @@ -538,7 +538,7 @@ protected: static_cast(&node); auto indicesType = embed->input(1).get_element_type(); - if (indicesType == element::u64 || indicesType == element::i64) + if (indicesType == element::Type_t::u64 || indicesType == element::Type_t::i64) { reference::embeddingBagPackedSum( args[0]->get_data_ptr(), @@ -548,7 +548,7 @@ protected: embed->get_input_shape(1), embed->get_shape()); } - else if (indicesType == element::u32 || indicesType == element::i32) + else if (indicesType == element::Type_t::u32 || indicesType == element::Type_t::i32) { reference::embeddingBagPackedSum( args[0]->get_data_ptr(), @@ -573,7 +573,7 @@ protected: auto indicesType = embed->input(1).get_element_type(); size_t indices_num = shape_size(embed->get_input_shape(1)); - if (indicesType == element::u64 || indicesType == element::i64) + if (indicesType == element::Type_t::u64 || indicesType == element::Type_t::i64) { reference::embeddingSegmentsSum( args[0]->get_data_ptr(), @@ -586,7 +586,7 @@ protected: embed->get_input_shape(1), embed->get_shape()); } - else if (indicesType == element::u32 || indicesType == element::i32) + else if (indicesType == element::Type_t::u32 || indicesType == element::Type_t::i32) { reference::embeddingSegmentsSum( args[0]->get_data_ptr(), @@ -667,7 +667,7 @@ protected: case OP_TYPEID::GatherND_v5: { const op::v5::GatherND* gatherNDNode = static_cast(&node); - if (node.get_input_element_type(1) == element::i64) + if (node.get_input_element_type(1) == element::Type_t::i64) { 
reference::gather_nd(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -677,7 +677,7 @@ protected: node.get_output_shape(0), gatherNDNode->get_batch_dims()); } - else if (node.get_input_element_type(1) == element::i32) + else if (node.get_input_element_type(1) == element::Type_t::i32) { reference::gather_nd(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -760,7 +760,7 @@ protected: { auto lstm_seq = static_cast(&node); auto type = args[3]->get_element_type(); - if (type == element::i64 || type == element::u64) + if (type == element::Type_t::i64 || type == element::Type_t::u64) { runtime::reference::lstm_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -785,7 +785,7 @@ protected: lstm_seq->get_clip(), lstm_seq->get_direction()); } - else if (type == element::i32 || type == element::u32) + else if (type == element::Type_t::i32 || type == element::Type_t::u32) { runtime::reference::lstm_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -822,7 +822,7 @@ protected: { auto gru_seq = static_cast(&node); auto type = args[2]->get_element_type(); - if (type == element::i64 || type == element::u64) + if (type == element::Type_t::i64 || type == element::Type_t::u64) { runtime::reference::gru_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -844,7 +844,7 @@ protected: gru_seq->get_direction(), gru_seq->get_linear_before_reset()); } - else if (type == element::i32 || type == element::u32) + else if (type == element::Type_t::i32 || type == element::Type_t::u32) { runtime::reference::gru_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -891,7 +891,7 @@ protected: { auto rnn_seq = static_cast(&node); auto type = args[2]->get_element_type(); - if (type == element::i64 || type == element::u64) + if (type == element::Type_t::i64 || type == element::Type_t::u64) { runtime::reference::rnn_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -911,7 +911,7 @@ protected: rnn_seq->get_clip(), rnn_seq->get_direction()); } - else if (type == element::i32 || type == element::u32) + else if (type == element::Type_t::i32 || type == element::Type_t::u32) { runtime::reference::rnn_sequence(args[0]->get_data_ptr(), args[0]->get_shape(), @@ -1106,7 +1106,7 @@ protected: const op::Quantize* quantize = static_cast(&node); auto type = quantize->get_element_type(); - if (type == element::u8) + if (type == element::Type_t::u8) { reference::quantize(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -1117,7 +1117,7 @@ protected: quantize->get_axes(), quantize->get_round_mode()); } - else if (type == element::i8) + else if (type == element::Type_t::i8) { reference::quantize(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -1128,7 +1128,7 @@ protected: quantize->get_axes(), quantize->get_round_mode()); } - else if (type == element::i32) + else if (type == element::Type_t::i32) { reference::quantize(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -1172,7 +1172,7 @@ protected: { const op::ReverseSequence* reverse = static_cast(&node); - if (node.get_input_element_type(1) == element::i32) + if (node.get_input_element_type(1) == element::Type_t::i32) { reference::reverse_sequence(args[0]->get_data_ptr(), out[0]->get_data_ptr(), @@ -1181,7 +1181,7 @@ protected: reverse->get_sequence_axis(), args[1]->get_data_ptr()); } - else if (node.get_input_element_type(1) == element::i64) + else if (node.get_input_element_type(1) == element::Type_t::i64) { reference::reverse_sequence(args[0]->get_data_ptr(), out[0]->get_data_ptr(), @@ -1378,7 +1378,7 @@ protected: const 
op::ScatterNDUpdate* scatterNDUpd = static_cast(&node); auto idxType = scatterNDUpd->get_input_element_type(1); - if (idxType == element::i32) + if (idxType == element::Type_t::i32) { reference::scatterNdUpdate(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -1388,7 +1388,7 @@ protected: node.get_input_shape(1), node.get_input_shape(2)); } - else if (idxType == element::i64) + else if (idxType == element::Type_t::i64) { reference::scatterNdUpdate(args[0]->get_data_ptr(), args[1]->get_data_ptr(), @@ -1457,8 +1457,8 @@ protected: &valid_outputs, info.sort_result_descending); - auto selected_scores_type = - (args.size() < 4) ? element::f32 : args[3]->get_element_type(); + auto selected_scores_type = (args.size() < 4) ? element::Type(element::Type_t::f32) + : args[3]->get_element_type(); reference::nms5_postprocessing(out, info.output_type, diff --git a/ngraph/test/runtime/pass/dyn_elimination.cpp b/ngraph/test/runtime/pass/dyn_elimination.cpp index 0f82643a877..8fcbf143481 100644 --- a/ngraph/test/runtime/pass/dyn_elimination.cpp +++ b/ngraph/test/runtime/pass/dyn_elimination.cpp @@ -56,12 +56,12 @@ std::shared_ptr make_range_replacement(const element::Type& et, void pass::DynElimination::construct_range() { - auto start_arg_label = - make_shared(element::f32, Shape{}, pattern::has_class()); - auto stop_arg_label = - make_shared(element::f32, Shape{}, pattern::has_class()); - auto step_arg_label = - make_shared(element::f32, Shape{}, pattern::has_class()); + auto start_arg_label = make_shared( + element::Type_t::f32, Shape{}, pattern::has_class()); + auto stop_arg_label = make_shared( + element::Type_t::f32, Shape{}, pattern::has_class()); + auto step_arg_label = make_shared( + element::Type_t::f32, Shape{}, pattern::has_class()); auto range_pat = make_shared(start_arg_label, stop_arg_label, step_arg_label); diff --git a/ngraph/test/runtime/pass/opset0_downgrade.cpp b/ngraph/test/runtime/pass/opset0_downgrade.cpp index 82e8bf3c7d5..bd7ca068162 100644 --- a/ngraph/test/runtime/pass/opset0_downgrade.cpp +++ b/ngraph/test/runtime/pass/opset0_downgrade.cpp @@ -85,7 +85,7 @@ namespace opset0_downgrade reshaped_output_shape.insert(reshaped_output_shape.begin() + axis, 1); } auto shape_pattern = op::Constant::create( - element::u64, {reshaped_output_shape.size()}, reshaped_output_shape); + element::Type_t::u64, {reshaped_output_shape.size()}, reshaped_output_shape); auto reshaped_product = make_shared(replacement_node->output(0), shape_pattern, false); return reshaped_product; diff --git a/ngraph/test/runtime/pass/opset1_downgrade.cpp b/ngraph/test/runtime/pass/opset1_downgrade.cpp index b4fd099c8e2..23fe9aa970e 100644 --- a/ngraph/test/runtime/pass/opset1_downgrade.cpp +++ b/ngraph/test/runtime/pass/opset1_downgrade.cpp @@ -39,7 +39,7 @@ namespace opset1_downgrade { const auto const_filled_with_ones = make_shared( op::Constant::create(data->get_element_type(), {}, {1}), target_shape); - if (const_filled_with_ones->get_element_type() == element::boolean) + if (const_filled_with_ones->get_element_type() == element::Type_t::boolean) { replacement_node = make_shared(data, const_filled_with_ones); } diff --git a/ngraph/test/runtime/pass/opset1_upgrade.cpp b/ngraph/test/runtime/pass/opset1_upgrade.cpp index 333ab280f71..4258eaea3ac 100644 --- a/ngraph/test/runtime/pass/opset1_upgrade.cpp +++ b/ngraph/test/runtime/pass/opset1_upgrade.cpp @@ -106,7 +106,7 @@ namespace opset1_upgrade node->input_value(1), // data node->input_value(0), // filters op::Constant::create( - element::i64, + 
element::Type_t::i64, Shape{data_batch_shape.size() - 2}, vector(data_batch_shape.begin() + 2, data_batch_shape.end())), strides, @@ -230,7 +230,8 @@ namespace opset1_upgrade auto replacement_node = make_shared( node->input_value(2), reshaped_filters, - op::Constant::create(element::i64, Shape{data_batch_shape.size()}, data_batch_shape), + op::Constant::create( + element::Type_t::i64, Shape{data_batch_shape.size()}, data_batch_shape), strides, pads_begin, pads_end, diff --git a/ngraph/test/specialize_function.cpp b/ngraph/test/specialize_function.cpp index 6a8e91cfb65..fe09800a1b5 100644 --- a/ngraph/test/specialize_function.cpp +++ b/ngraph/test/specialize_function.cpp @@ -27,10 +27,10 @@ using namespace ngraph; // shapes. TEST(specialize_function, et_shape_static) { - auto p0 = std::make_shared(element::f32, Shape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, Shape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, Shape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -38,21 +38,21 @@ TEST(specialize_function, et_shape_static) std::vector param_vals{nullptr, nullptr}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); } // Test specialization of dynamic element types. TEST(specialize_function, et_dynamic_shape_static) { - auto p0 = std::make_shared(element::dynamic, Shape{1, 2, 3}); - auto p1 = std::make_shared(element::dynamic, Shape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::dynamic, Shape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::dynamic, Shape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -60,21 +60,21 @@ TEST(specialize_function, et_dynamic_shape_static) std::vector param_vals{nullptr, nullptr}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); } // Test specialization of rank-dynamic shapes. 
TEST(specialize_function, et_static_shape_rank_dynamic) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic()); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic()); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic()); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -82,21 +82,21 @@ TEST(specialize_function, et_static_shape_rank_dynamic) std::vector param_vals{nullptr, nullptr}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); } // Test specialization of rank-static dynamic shapes. TEST(specialize_function, et_static_shape_rank_static_dynamic) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic(3)); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic(3)); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic(3)); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -104,21 +104,21 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic) std::vector param_vals{nullptr, nullptr}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); } // Test specialization of values to a shape-dynamic parameter.
TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic(3)); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic(3)); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic(3)); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -128,12 +128,12 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) std::vector param_vals{nullptr, p1_subst_vals.data()}; auto g = specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(g->get_output_element_type(0), element::f32); + ASSERT_EQ(g->get_output_element_type(0), element::Type_t::f32); auto plus_node = as_type_ptr(g->get_results().at(0)->input_value(0).get_node_shared_ptr()); @@ -143,7 +143,7 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) auto const_node = as_type_ptr(convert_node->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(const_node); - ASSERT_EQ(const_node->get_output_element_type(0), element::i32); + ASSERT_EQ(const_node->get_output_element_type(0), element::Type_t::i32); ASSERT_EQ(const_node->get_output_shape(0), (Shape{1, 2, 3})); ASSERT_EQ(const_node->get_vector(), p1_subst_vals); } @@ -153,10 +153,10 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) // (The input shapes we provide at specialization time are inconsistent.) TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic()); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic()); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic()); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -166,7 +166,7 @@ TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3, 4}}, param_vals); }, @@ -178,10 +178,10 @@ TEST(specialize_function, et_static_shape_rank_dynamic_validation_fails) // (The input element types we provide at specialization time are inconsistent.) 
TEST(specialize_function, et_dynamic_shape_static_validation_fails) { - auto p0 = std::make_shared(element::dynamic, Shape{1, 2, 3}); - auto p1 = std::make_shared(element::dynamic, Shape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::dynamic, Shape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::dynamic, Shape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -191,7 +191,7 @@ TEST(specialize_function, et_dynamic_shape_static_validation_fails) ASSERT_THROW( { specialize_function(f, - {element::u32, element::i32}, + {element::Type_t::u32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); }, @@ -206,10 +206,10 @@ TEST(specialize_function, et_dynamic_shape_static_validation_fails) // reconstruct the graph.) TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) { - auto p0 = std::make_shared(element::f32, PartialShape::dynamic(3)); - auto p1 = std::make_shared(element::i32, PartialShape::dynamic(3)); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape::dynamic(3)); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -219,7 +219,7 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3, 4}}, param_vals); }, @@ -234,11 +234,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_rank_mismatch) // reconstruct the graph.) TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = - std::make_shared(element::i32, PartialShape{1, Dimension::dynamic(), 3}); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, + PartialShape{1, Dimension::dynamic(), 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -248,7 +248,7 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 9, 4}}, param_vals); }, @@ -258,10 +258,10 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_dim_mismatch) // Test for failure when we supply the wrong number of replacement element types. 
TEST(specialize_function, et_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -271,7 +271,7 @@ TEST(specialize_function, et_count_wrong) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32, element::u32}, + {element::Type_t::f32, element::Type_t::i32, element::Type_t::u32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); }, @@ -281,10 +281,10 @@ TEST(specialize_function, et_count_wrong) // Test for failure when we supply the wrong number of replacement shapes. TEST(specialize_function, shape_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -295,7 +295,7 @@ TEST(specialize_function, shape_count_wrong) { specialize_function( f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}, PartialShape{4, 5, 6}}, param_vals); }, @@ -305,10 +305,10 @@ TEST(specialize_function, shape_count_wrong) // Test for failure when we supply the wrong number of replacement parameter values. 
TEST(specialize_function, value_count_wrong) { - auto p0 = std::make_shared(element::f32, PartialShape{1, 2, 3}); - auto p1 = std::make_shared(element::i32, PartialShape{1, 2, 3}); + auto p0 = std::make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto p1 = std::make_shared(element::Type_t::i32, PartialShape{1, 2, 3}); - auto k = std::make_shared(p1, element::f32); + auto k = std::make_shared(p1, element::Type_t::f32); auto a = p0 + k; auto f = std::make_shared(a, ParameterVector{p0, p1}); @@ -318,7 +318,7 @@ TEST(specialize_function, value_count_wrong) ASSERT_THROW( { specialize_function(f, - {element::f32, element::i32}, + {element::Type_t::f32, element::Type_t::i32}, {PartialShape{1, 2, 3}, PartialShape{1, 2, 3}}, param_vals); }, diff --git a/ngraph/test/tensor.cpp b/ngraph/test/tensor.cpp index 650e5c5ffad..0eab2f21e1d 100644 --- a/ngraph/test/tensor.cpp +++ b/ngraph/test/tensor.cpp @@ -39,7 +39,7 @@ TEST(tensor, size) pass_manager.register_pass(); { - auto arg0 = make_shared(element::f32, Shape{2, 3}); + auto arg0 = make_shared(element::Type_t::f32, Shape{2, 3}); auto add = make_shared(arg0, arg0); auto f0 = make_shared(add, ParameterVector{arg0}); @@ -51,7 +51,7 @@ TEST(tensor, size) } { - auto arg0 = make_shared(element::f32, Shape{}); + auto arg0 = make_shared(element::Type_t::f32, Shape{}); auto add = make_shared(arg0, arg0); auto f0 = make_shared(add, ParameterVector{arg0}); @@ -63,7 +63,7 @@ TEST(tensor, size) } { - auto arg0 = make_shared(element::f32, Shape{1}); + auto arg0 = make_shared(element::Type_t::f32, Shape{1}); auto add = make_shared(arg0, arg0); auto f0 = make_shared(add, ParameterVector{arg0}); @@ -80,7 +80,7 @@ TEST(tensor, output_flag) pass::Manager pass_manager; pass_manager.register_pass(); - auto arg0 = make_shared(element::f32, Shape{1}); + auto arg0 = make_shared(element::Type_t::f32, Shape{1}); auto add = make_shared(arg0, arg0); auto f0 = make_shared(add, ParameterVector{arg0}); diff --git a/ngraph/test/type_prop/assign.cpp b/ngraph/test/type_prop/assign.cpp index 3bffbd8a931..5e29020f72d 100644 --- a/ngraph/test/type_prop/assign.cpp +++ b/ngraph/test/type_prop/assign.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, assign_variable_not_found) { - auto A = make_shared(element::f32, Shape{1, 2, 64, 64}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 64, 64}); try { auto space_to_depth = make_shared(A, "variable_id"); @@ -43,10 +43,10 @@ TEST(type_prop, assign_variable_not_found) TEST(type_prop, assign_deduce) { - auto input = make_shared(element::f32, Shape{1, 2, 64, 64}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 64, 64}); auto read_value = make_shared(input, "variable_id"); auto assign = make_shared(read_value, "variable_id"); - ASSERT_EQ(assign->get_element_type(), element::f32); + ASSERT_EQ(assign->get_element_type(), element::Type_t::f32); ASSERT_EQ(assign->get_shape(), (Shape{1, 2, 64, 64})); } diff --git a/ngraph/test/type_prop/avg_pool.cpp b/ngraph/test/type_prop/avg_pool.cpp index a08c58a2139..1837f39c0f2 100644 --- a/ngraph/test/type_prop/avg_pool.cpp +++ b/ngraph/test/type_prop/avg_pool.cpp @@ -32,7 +32,7 @@ TEST(type_prop, avg_pool_auto_padding) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad); @@ -52,7 +52,7 @@ TEST(type_prop, 
avg_pool_auto_padding_nc_dims_dynamic_same_lower) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad); @@ -73,7 +73,7 @@ TEST(type_prop, avg_pool_auto_padding_nc_dims_dynamic_same_upper) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_UPPER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad); @@ -94,7 +94,7 @@ TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, exclude_pad, rounding_mode, auto_pad); @@ -106,12 +106,12 @@ TEST(type_prop, avg_pool_auto_padding_spatial_dims_dynamic) TEST(type_prop, avg_pool_1d_deduce) { - const auto param = make_shared(element::f32, Shape{64, 3, 100}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 100}); const Shape kernel{10}; const auto avg_pool = make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 91})); EXPECT_EQ(avg_pool->get_strides(), Strides{1}); @@ -122,13 +122,13 @@ TEST(type_prop, avg_pool_1d_deduce) TEST(type_prop, avg_pool_1d_deduce_strided) { - const auto param = make_shared(element::f32, Shape{64, 3, 100}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 100}); const Shape kernel{10}; const auto move_strides = Strides{2}; const auto avg_pool = make_shared( param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 46})); EXPECT_EQ(avg_pool->get_strides(), Strides{2}); @@ -139,13 +139,13 @@ TEST(type_prop, avg_pool_1d_deduce_strided) TEST(type_prop, avg_pool_1d_deduce_strided_small_uneven) { - const auto param = make_shared(element::f32, Shape{64, 3, 5}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 5}); const Shape kernel{2}; const auto move_strides = Strides{2}; const auto avg_pool = make_shared( param, move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 2})); EXPECT_EQ(avg_pool->get_strides(), Strides{2}); @@ -156,13 +156,13 @@ TEST(type_prop, avg_pool_1d_deduce_strided_small_uneven) TEST(type_prop, avg_pool_1d_deduce_strided_small_even) { - const auto param = make_shared(element::f32, Shape{64, 3, 6}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 6}); const Shape kernel{2}; const auto move_strides = Strides{2}; const auto avg_pool = make_shared( param, 
move_strides, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 3})); EXPECT_EQ(avg_pool->get_strides(), Strides{2}); @@ -173,12 +173,12 @@ TEST(type_prop, avg_pool_1d_deduce_strided_small_even) TEST(type_prop, avg_pool_2d_deduce) { - const auto param = make_shared(element::f32, Shape{64, 3, 100, 150}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); const Shape kernel{10, 20}; const auto avg_pool = make_shared( param, Strides{1, 1}, Shape{0, 0}, Shape{0, 0}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 91, 131})); EXPECT_EQ(avg_pool->get_strides(), (Strides{1, 1})); @@ -189,13 +189,13 @@ TEST(type_prop, avg_pool_2d_deduce) TEST(type_prop, avg_pool_2d_deduce_strided) { - const auto param = make_shared(element::f32, Shape{64, 3, 100, 150}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); const Shape kernel{10, 20}; const auto move_strides = Strides{2, 3}; const auto avg_pool = make_shared( param, move_strides, Shape{0, 0}, Shape{0, 0}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 46, 44})); EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3})); @@ -206,13 +206,13 @@ TEST(type_prop, avg_pool_2d_deduce_strided) TEST(type_prop, avg_pool_3d_deduce_strided_small) { - const auto param = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8, 10}); const Shape kernel{2, 3, 2}; const auto move_strides = Strides{2, 3, 4}; const auto avg_pool = make_shared( param, move_strides, Shape{0, 0, 0}, Shape{0, 0, 0}, kernel, true, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 3, 2, 3})); EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3, 4})); @@ -223,7 +223,7 @@ TEST(type_prop, avg_pool_3d_deduce_strided_small) TEST(type_prop, avg_pool_3d_deduce_strided_padded_small) { - const auto param = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8, 10}); const Shape kernel{2, 3, 2}; const auto move_strides = Strides{2, 3, 4}; const Shape pads_begin{5, 6, 4}; @@ -231,7 +231,7 @@ TEST(type_prop, avg_pool_3d_deduce_strided_padded_small) const auto avg_pool = make_shared( param, move_strides, pads_begin, pads_end, kernel, false, op::RoundingType::FLOOR); - EXPECT_EQ(avg_pool->get_output_element_type(0), element::f32); + EXPECT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(avg_pool->get_output_shape(0), (Shape{64, 3, 9, 6, 5})); EXPECT_EQ(avg_pool->get_strides(), (Strides{2, 3, 4})); @@ -242,7 +242,7 @@ TEST(type_prop, avg_pool_3d_deduce_strided_padded_small) TEST(type_prop, avg_pool_invalid_0d_input) { - const auto param = make_shared(element::f32, Shape{}); + const auto param = make_shared(element::Type_t::f32, Shape{}); const Shape kernel{}; 
EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -251,7 +251,7 @@ TEST(type_prop, avg_pool_invalid_0d_input) TEST(type_prop, avg_pool_invalid_1d_input) { - const auto param = make_shared(element::f32, Shape{2}); + const auto param = make_shared(element::Type_t::f32, Shape{2}); const Shape kernel{}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -260,7 +260,7 @@ TEST(type_prop, avg_pool_invalid_1d_input) TEST(type_prop, avg_pool_invalid_2d_input) { - const auto param = make_shared(element::f32, Shape{2, 6}); + const auto param = make_shared(element::Type_t::f32, Shape{2, 6}); const Shape kernel{}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -269,7 +269,7 @@ TEST(type_prop, avg_pool_invalid_2d_input) TEST(type_prop, avg_pool_invalid_0_batch_size) { - const auto param = make_shared(element::f32, Shape{0, 6, 1}); + const auto param = make_shared(element::Type_t::f32, Shape{0, 6, 1}); const Shape kernel{1}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -278,7 +278,7 @@ TEST(type_prop, avg_pool_invalid_0_batch_size) TEST(type_prop, avg_pool_invalid_0_channels) { - const auto param = make_shared(element::f32, Shape{6, 0, 1}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 0, 1}); const Shape kernel{1}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -287,7 +287,7 @@ TEST(type_prop, avg_pool_invalid_0_channels) TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_many) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3, 3}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -296,7 +296,7 @@ TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_many) TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_few) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -305,7 +305,7 @@ TEST(type_prop, avg_pool_invalid_wrong_number_of_window_dimensions_too_few) TEST(type_prop, avg_pool_invalid_movement_stride_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3}; const auto move_strides = Strides{2, 3, 8}; EXPECT_THROW(make_shared( @@ -315,7 +315,7 @@ TEST(type_prop, avg_pool_invalid_movement_stride_rank) TEST(type_prop, avg_pool_invalid_padding_below_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3}; const auto move_strides = Strides{2, 3}; const Shape pads_begin{1, 2, 3}; @@ -328,7 +328,7 @@ TEST(type_prop, avg_pool_invalid_padding_below_rank) TEST(type_prop, avg_pool_invalid_padding_above_rank) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3}; const auto move_strides = Strides{2, 3}; 
const Shape pads_begin{1, 2}; @@ -341,7 +341,7 @@ TEST(type_prop, avg_pool_invalid_padding_above_rank) TEST(type_prop, avg_pool_invalid_input_item_size_0) { - const auto param = make_shared(element::f32, Shape{6, 2, 0, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 0, 10}); const Shape kernel{3, 3}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -350,7 +350,7 @@ TEST(type_prop, avg_pool_invalid_input_item_size_0) TEST(type_prop, avg_pool_invalid_window_size_0) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 0}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -359,7 +359,7 @@ TEST(type_prop, avg_pool_invalid_window_size_0) TEST(type_prop, avg_pool_invalid_dilated_too_large) { - const auto param = make_shared(element::f32, Shape{6, 2, 8, 8}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 8, 8}); const Shape kernel{9, 9}; EXPECT_THROW(make_shared( param, Strides{1}, Shape{}, Shape{}, kernel, true, op::RoundingType::FLOOR), @@ -368,7 +368,7 @@ TEST(type_prop, avg_pool_invalid_dilated_too_large) TEST(type_prop, avg_pool_larger_than_pre_padding_but_fits_in_post_padding) { - const auto param = make_shared(element::f32, Shape{6, 2, 8, 8}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 8, 8}); const Shape kernel{9, 9}; const Strides window_strides{1, 1}; const Shape pads_begin{0, 0}; @@ -376,13 +376,13 @@ TEST(type_prop, avg_pool_larger_than_pre_padding_but_fits_in_post_padding) const auto avg_pool = make_shared( param, window_strides, pads_begin, pads_end, kernel, true, op::RoundingType::FLOOR); - ASSERT_EQ(avg_pool->get_output_element_type(0), element::f32); + ASSERT_EQ(avg_pool->get_output_element_type(0), element::Type_t::f32); ASSERT_EQ(avg_pool->get_output_shape(0), (Shape{6, 2, 1, 1})); } TEST(type_prop, avg_pool_invalid_movement_stride_0) { - const auto param = make_shared(element::f32, Shape{6, 2, 10, 10}); + const auto param = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); const Shape kernel{3, 3}; const auto move_strides = Strides{0, 1}; EXPECT_THROW(make_shared( @@ -398,7 +398,7 @@ TEST(type_prop, avg_pool_partial_rank_dynamic_ok) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); auto ap = make_shared(param, window_movement_strides, pads_begin, @@ -407,7 +407,7 @@ TEST(type_prop, avg_pool_partial_rank_dynamic_ok) false, op::RoundingType::FLOOR); - ASSERT_EQ(ap->get_output_element_type(0), element::f32); + ASSERT_EQ(ap->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6))); } @@ -419,7 +419,7 @@ TEST(type_prop, avg_pool_partial_rank_dynamic_attrib_rank_mismatch) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); EXPECT_THROW(make_shared(param, window_movement_strides, @@ -439,7 +439,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_ok) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = 
make_shared(element::Type_t::f32, arg_shape); auto ap = make_shared(param, window_movement_strides, pads_begin, @@ -448,7 +448,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_ok) false, op::RoundingType::FLOOR); - ASSERT_EQ(ap->get_output_element_type(0), element::f32); + ASSERT_EQ(ap->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(6))); } @@ -460,7 +460,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_some_dims_known_ok) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); auto ap = make_shared(param, window_movement_strides, pads_begin, @@ -469,7 +469,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_some_dims_known_ok) false, op::RoundingType::FLOOR); - ASSERT_EQ(ap->get_output_element_type(0), element::f32); + ASSERT_EQ(ap->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme( PartialShape{5, Dimension::dynamic(), 7, Dimension::dynamic(), 1, 3})); } @@ -482,7 +482,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_attrib_rank_mismatch) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); EXPECT_THROW(make_shared(param, window_movement_strides, @@ -502,7 +502,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_not_too_big) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); EXPECT_THROW(make_shared(param, window_movement_strides, @@ -522,7 +522,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_padded_window_not_too_big) const Shape pads_begin{0, 0, 0, 0}; const Shape pads_end{1, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); auto ap = make_shared(param, window_movement_strides, pads_begin, @@ -531,7 +531,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_padded_window_not_too_big) true, op::RoundingType::FLOOR); - ASSERT_EQ(ap->get_output_element_type(0), element::f32); + ASSERT_EQ(ap->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(ap->get_output_partial_shape(0).same_scheme( PartialShape{5, Dimension::dynamic(), 1, Dimension::dynamic(), 1, 3})); } @@ -544,7 +544,7 @@ TEST(type_prop, avg_pool_partial_rank_static_dynamic_window_in_padding) const Shape pads_begin{0, 0, 0, 4}; const Shape pads_end{0, 0, 0, 0}; - const auto param = make_shared(element::f32, arg_shape); + const auto param = make_shared(element::Type_t::f32, arg_shape); EXPECT_THROW(make_shared(param, window_movement_strides, diff --git a/ngraph/test/type_prop/batch_norm.cpp b/ngraph/test/type_prop/batch_norm.cpp index 0ab600a8a51..61eb0b2349f 100644 --- a/ngraph/test/type_prop/batch_norm.cpp +++ b/ngraph/test/type_prop/batch_norm.cpp @@ -29,11 +29,11 @@ TEST(type_prop, batch_norm_inference_partial_all_rank_dynamic) PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - 
element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -58,11 +58,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_ok) PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -88,11 +88,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_zero_chan PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -124,11 +124,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static PartialShape mean_shape{Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -152,11 +152,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static PartialShape mean_shape{Dimension::dynamic(), Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = 
make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -191,11 +191,11 @@ TEST(type_prop, PartialShape mean_shape{Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -229,11 +229,11 @@ TEST(type_prop, PartialShape mean_shape{4}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -266,11 +266,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_some_stat PartialShape mean_shape{3}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -296,11 +296,11 @@ TEST(type_prop, PartialShape mean_shape{3}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -334,11 +334,11 @@ TEST(type_prop, batch_norm_inference_partial_all_rank_dynamic_v5) PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type 
gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -363,11 +363,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_ok_v5) PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -393,11 +393,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_zero_chan PartialShape mean_shape{PartialShape::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -429,11 +429,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_dynamic_some_rank_static PartialShape mean_shape{Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -458,11 +458,11 @@ TEST(type_prop, PartialShape mean_shape{Dimension::dynamic(), Dimension::dynamic()}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -497,11 +497,11 @@ TEST(type_prop, PartialShape mean_shape{Dimension::dynamic()}; 
PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -535,11 +535,11 @@ TEST(type_prop, PartialShape mean_shape{4}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -572,11 +572,11 @@ TEST(type_prop, batch_norm_inference_partial_input_rank_static_dynamic_some_stat PartialShape mean_shape{3}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); @@ -603,11 +603,11 @@ TEST( PartialShape mean_shape{3}; PartialShape variance_shape{PartialShape::dynamic()}; double epsilon = 0.001; - element::Type data_batch_et = element::f32; - element::Type gamma_et = element::f32; - element::Type beta_et = element::f32; - element::Type mean_et = element::f32; - element::Type variance_et = element::f32; + element::Type data_batch_et = element::Type_t::f32; + element::Type gamma_et = element::Type_t::f32; + element::Type beta_et = element::Type_t::f32; + element::Type mean_et = element::Type_t::f32; + element::Type variance_et = element::Type_t::f32; auto data_batch = make_shared(data_batch_et, data_batch_shape); auto gamma = make_shared(gamma_et, gamma_shape); diff --git a/ngraph/test/type_prop/batch_to_space.cpp b/ngraph/test/type_prop/batch_to_space.cpp index dfc9a1eef2a..ab6f8fc7c0b 100644 --- a/ngraph/test/type_prop/batch_to_space.cpp +++ b/ngraph/test/type_prop/batch_to_space.cpp @@ -23,70 +23,75 @@ using namespace ngraph; TEST(type_prop, batch_to_space_output_shape_2D) { - auto data = make_shared(element::f32, Shape{10, 26}); - auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); - auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto data = make_shared(element::Type_t::f32, Shape{10, 26}); + auto block_shape = + 
make_shared(element::Type_t::i64, Shape{2}, vector{1, 5}); + auto pads_begin = + make_shared(element::Type_t::i64, Shape{2}, vector{0, 2}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{2}, vector{0, 0}); auto batch_to_space = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{10 / 5, 26 * 5 - 2})); } TEST(type_prop, batch_to_space_output_shape_4D) { - auto data = make_shared(element::f32, Shape{100, 7, 13, 3}); + auto data = make_shared(element::Type_t::f32, Shape{100, 7, 13, 3}); auto block_shape = - make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); + make_shared(element::Type_t::i64, Shape{4}, vector{1, 10, 5, 1}); auto pads_begin = - make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 0, 0}); auto batch_to_space = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{100 / (10 * 5), 7 * 10 - 3 - 3, 13 * 5 - 1, 3})); } TEST(type_prop, batch_to_space_output_shape_5D) { - auto data = make_shared(element::f32, Shape{960, 6, 13, 128, 16}); + auto data = make_shared(element::Type_t::f32, Shape{960, 6, 13, 128, 16}); auto block_shape = - make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16}); + make_shared(element::Type_t::i32, Shape{5}, vector{1, 6, 5, 1, 16}); auto pads_begin = - make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0}); + make_shared(element::Type_t::i32, Shape{5}, vector{0, 2, 0, 0, 0}); auto pads_end = - make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0}); + make_shared(element::Type_t::i32, Shape{5}, vector{0, 2, 1, 0, 0}); auto batch_to_space = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{960 / (6 * 5 * 16), 6 * 6 - 2 - 2, 13 * 5 - 1, 128, 16 * 16})); } TEST(type_prop, batch_to_space_and_space_to_batch) { - auto data = make_shared(element::f32, Shape{4800, 9, 11, 2}); + auto data = make_shared(element::Type_t::f32, Shape{4800, 9, 11, 2}); auto block_shape = - make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + make_shared(element::Type_t::i64, Shape{4}, vector{1, 12, 100, 2}); auto pads_begin = - make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 5, 38, 0}); auto batch_to_space = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{4800 / (12 * 100 * 2), 9 * 12 - 3 - 5, 11 * 100 - 38 - 38, 2 * 2 - 1})); auto space_to_batch = make_shared(batch_to_space, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + 
ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{4800, 9, 11, 2})); } diff --git a/ngraph/test/type_prop/binary_convolution.cpp b/ngraph/test/type_prop/binary_convolution.cpp index 2c62adff237..498a0f88a20 100644 --- a/ngraph/test/type_prop/binary_convolution.cpp +++ b/ngraph/test/type_prop/binary_convolution.cpp @@ -33,8 +33,8 @@ TEST(type_prop, binary_conv_v1_partial_auto_padding_same) const float pad_value = 1.0f; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); @@ -56,8 +56,8 @@ TEST(type_prop, binary_conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lo const float pad_value = 1.0f; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); @@ -79,8 +79,8 @@ TEST(type_prop, binary_conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_up const float pad_value = 1.0f; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); @@ -102,8 +102,8 @@ TEST(type_prop, binary_conv_v1_partial_auto_padding_same_spatial_dims_dynamic) const float pad_value = 1.0f; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, mode, pad_value, auto_pad); diff --git a/ngraph/test/type_prop/binary_elementwise.cpp b/ngraph/test/type_prop/binary_elementwise.cpp index 4a77a3bbbaf..a3eba00c806 100644 --- a/ngraph/test/type_prop/binary_elementwise.cpp +++ b/ngraph/test/type_prop/binary_elementwise.cpp @@ -30,10 +30,10 @@ void test_binary(std::string /* node_type */, shared_ptr(f)(const shared_ptr& x, const shared_ptr& y)) { // Check for bad arguments - auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); - auto tv0_4_2_param = make_shared(element::f32, Shape{4, 2}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::i32, Shape{2, 4}); + auto tv0_4_2_param = make_shared(element::Type_t::f32, Shape{4, 2}); auto 
test_binary_bad_arguments_view_shapes = [&](const shared_ptr& x, const shared_ptr& y) { @@ -121,11 +121,11 @@ void test_binary_logical(std::string /* node_type */, shared_ptr(f)(const shared_ptr& x, const shared_ptr& y)) { // Check for bad arguments - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); - auto tv0_2_4_param_3 = make_shared(element::i32, Shape{2, 4}); - auto tv0_4_2_param = make_shared(element::boolean, Shape{4, 2}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::i32, Shape{2, 4}); + auto tv0_2_4_param_3 = make_shared(element::Type_t::i32, Shape{2, 4}); + auto tv0_4_2_param = make_shared(element::Type_t::boolean, Shape{4, 2}); auto test_binary_bad_arguments_view_shapes = [&](const shared_ptr& x, const shared_ptr& y) { @@ -229,36 +229,37 @@ void test_binary_eltwise_numpy(const element::Type& et, const op::AutoBroadcastS TEST(type_prop, eltwise_auto_bcast) { - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::boolean, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::f32, op::AutoBroadcastType::NUMPY); - test_binary_eltwise_numpy(element::boolean, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::boolean, + op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::f32, op::AutoBroadcastType::NUMPY); + test_binary_eltwise_numpy(element::Type_t::boolean, op::AutoBroadcastType::NUMPY); } TEST(type_prop, 
comparison_good) { - auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); auto eq = make_shared(tv0_2_4_param_0, tv0_2_4_param_1); - EXPECT_EQ(eq->get_element_type(), element::boolean); + EXPECT_EQ(eq->get_element_type(), element::Type_t::boolean); EXPECT_EQ(eq->get_shape(), (Shape{2, 4})); } TEST(type_prop, binary_arithmetic_bad_argument_element_types) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::boolean, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1); @@ -278,8 +279,8 @@ TEST(type_prop, binary_arithmetic_bad_argument_element_types) TEST(type_prop, binary_elementwise_arithmetic_both_dynamic) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto b = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto b = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_dynamic()); @@ -287,8 +288,8 @@ TEST(type_prop, binary_elementwise_arithmetic_both_dynamic) TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_static) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto b = make_shared(element::f32, Shape{1, 2, 3}); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto b = make_shared(element::Type_t::f32, Shape{1, 2, 3}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -297,8 +298,8 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_static) TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_dynamic) { - auto a = make_shared(element::f32, Shape{1, 2, 3}); - auto b = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto b = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -307,8 +308,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_dynamic) TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_dynamic) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3}); - auto b = make_shared(element::f32, PartialShape::dynamic()); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 3}); + auto b = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static()); @@ -319,8 +321,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_ran TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_rank_static_dynamic) { - auto a = make_shared(element::f32, PartialShape::dynamic()); - auto b = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3}); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 3}); auto 
add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static()); @@ -332,8 +335,10 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_dynamic_right_rank_stati TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_static_dynamic_result_static) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -345,8 +350,9 @@ TEST( binary_elementwise_arithmetic_left_rank_static_dynamic_right_rank_static_dynamic_result_rank_static_dynamic) { auto a = make_shared( - element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic()}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).rank().is_static()); @@ -357,8 +363,9 @@ TEST( TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_static_dynamic) { - auto a = make_shared(element::f32, PartialShape{1, 2, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -367,8 +374,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_static_right_rank_static_dyna TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_static) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, 3}); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto b = make_shared(element::Type_t::f32, PartialShape{1, 2, 3}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_partial_shape(0).is_static()); @@ -377,8 +385,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_right_sta TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 3, 3}); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto b = make_shared(element::Type_t::f32, PartialShape{1, 3, 3}); try { @@ -397,8 +406,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_inconsist TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{1, 3, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(element::Type_t::f32, PartialShape{1, 3, 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); try { @@ -417,8 +427,10 @@ TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_inconsis TEST(type_prop, 
binary_elementwise_arithmetic_both_rank_static_dynamic_inconsistent) { - auto a = make_shared(element::f32, PartialShape{Dimension::dynamic(), 3, 3}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 3, 3}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); try { @@ -437,8 +449,9 @@ TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_inconsist TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{1, 2, 3, 4}); + auto a = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto b = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 4}); try { @@ -457,8 +470,9 @@ TEST(type_prop, binary_elementwise_arithmetic_left_rank_static_dynamic_different TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, 2, 3, 4}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 4}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); try { @@ -477,8 +491,10 @@ TEST(type_prop, binary_elementwise_arithmetic_right_rank_static_dynamic_differen TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_different_rank) { - auto a = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 3, 4}); - auto b = make_shared(element::f32, PartialShape{1, 2, Dimension::dynamic()}); + auto a = make_shared(element::Type_t::f32, + PartialShape{1, Dimension::dynamic(), 3, 4}); + auto b = + make_shared(element::Type_t::f32, PartialShape{1, 2, Dimension::dynamic()}); try { @@ -497,8 +513,8 @@ TEST(type_prop, binary_elementwise_arithmetic_both_rank_static_dynamic_different TEST(type_prop, binary_elementwise_arithmetic_both_et_dynamic) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); - auto b = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); + auto b = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); auto add = make_shared(a, b); ASSERT_TRUE(add->get_output_element_type(0).is_dynamic()); @@ -506,20 +522,20 @@ TEST(type_prop, binary_elementwise_arithmetic_both_et_dynamic) TEST(type_prop, binary_elementwise_arithmetic_left_et_dynamic) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); - auto b = make_shared(element::u32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); + auto b = make_shared(element::Type_t::u32, Shape{1, 2, 3, 4}); auto add = make_shared(a, b); - ASSERT_EQ(add->get_output_element_type(0), element::u32); + ASSERT_EQ(add->get_output_element_type(0), element::Type_t::u32); } TEST(type_prop, binary_elementwise_arithmetic_right_et_dynamic) { - auto a = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto b = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::i64, Shape{1, 2, 3, 4}); + auto b = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); auto add = make_shared(a, b); - ASSERT_EQ(add->get_output_element_type(0), element::i64); + ASSERT_EQ(add->get_output_element_type(0), element::Type_t::i64); } TEST(type_prop, 
logic_arith_compare_partial_et) @@ -552,15 +568,19 @@ TEST(type_prop, logic_arith_compare_partial_et) // dyn int -> int // dyn boo -> ! // dyn dyn -> dyn - ASSERT_EQ(test_arith(element::i32, element::i32)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::i32, element::boolean); }); - ASSERT_EQ(test_arith(element::i32, element::dynamic)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::i32); }); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::boolean); }); - ASSERT_ANY_THROW({ test_arith(element::boolean, element::dynamic); }); - ASSERT_EQ(test_arith(element::dynamic, element::i32)->get_element_type(), element::i32); - ASSERT_ANY_THROW({ test_arith(element::dynamic, element::boolean); }); - ASSERT_EQ(test_arith(element::dynamic, element::dynamic)->get_element_type(), element::dynamic); + ASSERT_EQ(test_arith(element::Type_t::i32, element::Type_t::i32)->get_element_type(), + element::Type_t::i32); + ASSERT_ANY_THROW({ test_arith(element::Type_t::i32, element::Type_t::boolean); }); + ASSERT_EQ(test_arith(element::Type_t::i32, element::Type_t::dynamic)->get_element_type(), + element::Type_t::i32); + ASSERT_ANY_THROW({ test_arith(element::Type_t::boolean, element::Type_t::i32); }); + ASSERT_ANY_THROW({ test_arith(element::Type_t::boolean, element::Type_t::boolean); }); + ASSERT_ANY_THROW({ test_arith(element::Type_t::boolean, element::Type_t::dynamic); }); + ASSERT_EQ(test_arith(element::Type_t::dynamic, element::Type_t::i32)->get_element_type(), + element::Type_t::i32); + ASSERT_ANY_THROW({ test_arith(element::Type_t::dynamic, element::Type_t::boolean); }); + ASSERT_EQ(test_arith(element::Type_t::dynamic, element::Type_t::dynamic)->get_element_type(), + element::Type_t::dynamic); // Comparison ops: // @@ -573,19 +593,22 @@ TEST(type_prop, logic_arith_compare_partial_et) // dyn int -> boo // dyn boo -> boo // dyn dyn -> boo - ASSERT_EQ(test_compare(element::i32, element::i32)->get_element_type(), element::boolean); - ASSERT_ANY_THROW({ test_compare(element::i32, element::boolean); }); - ASSERT_EQ(test_compare(element::i32, element::dynamic)->get_element_type(), element::boolean); - ASSERT_ANY_THROW({ test_compare(element::boolean, element::i32); }); - ASSERT_EQ(test_compare(element::boolean, element::boolean)->get_element_type(), - element::boolean); - ASSERT_EQ(test_compare(element::boolean, element::dynamic)->get_element_type(), - element::boolean); - ASSERT_EQ(test_compare(element::dynamic, element::i32)->get_element_type(), element::boolean); - ASSERT_EQ(test_compare(element::dynamic, element::boolean)->get_element_type(), - element::boolean); - ASSERT_EQ(test_compare(element::dynamic, element::dynamic)->get_element_type(), - element::boolean); + ASSERT_EQ(test_compare(element::Type_t::i32, element::Type_t::i32)->get_element_type(), + element::Type_t::boolean); + ASSERT_ANY_THROW({ test_compare(element::Type_t::i32, element::Type_t::boolean); }); + ASSERT_EQ(test_compare(element::Type_t::i32, element::Type_t::dynamic)->get_element_type(), + element::Type_t::boolean); + ASSERT_ANY_THROW({ test_compare(element::Type_t::boolean, element::Type_t::i32); }); + ASSERT_EQ(test_compare(element::Type_t::boolean, element::Type_t::boolean)->get_element_type(), + element::Type_t::boolean); + ASSERT_EQ(test_compare(element::Type_t::boolean, element::Type_t::dynamic)->get_element_type(), + element::Type_t::boolean); + ASSERT_EQ(test_compare(element::Type_t::dynamic, element::Type_t::i32)->get_element_type(), + 
element::Type_t::boolean); + ASSERT_EQ(test_compare(element::Type_t::dynamic, element::Type_t::boolean)->get_element_type(), + element::Type_t::boolean); + ASSERT_EQ(test_compare(element::Type_t::dynamic, element::Type_t::dynamic)->get_element_type(), + element::Type_t::boolean); // Logical negation op: // @@ -598,7 +621,9 @@ TEST(type_prop, logic_arith_compare_partial_et) // int -> ! // boo -> boo // dyn -> boo - ASSERT_EQ(test_logical_not(element::i32)->get_element_type(), element::i32); - ASSERT_EQ(test_logical_not(element::boolean)->get_element_type(), element::boolean); - ASSERT_EQ(test_logical_not(element::dynamic)->get_element_type(), element::dynamic); + ASSERT_EQ(test_logical_not(element::Type_t::i32)->get_element_type(), element::Type_t::i32); + ASSERT_EQ(test_logical_not(element::Type_t::boolean)->get_element_type(), + element::Type_t::boolean); + ASSERT_EQ(test_logical_not(element::Type_t::dynamic)->get_element_type(), + element::Type_t::dynamic); } diff --git a/ngraph/test/type_prop/broadcast.cpp b/ngraph/test/type_prop/broadcast.cpp index dc4d89f13a6..12ed855445e 100644 --- a/ngraph/test/type_prop/broadcast.cpp +++ b/ngraph/test/type_prop/broadcast.cpp @@ -32,39 +32,43 @@ TYPED_TEST_CASE_P(BroadcastTests); TYPED_TEST_P(BroadcastTests, broadcast_numpy) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bc = make_shared(param, target_shape); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); } TYPED_TEST_P(BroadcastTests, broadcast_axes_mapping) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 2}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); auto bc = make_shared(param, target_shape, axes_mapping); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 1})); } TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_constants) { - auto param = make_shared(element::f32, Shape{16}); - auto target_shape_constant_1 = op::Constant::create(element::i64, Shape{1}, {1}); - auto target_shape_constant_2 = op::Constant::create(element::i64, Shape{1}, {16}); - auto target_shape_constant_3 = op::Constant::create(element::i64, Shape{1}, {50}); - auto target_shape_constant_4 = op::Constant::create(element::i64, Shape{1}, {50}); + auto param = make_shared(element::Type_t::f32, Shape{16}); + auto target_shape_constant_1 = + op::Constant::create(element::Type_t::i64, Shape{1}, {1}); + auto target_shape_constant_2 = + op::Constant::create(element::Type_t::i64, Shape{1}, {16}); + auto target_shape_constant_3 = + op::Constant::create(element::Type_t::i64, Shape{1}, {50}); + auto target_shape_constant_4 = + op::Constant::create(element::Type_t::i64, Shape{1}, {50}); std::int64_t axis = 0; std::vector> args{target_shape_constant_1, target_shape_constant_2, target_shape_constant_3, target_shape_constant_4}; auto 
target_shape = make_shared(args, axis); - auto axes_mapping = op::Constant::create(element::i64, Shape{1}, {1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{1}, {1}); auto bc = make_shared(param, target_shape, axes_mapping, "NONE"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().same_scheme(Rank{4})); @@ -74,18 +78,21 @@ TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_constants) TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_node) { - auto param = make_shared(element::f32, Shape{16}); - auto target_shape_constant_1 = make_shared(element::i64, Shape{1}); - auto target_shape_constant_2 = op::Constant::create(element::i64, Shape{1}, {16}); - auto target_shape_constant_3 = op::Constant::create(element::i64, Shape{1}, {50}); - auto target_shape_constant_4 = op::Constant::create(element::i64, Shape{1}, {50}); + auto param = make_shared(element::Type_t::f32, Shape{16}); + auto target_shape_constant_1 = make_shared(element::Type_t::i64, Shape{1}); + auto target_shape_constant_2 = + op::Constant::create(element::Type_t::i64, Shape{1}, {16}); + auto target_shape_constant_3 = + op::Constant::create(element::Type_t::i64, Shape{1}, {50}); + auto target_shape_constant_4 = + op::Constant::create(element::Type_t::i64, Shape{1}, {50}); std::int64_t axis = 0; std::vector> args{target_shape_constant_1, target_shape_constant_2, target_shape_constant_3, target_shape_constant_4}; auto target_shape = make_shared(args, axis); - auto axes_mapping = op::Constant::create(element::i64, Shape{1}, {1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{1}, {1}); auto bc = make_shared(param, target_shape, axes_mapping, "NONE"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().same_scheme(Rank{4})); @@ -96,9 +103,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_target_shape_as_concat_with_node) TYPED_TEST_P(BroadcastTests, broadcast_fail_rank) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{3}, {1, 2, 3}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{3}, {1, 2, 3}); try { @@ -119,9 +126,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_rank) TYPED_TEST_P(BroadcastTests, broadcast_fail_transpose) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 1, 3}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {2, 1}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 1, 3}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 1}); try { @@ -142,9 +149,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_transpose) TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 1}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 3}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = 
op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 1}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 3}); try { @@ -163,9 +170,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map) TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map_shape) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 3}); - auto axes_mapping = op::Constant::create(element::i64, Shape{2}, {1, 2}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 3}); + auto axes_mapping = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 2}); try { @@ -184,9 +191,9 @@ TYPED_TEST_P(BroadcastTests, broadcast_fail_axes_map_shape) TYPED_TEST_P(BroadcastTests, broadcast_axes_wrong_rank) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, Shape{1}); - auto bc_axes = make_shared(element::i64, Shape{2, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto bc_shape = make_shared(element::Type_t::i64, Shape{1}); + auto bc_axes = make_shared(element::Type_t::i64, Shape{2, 2}); try { @@ -205,24 +212,24 @@ TYPED_TEST_P(BroadcastTests, broadcast_axes_wrong_rank) TYPED_TEST_P(BroadcastTests, broadcast_fully_dynamic_target_shape) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, PartialShape::dynamic()); - auto bc_axes = make_shared(element::i64, Shape{2}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto bc_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto bc_axes = make_shared(element::Type_t::i64, Shape{2}); auto bc = make_shared(arg, bc_shape, bc_axes); ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); - bc_shape = make_shared(element::i64, Shape{1}); + bc_shape = make_shared(element::Type_t::i64, Shape{1}); bc = make_shared(arg, bc_shape, bc_axes); ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_broadcast_shape_et_wrong) { - auto arg = make_shared(element::f32, Shape{2, 4}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); // wrong element type - auto bc_shape = make_shared(element::boolean, Shape{1}); - auto bc_axes = make_shared(element::i64, Shape{2}); + auto bc_shape = make_shared(element::Type_t::boolean, Shape{1}); + auto bc_axes = make_shared(element::Type_t::i64, Shape{2}); try { @@ -242,10 +249,10 @@ TYPED_TEST_P(BroadcastTests, broadcast_broadcast_shape_et_wrong) TYPED_TEST_P(BroadcastTests, broadcast_axes_et_wrong) { - auto arg = make_shared(element::f32, Shape{2, 4}); - auto bc_shape = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4}); + auto bc_shape = make_shared(element::Type_t::i64, Shape{1}); // wrong element type - auto bc_axes = make_shared(element::f32, Shape{2}); + auto bc_axes = make_shared(element::Type_t::f32, Shape{2}); try { @@ -267,42 +274,47 @@ TYPED_TEST_P(BroadcastTests, broadcast_axes_et_wrong) TYPED_TEST_P(BroadcastTests, broadcast_explicit_all_inputs_dynamic) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto target_shape = + 
make_shared(element::Type_t::i64, PartialShape::dynamic()); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{3}, vector{0, 1, 2}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 1, 2}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_target_shape_static_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic(1)); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{3}, vector{0, 1, 2}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 1, 2}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); const auto target_shape = - op::Constant::create(element::i64, Shape{3}, vector{1, 2, 3}); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); @@ -312,7 +324,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape) // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); @@ -322,16 +334,18 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape) TYPED_TEST_P(BroadcastTests, broadcast_explicit_input_rank_static) { - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic()); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping const auto axes_mapping_const = - 
op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } @@ -339,16 +353,17 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_input_rank_static) TYPED_TEST_P(BroadcastTests, broadcast_explicit_target_shape_and_input_data_rank_static) { // static rank data - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic(1)); + auto axes_mapping = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{3}, vector{0, 2, 1}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{0, 2, 1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } @@ -356,10 +371,10 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_target_shape_and_input_data_rank TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape_static_rank_input) { const auto target_shape = - op::Constant::create(element::i64, Shape{4}, vector{1, 1, 5, 10}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{1, 1, 5, 10}); // static rank data - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + auto axes_mapping = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); @@ -368,7 +383,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape_static_rank_i // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{4}, vector{0, 2, 1, 3}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{0, 2, 1, 3}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); @@ -377,37 +392,39 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_const_target_shape_static_rank_i TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape) { - const auto data = make_shared(element::f32, PartialShape{1, 2, 3, 4}); + const auto data = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 4}); // dynamic target shape and axes mapping - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); - auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto axes_mapping = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // 
const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{4}, vector{0, 2, 1, 3}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{0, 2, 1, 3}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape and const axes mapping - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape_const_target_shape) { - const auto data = make_shared(element::f32, PartialShape{4}); - auto target_shape = op::Constant::create(element::i64, Shape{4}, vector{1, 4, 2, 3}); + const auto data = make_shared(element::Type_t::f32, PartialShape{4}); + auto target_shape = + op::Constant::create(element::Type_t::i64, Shape{4}, vector{1, 4, 2, 3}); // dynamic axes mapping - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); @@ -416,7 +433,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape_const_target_ // const axes mapping const auto axes_mapping_const = - op::Constant::create(element::i64, Shape{1}, vector{1}); + op::Constant::create(element::Type_t::i64, Shape{1}, vector{1}); bc = make_shared(data, target_shape, axes_mapping_const, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); @@ -426,9 +443,10 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_input_shape_const_target_ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_target_shape) { // dynamic input - auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto target_shape = make_shared(element::i64, PartialShape{4}); - const auto axes_mapping = make_shared(element::i64, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto target_shape = make_shared(element::Type_t::i64, PartialShape{4}); + const auto axes_mapping = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -436,7 +454,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_explicit_static_target_shape) ASSERT_TRUE(bc->get_output_partial_shape(0).is_dynamic()); // static rank input - data = make_shared(element::f32, PartialShape::dynamic(2)); + data = make_shared(element::Type_t::f32, PartialShape::dynamic(2)); bc = make_shared(data, target_shape, axes_mapping, "EXPLICIT"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); @@ -447,15 +465,15 @@ TYPED_TEST_P(BroadcastTests, 
broadcast_explicit_static_target_shape) TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_shape_dynamic) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); // dynamic output shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } @@ -463,16 +481,16 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_shape_dynamic) TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_constant) { // dynamic data - auto data = make_shared(element::f32, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); const auto target_shape = - op::Constant::create(element::i64, Shape{3}, vector{1, 2, 3}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{1, 2, 3}); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); // static rank data - data = make_shared(element::f32, PartialShape::dynamic(2)); + data = make_shared(element::Type_t::f32, PartialShape::dynamic(2)); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); @@ -481,22 +499,24 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_constant) TYPED_TEST_P(BroadcastTests, broadcast_numpy_target_shape_dynamic) { // static rank data - auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic()); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // static shape data - data = make_shared(element::f32, PartialShape{3, 4, 5, 6}); + data = make_shared(element::Type_t::f32, PartialShape{3, 4, 5, 6}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); } TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_target_shape_static_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic(3)); - const auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(3)); + const auto target_shape = + make_shared(element::Type_t::i64, PartialShape::dynamic(1)); const auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); @@ -504,16 +524,16 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_target_shape_static_rank) TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_static_shape) { - const auto data = make_shared(element::f32, PartialShape{1, 2, 3}); + const auto data = make_shared(element::Type_t::f32, PartialShape{1, 
2, 3}); // static rank target_shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_dynamic()); // constant target_shape const auto target_shape_const = - op::Constant::create(element::i64, Shape{3}, vector{3, 2, 3}); + op::Constant::create(element::Type_t::i64, Shape{3}, vector{3, 2, 3}); bc = make_shared(data, target_shape_const, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 3); @@ -525,24 +545,25 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_partially_dynamic) { const Shape expected_target_shape{1, 2, 3, 4}; const auto target_shape = op::Constant::create( - element::i64, + element::Type_t::i64, {expected_target_shape.size()}, std::vector(expected_target_shape.begin(), expected_target_shape.end())); - auto data = make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + auto data = + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); - data = make_shared(element::f32, + data = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); - data = make_shared(element::f32, + data = make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -550,7 +571,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_partially_dynamic) ASSERT_EQ(bc->get_output_partial_shape(0), expected_target_shape); data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); bc = make_shared(data, target_shape, "NUMPY"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -560,10 +581,10 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_input_partially_dynamic) TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) { - const auto target_shape = op::Constant::create(element::i64, Shape{4}, {1, 2, 3, 4}); + const auto target_shape = op::Constant::create(element::Type_t::i64, Shape{4}, {1, 2, 3, 4}); - auto data = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 999, 3, 4}); + auto data = make_shared(element::Type_t::f32, + PartialShape{Dimension::dynamic(), 999, 3, 4}); try { auto bc = make_shared(data, target_shape, "NUMPY"); @@ -580,7 +601,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) } data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), 888}); try { @@ -598,7 +619,7 @@ TYPED_TEST_P(BroadcastTests, broadcast_numpy_static_dims_incorrect) } data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{5, Dimension::dynamic(), Dimension::dynamic(), 
Dimension::dynamic()}); try { @@ -654,30 +675,30 @@ INSTANTIATE_TYPED_TEST_CASE_P(type_prop, BroadcastTests, BroadcastTypes, ); // changing AutoBroadcastSpec to BroadcastModeSpec forces running pdpd tests separately TEST(type_prop, broadcast_v1_pdpd) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bc = make_shared( param, target_shape, op::AutoBroadcastSpec(op::AutoBroadcastType::PDPD, 1)); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); } TEST(type_prop, broadcast_v3_pdpd) { - auto param = make_shared(element::f32, Shape{3, 1}); - auto target_shape = op::Constant::create(element::i64, Shape{3}, {2, 3, 6}); + auto param = make_shared(element::Type_t::f32, Shape{3, 1}); + auto target_shape = op::Constant::create(element::Type_t::i64, Shape{3}, {2, 3, 6}); auto bc = make_shared( param, target_shape, op::BroadcastModeSpec(op::BroadcastType::PDPD, 1)); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 3, 6})); } TEST(type_prop, broadcast_v3_bidirectional_mode_string) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = make_shared(element::i32, Shape{2}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 4, 1}); + const auto shape = make_shared(element::Type_t::i32, Shape{2}); const auto broadcast_v3 = make_shared(arg, shape, "BIDIRECTIONAL"); @@ -687,9 +708,9 @@ TEST(type_prop, broadcast_v3_bidirectional_mode_string) TEST(type_prop, broadcast_v3_shape_unexpected_axes_mapping_input) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = make_shared(element::i16, Shape{2}); - const auto axes_mapping = make_shared(element::f32, Shape{3}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 4, 1}); + const auto shape = make_shared(element::Type_t::i16, Shape{2}); + const auto axes_mapping = make_shared(element::Type_t::f32, Shape{3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; try @@ -712,8 +733,8 @@ TEST(type_prop, broadcast_v3_shape_unexpected_axes_mapping_input) TEST(type_prop, broadcast_v3_not_provided_axes_input_for_explicit_mode) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = make_shared(element::i16, Shape{2}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 4, 1}); + const auto shape = make_shared(element::Type_t::i16, Shape{2}); const auto broadcast_spec = op::BroadcastType::EXPLICIT; try @@ -735,65 +756,65 @@ TEST(type_prop, broadcast_v3_not_provided_axes_input_for_explicit_mode) TEST(type_prop, broadcast_v3_shape) { - const auto arg = make_shared(element::f32, Shape{1, 4, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {1, 4}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 4, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {2}, {1, 4}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 4,
4})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{2}))); } TEST(type_prop, broadcast_v3_shape_2) { - const auto arg = make_shared(element::f32, Shape{3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {2, 1, 6}); + const auto arg = make_shared(element::Type_t::f32, Shape{3, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {2, 1, 6}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{2, 3, 6})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_shape_3) { - const auto arg = make_shared(element::f32, Shape{2, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {2, 4}); + const auto arg = make_shared(element::Type_t::f32, Shape{2, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {2}, {2, 4}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{2, 4})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{1}))); } TEST(type_prop, broadcast_v3_shape_4) { - const auto arg = make_shared(element::f32, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {2}, {3, 1}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 3, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {2}, {3, 1}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 3, 1})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{}))); } TEST(type_prop, broadcast_v3_shape_5) { - const auto arg = make_shared(element::f32, Shape{16, 1, 1}); - const auto shape = op::Constant::create(element::i64, {4}, {1, 1, 50, 50}); + const auto arg = make_shared(element::Type_t::f32, Shape{16, 1, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {4}, {1, 1, 50, 50}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{1, 16, 50, 50})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2, 3}))); @@ -801,34 +822,34 @@ TEST(type_prop, broadcast_v3_shape_5) TEST(type_prop, broadcast_v3_shape_6) { - const auto arg = make_shared(element::f32, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {3, 1, 3}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 3, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {3, 1, 3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::f32); + 
ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::f32); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{3, 3, 3})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_shape_6_type_infer) { - const auto arg = make_shared(element::u16, Shape{1, 3, 1}); - const auto shape = op::Constant::create(element::i64, {3}, {3, 1, 3}); + const auto arg = make_shared(element::Type_t::u16, Shape{1, 3, 1}); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {3, 1, 3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); - ASSERT_EQ(broadcast_v3->get_element_type(), element::u16); + ASSERT_EQ(broadcast_v3->get_element_type(), element::Type_t::u16); ASSERT_EQ(broadcast_v3->get_shape(), (Shape{3, 3, 3})); ASSERT_EQ(broadcast_v3->get_broadcast_axes(), (make_pair(true, AxisSet{0, 2}))); } TEST(type_prop, broadcast_v3_incorrect_target_shape) { - const auto arg = make_shared(element::f32, Shape{4, 3, 2}); - const auto shape = op::Constant::create(element::i64, {3}, {8, 6, 4}); + const auto arg = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {8, 6, 4}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; try @@ -850,8 +871,8 @@ TEST(type_prop, broadcast_v3_incorrect_target_shape) TEST(type_prop, broadcast_v3_incorrect_target_shape_2) { - const auto arg = make_shared(element::f32, Shape{1, 1, 2}); - const auto shape = op::Constant::create(element::i64, {2}, {2, 3}); + const auto arg = make_shared(element::Type_t::f32, Shape{1, 1, 2}); + const auto shape = op::Constant::create(element::Type_t::i64, {2}, {2, 3}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; try @@ -873,8 +894,8 @@ TEST(type_prop, broadcast_v3_incorrect_target_shape_2) TEST(type_prop, broadcast_v3_output_rank_not_deduced) { - const auto arg = make_shared(element::f32, PartialShape::dynamic()); - const auto shape = make_shared(element::i64, PartialShape::dynamic(1)); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); @@ -884,8 +905,8 @@ TEST(type_prop, broadcast_v3_output_rank_not_deduced) TEST(type_prop, broadcast_v3_output_rank_deduced_from_arg) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); - const auto shape = op::Constant::create(element::i64, {3}, {8, 6, 4}); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); + const auto shape = op::Constant::create(element::Type_t::i64, {3}, {8, 6, 4}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); @@ -895,8 +916,8 @@ TEST(type_prop, broadcast_v3_output_rank_deduced_from_arg) TEST(type_prop, broadcast_v3_output_rank_deduced_from_new_shape_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); - const auto shape = op::Constant::create(element::i64, {5}, {8, 6, 1, 5, 1}); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); + const auto shape = op::Constant::create(element::Type_t::i64, {5}, {8, 6, 1, 5, 1}); const auto broadcast_spec = op::BroadcastType::BIDIRECTIONAL; const auto broadcast_v3 = make_shared(arg, shape, 
broadcast_spec); @@ -908,40 +929,40 @@ TEST(type_prop, broadcast_v3_output_rank_deduced_from_new_shape_input) TEST(type_prop, broadcast_v3_bidirectional_dynamic_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic()); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant target shape - const auto target_shape_const = op::Constant::create(element::i64, {3}, {2, 4, 6}); + const auto target_shape_const = op::Constant::create(element::Type_t::i64, {3}, {2, 4, 6}); broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, broadcast_v3_bidirectional_static_rank_input) { - const auto arg = make_shared(element::f32, PartialShape::dynamic(4)); + const auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant target shape - const auto target_shape_const = op::Constant::create(element::i64, {3}, {2, 4, 6}); + const auto target_shape_const = op::Constant::create(element::Type_t::i64, {3}, {2, 4, 6}); broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); @@ -950,27 +971,27 @@ TEST(type_prop, broadcast_v3_bidirectional_static_rank_input) TEST(type_prop, broadcast_v3_bidirectional_static_shape_input) { - const auto arg = make_shared(element::f32, PartialShape{1, 2, 3, 1}); + const auto arg = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 1}); // dynamic target shape - auto target_shape = make_shared(element::i64, PartialShape::dynamic()); + auto target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // static rank target shape - target_shape = make_shared(element::i64, PartialShape::dynamic(1)); + target_shape = make_shared(element::Type_t::i64, PartialShape::dynamic(1)); broadcast_v3 = make_shared(arg, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_dynamic()); // constant 
target shape - auto target_shape_const = op::Constant::create(element::i64, {4}, {2, 2, 3, 2}); + auto target_shape_const = op::Constant::create(element::Type_t::i64, {4}, {2, 2, 3, 2}); broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0), (PartialShape{2, 2, 3, 2})); - target_shape_const = op::Constant::create(element::i64, {4}, {5, 2, 3, 7}); + target_shape_const = op::Constant::create(element::Type_t::i64, {4}, {5, 2, 3, 7}); broadcast_v3 = make_shared(arg, target_shape_const, "BIDIRECTIONAL"); ASSERT_TRUE(broadcast_v3->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(broadcast_v3->get_output_partial_shape(0).rank().get_length(), 4); @@ -981,22 +1002,23 @@ TEST(type_prop, broadcast_v3_bidirectional_static_shape_input) TEST(type_prop, broadcast_v3_bidirectional_partially_dynamic_input) { const auto target_shape = - op::Constant::create(element::i64, Shape{4}, vector{1, 1, 50, 50}); + op::Constant::create(element::Type_t::i64, Shape{4}, vector{1, 1, 50, 50}); - auto data = make_shared(element::f32, PartialShape{16, 1, Dimension::dynamic()}); + auto data = + make_shared(element::Type_t::f32, PartialShape{16, 1, Dimension::dynamic()}); auto bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, 16, 50, 50})); - data = make_shared(element::f32, + data = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 1, Dimension::dynamic()}); bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); ASSERT_EQ(bc->get_output_partial_shape(0).rank().get_length(), 4); ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, Dimension::dynamic(), 50, 50})); - data = make_shared(element::f32, + data = make_shared(element::Type_t::f32, PartialShape{16, Dimension::dynamic(), Dimension::dynamic()}); bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); @@ -1004,7 +1026,7 @@ TEST(type_prop, broadcast_v3_bidirectional_partially_dynamic_input) ASSERT_EQ(bc->get_output_partial_shape(0), (PartialShape{1, 16, 50, 50})); data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); bc = make_shared(data, target_shape, "BIDIRECTIONAL"); ASSERT_TRUE(bc->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/bucketize.cpp b/ngraph/test/type_prop/bucketize.cpp index 44fbc8cbf1e..89cd4d30b39 100644 --- a/ngraph/test/type_prop/bucketize.cpp +++ b/ngraph/test/type_prop/bucketize.cpp @@ -23,62 +23,66 @@ using namespace ngraph; TEST(type_prop, bucketize) { - auto data = make_shared(element::f32, Shape{2, 3, 2}); - auto buckets = make_shared(element::f32, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 3, 2}); + auto buckets = make_shared(element::Type_t::f32, Shape{4}); auto bucketize = make_shared(data, buckets); - EXPECT_EQ(bucketize->get_element_type(), element::i64); + EXPECT_EQ(bucketize->get_element_type(), element::Type_t::i64); 
EXPECT_TRUE(bucketize->get_output_partial_shape(0).same_scheme(PartialShape{2, 3, 2})); } TEST(type_prop, bucketize_output_type) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto buckets = make_shared(element::f32, Shape{5}); - auto bucketize = make_shared(data, buckets, element::i32); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); + auto bucketize = make_shared(data, buckets, element::Type_t::i32); - ASSERT_EQ(bucketize->get_output_element_type(0), element::i32); + ASSERT_EQ(bucketize->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE(bucketize->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, 3, 4})); } TEST(type_prop, bucketize_output_type_right_bound) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto buckets = make_shared(element::f32, Shape{5}); - auto bucketize = make_shared(data, buckets, element::i32, false); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); + auto bucketize = make_shared(data, buckets, element::Type_t::i32, false); - ASSERT_EQ(bucketize->get_output_element_type(0), element::i32); + ASSERT_EQ(bucketize->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE(bucketize->get_output_partial_shape(0).same_scheme(PartialShape{1, 2, 3, 4})); } TEST(type_prop, bucketize_dynamic_input) { - auto data = make_shared(element::f64, PartialShape{4, Dimension::dynamic()}); - auto buckets = make_shared(element::f32, Shape{5}); + auto data = + make_shared(element::Type_t::f64, PartialShape{4, Dimension::dynamic()}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); auto bucketize = make_shared(data, buckets); - EXPECT_EQ(bucketize->get_element_type(), element::i64); + EXPECT_EQ(bucketize->get_element_type(), element::Type_t::i64); EXPECT_TRUE( bucketize->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } TEST(type_prop, bucketize_dynamic_buckets) { - auto data = make_shared(element::f64, PartialShape{4, Dimension::dynamic()}); - auto buckets = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto data = + make_shared(element::Type_t::f64, PartialShape{4, Dimension::dynamic()}); + auto buckets = + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()}); auto bucketize = make_shared(data, buckets); - EXPECT_EQ(bucketize->get_element_type(), element::i64); + EXPECT_EQ(bucketize->get_element_type(), element::Type_t::i64); EXPECT_TRUE( bucketize->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } TEST(type_prop, bucketize_fail_output_type) { - auto data = make_shared(element::f64, PartialShape{4, Dimension::dynamic()}); - auto buckets = make_shared(element::f32, Shape{5}); + auto data = + make_shared(element::Type_t::f64, PartialShape{4, Dimension::dynamic()}); + auto buckets = make_shared(element::Type_t::f32, Shape{5}); try { - auto bucketize = make_shared(data, buckets, element::f64); + auto bucketize = make_shared(data, buckets, element::Type_t::f64); // Should have thrown, so fail if it didn't FAIL() << "Invalid output type not detected"; } @@ -94,8 +98,9 @@ TEST(type_prop, bucketize_fail_output_type) TEST(type_prop, bucketize_fail_buckets_dim) { - auto data = make_shared(element::f64, PartialShape{4, Dimension::dynamic()}); - auto buckets = make_shared(element::f32, Shape{5, 5}); + auto data = + make_shared(element::Type_t::f64, PartialShape{4, 
Dimension::dynamic()}); + auto buckets = make_shared(element::Type_t::f32, Shape{5, 5}); try { auto bucketize = make_shared(data, buckets); diff --git a/ngraph/test/type_prop/clamp.cpp b/ngraph/test/type_prop/clamp.cpp index 63652be8742..8c696d5cb93 100644 --- a/ngraph/test/type_prop/clamp.cpp +++ b/ngraph/test/type_prop/clamp.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, fused_clamp) { - const auto data = make_shared(element::f64, Shape{2, 2}); + const auto data = make_shared(element::Type_t::f64, Shape{2, 2}); try { @@ -38,6 +38,6 @@ TEST(type_prop, fused_clamp) } const auto clamp = make_shared(data, 1.0, 2.0); - EXPECT_EQ(clamp->get_element_type(), element::f64); + EXPECT_EQ(clamp->get_element_type(), element::Type_t::f64); EXPECT_EQ(clamp->get_shape(), (Shape{2, 2})); } diff --git a/ngraph/test/type_prop/concat.cpp b/ngraph/test/type_prop/concat.cpp index 450a7feb933..7d912ef6c03 100644 --- a/ngraph/test/type_prop/concat.cpp +++ b/ngraph/test/type_prop/concat.cpp @@ -24,19 +24,19 @@ using namespace ngraph; TEST(type_prop, concat_deduce) { // Deduce type - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 4}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 4}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); - ASSERT_EQ(c->get_element_type(), element::f32); + ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{2, 12, 4})); } TEST(type_prop, concat_deduce_wrong_rank) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{ 2, 2, }); @@ -61,9 +61,9 @@ TEST(type_prop, concat_deduce_wrong_rank) TEST(type_prop, concat_deduce_wrong_shape) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 5}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 5}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -85,9 +85,9 @@ TEST(type_prop, concat_deduce_wrong_shape) TEST(type_prop, concat_deduce_axis_oob) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 5}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 5}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 3); @@ -107,19 +107,19 @@ TEST(type_prop, concat_deduce_axis_oob) TEST(type_prop, concat_deduce_axis_barely_in_bounds) { // Deduce type - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::f32, Shape{2, 3, 8}); - auto param2 = make_shared(element::f32, Shape{2, 3, 12}); + auto param0 = make_shared(element::Type_t::f32, 
Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 3, 8}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 3, 12}); auto c = make_shared(NodeVector{param0, param1, param2}, 2); - ASSERT_EQ(c->get_element_type(), element::f32); + ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{2, 3, 24})); } TEST(type_prop, concat_deduce_elem_type_mismatch) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::i32, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 4}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::i32, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 4}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -138,20 +138,20 @@ TEST(type_prop, concat_deduce_elem_type_mismatch) TEST(type_prop, concat_partial_et_consistent) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::dynamic, Shape{2, 7, 4}); - auto param2 = make_shared(element::f32, Shape{2, 2, 4}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::dynamic, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 2, 4}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); - ASSERT_EQ(c->get_element_type(), element::f32); + ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{2, 12, 4})); } TEST(type_prop, concat_partial_et_inconsistent) { - auto param0 = make_shared(element::f32, Shape{2, 3, 4}); - auto param1 = make_shared(element::dynamic, Shape{2, 7, 4}); - auto param2 = make_shared(element::i32, Shape{2, 2, 4}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto param1 = make_shared(element::Type_t::dynamic, Shape{2, 7, 4}); + auto param2 = make_shared(element::Type_t::i32, Shape{2, 2, 4}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -170,9 +170,9 @@ TEST(type_prop, concat_partial_et_inconsistent) TEST(type_prop, concat_partial_all_rank_dynamic) { - auto param0 = make_shared(element::f32, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_TRUE(c->get_output_partial_shape(0).rank().is_dynamic()); @@ -181,10 +181,10 @@ TEST(type_prop, concat_partial_all_rank_dynamic) TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_consistent) { auto param0 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_TRUE( @@ -194,10 +194,10 @@ TEST(type_prop, 
concat_partial_some_rank_dynamic_others_rank_static_dynamic_cons TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_rank_inconsistent) { auto param0 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic(), 4}); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, + PartialShape{2, 3, Dimension::dynamic(), 4}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -221,10 +221,10 @@ TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_rank TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_dims_inconsistent) { auto param0 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{3, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{3, 3, Dimension::dynamic()}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); @@ -249,12 +249,12 @@ TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_dynamic_dims_intransitively_inconsistent) { auto param0 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), 3, Dimension::dynamic()}); auto param3 = - make_shared(element::f32, PartialShape{3, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{3, 3, Dimension::dynamic()}); try { auto c = make_shared(NodeVector{param0, param1, param2, param3}, 1); @@ -277,10 +277,10 @@ TEST(type_prop, TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_with_concat_axis_static) { - auto param0 = make_shared(element::f32, PartialShape{2, 2, 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::f32, PartialShape{2, 2, 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_TRUE( @@ -290,10 +290,10 @@ TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_with_concat_ TEST(type_prop, concat_partial_some_rank_dynamic_others_rank_static_with_concat_axis_static_dims_inconsistent) { - auto param0 = make_shared(element::f32, PartialShape{2, 2, 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::f32, PartialShape{2, 2, 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{3, 3, 
Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{3, 3, Dimension::dynamic()}); try { @@ -317,11 +317,11 @@ TEST(type_prop, TEST(type_prop, concat_partial_all_static_with_concat_axis_static_compatible_result_static) { - auto param0 = make_shared(element::f32, PartialShape{2, 2, 3}); + auto param0 = make_shared(element::Type_t::f32, PartialShape{2, 2, 3}); auto param1 = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 4, 3}); + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 4, 3}); auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_EQ(c->get_shape(), (Shape{2, 9, 3})); @@ -330,11 +330,11 @@ TEST(type_prop, concat_partial_all_static_with_concat_axis_static_compatible_res TEST(type_prop, concat_partial_all_static_with_concat_axis_static_compatible_result_dynamic) { auto param0 = - make_shared(element::f32, PartialShape{2, 2, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 2, Dimension::dynamic()}); auto param1 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), 4, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), 4, Dimension::dynamic()}); auto param2 = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); auto c = make_shared(NodeVector{param0, param1, param2}, 1); ASSERT_TRUE( @@ -343,11 +343,11 @@ TEST(type_prop, concat_partial_all_static_with_concat_axis_static_compatible_res TEST(type_prop, concat_partial_all_static_with_concat_axis_static_dims_incompatible) { - auto param0 = make_shared(element::f32, PartialShape{2, 2, 3}); + auto param0 = make_shared(element::Type_t::f32, PartialShape{2, 2, 3}); auto param1 = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 4, 3}); + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 4, 3}); auto param2 = - make_shared(element::f32, PartialShape{3, 3, Dimension::dynamic()}); + make_shared(element::Type_t::f32, PartialShape{3, 3, Dimension::dynamic()}); try { auto c = make_shared(NodeVector{param0, param1, param2}, 1); diff --git a/ngraph/test/type_prop/constant.cpp b/ngraph/test/type_prop/constant.cpp index 1de8b9e8f99..b28e89ed7d0 100644 --- a/ngraph/test/type_prop/constant.cpp +++ b/ngraph/test/type_prop/constant.cpp @@ -23,29 +23,29 @@ using namespace ngraph; TEST(type_prop, scalar_constant_deduce_float32) { - auto c = op::Constant::create(element::f32, Shape{}, {208}); - ASSERT_EQ(c->get_element_type(), element::f32); + auto c = op::Constant::create(element::Type_t::f32, Shape{}, {208}); + ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{})); } TEST(type_prop, scalar_constant_deduce_bool) { - auto c = op::Constant::create(element::boolean, Shape{}, {1}); - ASSERT_EQ(c->get_element_type(), element::boolean); + auto c = op::Constant::create(element::Type_t::boolean, Shape{}, {1}); + ASSERT_EQ(c->get_element_type(), element::Type_t::boolean); ASSERT_EQ(c->get_shape(), (Shape{})); } TEST(type_prop, tensor_constant_deduce_float32) { - auto c = op::Constant::create(element::f32, Shape{2, 2}, {208, 208, 208, 208}); - ASSERT_EQ(c->get_element_type(), element::f32); + auto c = op::Constant::create(element::Type_t::f32, Shape{2, 2}, {208, 208, 208, 208}); + 
ASSERT_EQ(c->get_element_type(), element::Type_t::f32); ASSERT_EQ(c->get_shape(), (Shape{2, 2})); } TEST(type_prop, tensor_constant_deduce_bool) { - auto c = op::Constant::create(element::boolean, Shape{2, 2}, {1, 1, 1, 1}); - ASSERT_EQ(c->get_element_type(), element::boolean); + auto c = op::Constant::create(element::Type_t::boolean, Shape{2, 2}, {1, 1, 1, 1}); + ASSERT_EQ(c->get_element_type(), element::Type_t::boolean); ASSERT_EQ(c->get_shape(), (Shape{2, 2})); } @@ -53,7 +53,7 @@ TEST(type_prop, tensor_constant_bad_count) { try { - auto c = op::Constant::create(element::boolean, Shape{2, 2}, {1, 1, 1}); + auto c = op::Constant::create(element::Type_t::boolean, Shape{2, 2}, {1, 1, 1}); // Should have thrown, so fail if it didn't FAIL() << "Incorrect number of literals not detected"; } @@ -71,8 +71,8 @@ TEST(type_prop, tensor_constant_bad_count) TEST(type_prop, constant_zero_elements_one_string) { - auto c = - make_shared(element::i64, Shape{2, 0, 2, 2}, std::vector{"42"}); - ASSERT_EQ(c->get_element_type(), element::i64); + auto c = make_shared( + element::Type_t::i64, Shape{2, 0, 2, 2}, std::vector{"42"}); + ASSERT_EQ(c->get_element_type(), element::Type_t::i64); ASSERT_EQ(c->get_shape(), (Shape{2, 0, 2, 2})); } diff --git a/ngraph/test/type_prop/convert.cpp b/ngraph/test/type_prop/convert.cpp index c16b0dcab0c..e3b69a6c93c 100644 --- a/ngraph/test/type_prop/convert.cpp +++ b/ngraph/test/type_prop/convert.cpp @@ -24,8 +24,8 @@ using namespace ngraph; TEST(type_prop, convert_deduce) { // Deduce type - auto param = make_shared(element::f32, Shape{2, 3, 4}); - auto c = make_shared(param, element::i32); - ASSERT_EQ(c->get_element_type(), element::i32); + auto param = make_shared(element::Type_t::f32, Shape{2, 3, 4}); + auto c = make_shared(param, element::Type_t::i32); + ASSERT_EQ(c->get_element_type(), element::Type_t::i32); ASSERT_EQ(c->get_shape(), (Shape{2, 3, 4})); } diff --git a/ngraph/test/type_prop/convolution.cpp b/ngraph/test/type_prop/convolution.cpp index b298f0aa4bc..4a1ca667b46 100644 --- a/ngraph/test/type_prop/convolution.cpp +++ b/ngraph/test/type_prop/convolution.cpp @@ -25,10 +25,10 @@ using namespace ngraph; TEST(type_prop, conv_1d_deduce) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto conv = make_shared(param0, param1); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -43,8 +43,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 91}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 91}); // output delta auto conv = make_shared(data_batch_shape, param0, param1, @@ -53,7 +54,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); 
EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -67,15 +68,15 @@ TEST(type_prop, conv_1d_back_data_batch_deduce) TEST(type_prop, conv_1d_deduce_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{1}; auto dilation_strides = Strides{1}; auto padding_below = CoordinateDiff{2}; auto padding_above = CoordinateDiff{3}; auto conv = make_shared( param0, param1, move_strides, dilation_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -90,8 +91,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 96}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 96}); // output delta auto move_strides = Strides{1}; auto dilation_strides = Strides{1}; auto padding_below = CoordinateDiff{2}; @@ -104,7 +106,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded) padding_below, padding_above, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -118,11 +120,11 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_padded) TEST(type_prop, conv_1d_deduce_strided) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{2}; auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); @@ -137,8 +139,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 46}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 46}); // output delta auto move_strides = Strides{2}; auto conv = make_shared(data_batch_shape, param0, @@ -148,7 +151,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2}); @@ -162,15 +165,15 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided) TEST(type_prop, 
conv_1d_deduce_strided_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{2}; auto dilation_strides = Strides{1}; auto padding_below = CoordinateDiff{2}; auto padding_above = CoordinateDiff{3}; auto conv = make_shared( param0, param1, move_strides, dilation_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 48})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); @@ -185,8 +188,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 48}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 48}); // output delta auto move_strides = Strides{2}; auto dilation_strides = Strides{1}; auto padding_below = CoordinateDiff{2}; @@ -199,7 +203,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded) padding_below, padding_above, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2}); @@ -213,11 +217,11 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_padded) TEST(type_prop, conv_1d_deduce_strided_small_uneven) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 5}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 5}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2}); auto move_strides = Strides{2}; auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); @@ -232,8 +236,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven) { // Deduce type Shape data_batch_shape{64, 3, 5}; - auto param0 = make_shared(element::f32, Shape{128, 3, 2}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 2}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 2}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 2}); // output delta auto move_strides = Strides{2}; auto conv = make_shared(data_batch_shape, param0, @@ -243,7 +248,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2}); @@ -257,11 +262,11 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_uneven) TEST(type_prop, conv_1d_deduce_strided_small_even) { // Deduce type - auto param0 = make_shared(element::f32, 
Shape{64, 3, 6}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 6}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2}); auto move_strides = Strides{2}; auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 3})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{2}); @@ -276,8 +281,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even) { // Deduce type Shape data_batch_shape{64, 3, 6}; - auto param0 = make_shared(element::f32, Shape{128, 3, 2}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 3}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 2}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 3}); // output delta auto move_strides = Strides{2}; auto conv = make_shared(data_batch_shape, param0, @@ -287,7 +293,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{2}); @@ -301,12 +307,12 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_strided_small_even) TEST(type_prop, conv_1d_deduce_window_dilated) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 82})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -321,8 +327,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 82}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 82}); // output delta auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto conv = make_shared(data_batch_shape, @@ -333,7 +340,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated) CoordinateDiff{0}, CoordinateDiff{0}, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -347,15 +354,15 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated) TEST(type_prop, conv_1d_deduce_window_dilated_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 
3, 10}); auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto padding_below = CoordinateDiff{2}; auto padding_above = CoordinateDiff{3}; auto conv = make_shared( param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 87})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -370,8 +377,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded) { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 87}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 87}); // output delta auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto padding_below = CoordinateDiff{2}; @@ -384,7 +392,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded) padding_below, padding_above, Strides{1}); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -398,8 +406,8 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_padded) TEST(type_prop, conv_1d_deduce_window_dilated_data_dilated_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto padding_below = CoordinateDiff{2}; @@ -412,7 +420,7 @@ TEST(type_prop, conv_1d_deduce_window_dilated_data_dilated_padded) padding_below, padding_above, data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 285})); EXPECT_EQ(conv->get_window_movement_strides(), Strides{1}); @@ -427,8 +435,9 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde { // Deduce type Shape data_batch_shape{64, 3, 100}; - auto param0 = make_shared(element::f32, Shape{128, 3, 10}); // filters - auto param1 = make_shared(element::f32, Shape{64, 128, 285}); // output delta + auto param0 = make_shared(element::Type_t::f32, Shape{128, 3, 10}); // filters + auto param1 = + make_shared(element::Type_t::f32, Shape{64, 128, 285}); // output delta auto move_strides = Strides{1}; auto dilate_strides = Strides{2}; auto padding_below = CoordinateDiff{2}; @@ -442,7 +451,7 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde padding_below, padding_above, data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), data_batch_shape); EXPECT_EQ(conv->get_window_movement_strides_forward(), Strides{1}); @@ -456,10 +465,10 @@ TEST(type_prop, conv_1d_back_data_batch_deduce_window_dilated_data_dilated_padde TEST(type_prop, conv_2d_deduce) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, 
Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto conv = make_shared(param0, param1); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 91, 131})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); @@ -473,15 +482,15 @@ TEST(type_prop, conv_2d_deduce) TEST(type_prop, conv_2d_deduce_padded) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{1, 1}; auto dilate_strides = Strides{1, 1}; auto padding_below = CoordinateDiff{2, 3}; auto padding_above = CoordinateDiff{3, 4}; auto conv = make_shared( param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 138})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); @@ -495,15 +504,15 @@ TEST(type_prop, conv_2d_deduce_padded) TEST(type_prop, conv_2d_deduce_padded_neg) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{1, 1}; auto dilate_strides = Strides{1, 1}; auto padding_below = CoordinateDiff{2, -3}; auto padding_above = CoordinateDiff{3, -4}; auto conv = make_shared( param0, param1, move_strides, dilate_strides, padding_below, padding_above); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 96, 124})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{1, 1})); @@ -526,8 +535,8 @@ TEST_P(DeduceAutoPadTest, same_lower) image_shape.insert(image_shape.begin(), {1, 1}); // Add {N, C} auto filter_shape = std::get<1>(GetParam()); filter_shape.insert(filter_shape.begin(), {1, 1}); // Add {O, I} - auto param0 = make_shared(element::f32, image_shape); - auto param1 = make_shared(element::f32, filter_shape); + auto param0 = make_shared(element::Type_t::f32, image_shape); + auto param1 = make_shared(element::Type_t::f32, filter_shape); auto conv = make_shared(param0, param1, @@ -589,11 +598,11 @@ INSTANTIATE_TEST_CASE_P(type_prop, TEST(type_prop, conv_2d_deduce_strided) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{2, 3}; auto conv = make_shared(param0, param1, move_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 46, 44})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); @@ -607,12 +616,12 @@ TEST(type_prop, conv_2d_deduce_strided) TEST(type_prop, 
conv_2d_deduce_strided_window_dilated) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{2, 3}; auto dilate_strides = Strides{3, 2}; auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 37, 38})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); @@ -626,8 +635,8 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated) TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 100, 150}); - auto param1 = make_shared(element::f32, Shape{128, 3, 10, 20}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 100, 150}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 10, 20}); auto move_strides = Strides{2, 3}; auto dilate_strides = Strides{3, 2}; auto padding_below = CoordinateDiff{0, 0}; @@ -640,7 +649,7 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated) padding_below, padding_above, data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 86, 137})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); @@ -654,12 +663,12 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_data_dilated) TEST(type_prop, conv_2d_deduce_strided_window_dilated_small) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2, 3}); auto move_strides = Strides{2, 3}; auto dilate_strides = Strides{3, 2}; auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3})); @@ -673,12 +682,12 @@ TEST(type_prop, conv_2d_deduce_strided_window_dilated_small) TEST(type_prop, conv_3d_deduce_strided_window_dilated_small) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); - auto param1 = make_shared(element::f32, Shape{128, 3, 2, 3, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2, 3, 2}); auto move_strides = Strides{2, 3, 4}; auto dilate_strides = Strides{3, 2, 2}; auto conv = make_shared(param0, param1, move_strides, dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 2, 2, 2})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3, 4})); @@ -692,8 +701,8 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_small) TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small) { // Deduce type - auto param0 = make_shared(element::f32, Shape{64, 3, 7, 8, 10}); - auto param1 = 
make_shared(element::f32, Shape{128, 3, 2, 3, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{64, 3, 7, 8, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{128, 3, 2, 3, 2}); auto move_strides = Strides{2, 3, 4}; auto dilate_strides = Strides{3, 2, 2}; auto padding_below = CoordinateDiff{0, 0, 0}; @@ -706,7 +715,7 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small) padding_below, padding_above, data_dilate_strides); - EXPECT_EQ(conv->get_element_type(), element::f32); + EXPECT_EQ(conv->get_element_type(), element::Type_t::f32); EXPECT_EQ(conv->get_shape(), (Shape{64, 128, 5, 6, 5})); EXPECT_EQ(conv->get_window_movement_strides(), (Strides{2, 3, 4})); @@ -720,8 +729,8 @@ TEST(type_prop, conv_3d_deduce_strided_window_dilated_data_dilated_small) TEST(type_prop, conv_invalid_element_type_mismatch) { // Deduce type - auto param0 = make_shared(element::f32, Shape{3, 3, 3, 3}); - auto param1 = make_shared(element::i32, Shape{3, 3, 2, 2}); + auto param0 = make_shared(element::Type_t::f32, Shape{3, 3, 3, 3}); + auto param1 = make_shared(element::Type_t::i32, Shape{3, 3, 2, 2}); try { auto conv = make_shared(param0, param1); @@ -743,8 +752,8 @@ TEST(type_prop, conv_invalid_element_type_mismatch) TEST(type_prop, conv_invalid_0d_input) { // Deduce type - auto param0 = make_shared(element::f32, Shape{}); - auto param1 = make_shared(element::f32, Shape{}); + auto param0 = make_shared(element::Type_t::f32, Shape{}); + auto param1 = make_shared(element::Type_t::f32, Shape{}); try { auto conv = make_shared(param0, param1); @@ -768,8 +777,8 @@ TEST(type_prop, conv_invalid_0d_input) TEST(type_prop, conv_invalid_1d_input) { // Deduce type - auto param0 = make_shared(element::f32, Shape{2}); - auto param1 = make_shared(element::f32, Shape{2}); + auto param0 = make_shared(element::Type_t::f32, Shape{2}); + auto param1 = make_shared(element::Type_t::f32, Shape{2}); try { auto conv = make_shared(param0, param1); @@ -793,8 +802,8 @@ TEST(type_prop, conv_invalid_1d_input) TEST(type_prop, conv_invalid_2d_input) { // Deduce type - auto param0 = make_shared(element::f32, Shape{2, 6}); - auto param1 = make_shared(element::f32, Shape{2, 6}); + auto param0 = make_shared(element::Type_t::f32, Shape{2, 6}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 6}); try { auto conv = make_shared(param0, param1); @@ -818,8 +827,8 @@ TEST(type_prop, conv_invalid_2d_input) TEST(type_prop, conv_invalid_0_batch_size) { // Deduce type - auto param0 = make_shared(element::f32, Shape{0, 6, 1}); - auto param1 = make_shared(element::f32, Shape{0, 6, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{0, 6, 1}); + auto param1 = make_shared(element::Type_t::f32, Shape{0, 6, 1}); try { auto conv = make_shared(param0, param1); @@ -840,8 +849,8 @@ TEST(type_prop, conv_invalid_0_batch_size) TEST(type_prop, conv_invalid_0_input_channels) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 0, 1}); - auto param1 = make_shared(element::f32, Shape{5, 0, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 0, 1}); + auto param1 = make_shared(element::Type_t::f32, Shape{5, 0, 1}); try { auto conv = make_shared(param0, param1); @@ -864,8 +873,8 @@ TEST(type_prop, conv_invalid_0_input_channels) TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_many) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{5, 2, 3, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, 
Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{5, 2, 3, 3, 3}); try { auto conv = make_shared(param0, param1); @@ -886,8 +895,8 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_many) TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_few) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{5, 2, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{5, 2, 3}); try { auto conv = make_shared(param0, param1); @@ -908,8 +917,8 @@ TEST(type_prop, conv_invalid_wrong_number_of_filter_dimensions_too_few) TEST(type_prop, conv_invalid_0_output_channels) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{0, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{0, 2, 3, 3}); try { auto conv = make_shared(param0, param1); @@ -930,8 +939,8 @@ TEST(type_prop, conv_invalid_0_output_channels) TEST(type_prop, conv_invalid_input_channel_mismatch) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 3, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 3, 3, 3}); try { auto conv = make_shared(param0, param1); @@ -955,8 +964,8 @@ TEST(type_prop, conv_invalid_input_channel_mismatch) TEST(type_prop, conv_invalid_movement_stride_rank) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1, Strides{2, 3, 8}); @@ -984,8 +993,8 @@ TEST(type_prop, conv_invalid_movement_stride_rank) TEST(type_prop, conv_invalid_window_dilation_stride_rank) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = @@ -1014,8 +1023,8 @@ TEST(type_prop, conv_invalid_window_dilation_stride_rank) TEST(type_prop, conv_invalid_data_dilation_stride_rank) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1049,8 +1058,8 @@ TEST(type_prop, conv_invalid_data_dilation_stride_rank) TEST(type_prop, conv_invalid_padding_below_rank) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1083,8 +1092,8 @@ TEST(type_prop, conv_invalid_padding_below_rank) TEST(type_prop, conv_invalid_padding_above_rank) { // Deduce type - auto param0 = make_shared(element::f32, 
Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1117,8 +1126,8 @@ TEST(type_prop, conv_invalid_padding_above_rank) TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1146,8 +1155,8 @@ TEST(type_prop, conv_invalid_input_spatial_size_negative_after_padding) TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1175,8 +1184,8 @@ TEST(type_prop, conv_invalid_input_spatial_size_zero_after_padding) TEST(type_prop, conv_invalid_input_spatial_size_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 0, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 0, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1); @@ -1199,8 +1208,8 @@ TEST(type_prop, conv_invalid_input_spatial_size_0) TEST(type_prop, conv_invalid_window_size_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 0}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 0}); try { auto conv = make_shared(param0, param1); @@ -1223,8 +1232,8 @@ TEST(type_prop, conv_invalid_window_size_0) TEST(type_prop, conv_invalid_window_dilation_stride_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1, Strides{2, 3}, Strides{2, 0}); @@ -1247,8 +1256,8 @@ TEST(type_prop, conv_invalid_window_dilation_stride_0) TEST(type_prop, conv_invalid_data_dilation_stride_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, @@ -1277,8 +1286,8 @@ TEST(type_prop, conv_invalid_data_dilation_stride_0) TEST(type_prop, conv_invalid_dilated_window_too_large) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 8, 8}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 8, 8}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1, Strides{1, 1}, Strides{4, 
4}); @@ -1301,8 +1310,8 @@ TEST(type_prop, conv_invalid_dilated_window_too_large) TEST(type_prop, conv_invalid_movement_stride_0) { // Deduce type - auto param0 = make_shared(element::f32, Shape{6, 2, 10, 10}); - auto param1 = make_shared(element::f32, Shape{6, 2, 3, 3}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 2, 10, 10}); + auto param1 = make_shared(element::Type_t::f32, Shape{6, 2, 3, 3}); try { auto conv = make_shared(param0, param1, Strides{0, 1}); @@ -1332,8 +1341,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_ok) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1343,7 +1352,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_ok) padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -1357,8 +1366,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_rank_wrong CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1398,8 +1407,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_strides_dim_zero) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1435,8 +1444,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_rank_wron CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1476,8 +1485,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_window_dilation_dim_zero) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1513,8 +1522,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_below_rank_wrong) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1554,8 +1563,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_padding_above_rank_wrong) 
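All of the deduced shapes asserted in the tests above, and the "dilated window too large" rejection, follow from one piece of per-axis arithmetic: data dilation stretches the input to `dd*(in-1)+1`, window dilation stretches the kernel to `dw*(w-1)+1`, and the output size is the floor-divided count of stride steps that fit in the padded result. A minimal standalone sketch (plain integers instead of the `Strides`/`CoordinateDiff` wrappers; the data-dilation value of 3 in the last check is not visible in this hunk and is inferred from the asserted 285):

```C++
#include <cassert>
#include <cstdint>

// Output length of one spatial axis, matching the expectations asserted above.
// in: input size, w: window size, s: movement stride,
// dw: window dilation, dd: data dilation, pb/pa: padding below/above.
int64_t conv_out_dim(int64_t in, int64_t w, int64_t s,
                     int64_t dw = 1, int64_t dd = 1,
                     int64_t pb = 0, int64_t pa = 0)
{
    const int64_t dilated_in = dd * (in - 1) + 1; // data dilation
    const int64_t dilated_w = dw * (w - 1) + 1;   // effective window size
    const int64_t padded_in = dilated_in + pb + pa;
    return (padded_in - dilated_w) / s + 1;       // floor division
}

int main()
{
    assert(conv_out_dim(100, 10, 1, 1, 1, 2, 3) == 96);  // conv_1d_deduce_padded
    assert(conv_out_dim(100, 10, 2) == 46);              // conv_1d_deduce_strided
    assert(conv_out_dim(100, 10, 2, 1, 1, 2, 3) == 48);  // conv_1d_deduce_strided_padded
    assert(conv_out_dim(100, 10, 1, 2) == 82);           // conv_1d_deduce_window_dilated
    assert(conv_out_dim(100, 10, 1, 2, 3, 2, 3) == 285); // ..._data_dilated_padded
    // conv_invalid_dilated_window_too_large: a window of 3 dilated by 4
    // spans (3 - 1) * 4 + 1 = 9 elements, which exceeds the 8-wide input.
    assert(4 * (3 - 1) + 1 > 8);
    return 0;
}
```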
CoordinateDiff padding_above{0, 0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1595,8 +1604,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_rank_wrong) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1636,8 +1645,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_dynamic_data_dilation_dim_zero) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 0}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1673,8 +1682,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_ok) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1684,7 +1693,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_ok) padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -1698,8 +1707,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_data_batch_rank_wr CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1741,8 +1750,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_o CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1752,7 +1761,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_o padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()})); } @@ -1768,8 +1777,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_batch_size_known_z CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, 
data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1804,8 +1813,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1815,7 +1824,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -1830,8 +1839,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_dynamic_input_channel_coun CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1868,8 +1877,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1879,7 +1888,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), 32, Dimension::dynamic(), Dimension::dynamic()})); } @@ -1894,8 +1903,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_output_channel_cou CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1929,8 +1938,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -1940,7 +1949,7 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); 
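// Note: same_scheme() is an exact structural match, not mere compatibility:
// the ranks must agree and each dimension pair must be identical, with a
// dynamic dimension matching only another dynamic dimension (for example,
// PartialShape{64}.same_scheme(PartialShape{Dimension::dynamic()}) is false).
// That is why this case can assert PartialShape::dynamic(4), while the
// batch-size-known and output-channel-count-known cases above must spell out
// {64, ?, ?, ?} and {?, 32, ?, ?} respectively.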
ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -1954,8 +1963,8 @@ TEST(type_prop, conv_partial_rank_dynamic_rank_static_dynamic_input_channel_coun CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -1991,8 +2000,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_ok) CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2002,7 +2011,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_ok) padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -2016,8 +2025,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_arg_ranks_m CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2054,8 +2063,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2065,7 +2074,7 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } @@ -2081,8 +2090,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_input_chann CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2119,8 +2128,8 @@ TEST(type_prop, conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspat CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2130,7 +2139,7 @@ TEST(type_prop, 
conv_partial_rank_static_dynamic_rank_static_dynamic_all_nonspat padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, Dimension::dynamic(), Dimension::dynamic()})); } @@ -2146,8 +2155,8 @@ TEST(type_prop, CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2157,7 +2166,7 @@ TEST(type_prop, padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 196, Dimension::dynamic()})); } @@ -2174,8 +2183,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2213,8 +2222,8 @@ TEST( CoordinateDiff padding_above{-1, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2224,7 +2233,7 @@ TEST( padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 1, Dimension::dynamic()})); } @@ -2241,8 +2250,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{2, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2252,7 +2261,7 @@ TEST( padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 199, Dimension::dynamic()})); } @@ -2269,8 +2278,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{2, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2280,7 +2289,7 @@ TEST( padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 67, 
Dimension::dynamic()})); } @@ -2297,8 +2306,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2336,8 +2345,8 @@ TEST( CoordinateDiff padding_above{0, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2375,8 +2384,8 @@ TEST( CoordinateDiff padding_above{0, -1}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2386,7 +2395,7 @@ TEST( padding_above, data_dilation_strides); - ASSERT_EQ(conv->get_output_element_type(0), element::f32); + ASSERT_EQ(conv->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(conv->get_output_partial_shape(0).same_scheme( PartialShape{64, 100, 196, Dimension::dynamic()})); } @@ -2403,8 +2412,8 @@ TEST( CoordinateDiff padding_above{0, -20}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2442,8 +2451,8 @@ TEST( CoordinateDiff padding_above{0, -20}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2481,8 +2490,8 @@ TEST(type_prop, conv_partial_dynamic_et) CoordinateDiff padding_above{-1, 0}; Strides data_dilation_strides{1, 1}; - auto param0 = make_shared(element::dynamic, data_batch_shape); - auto param1 = make_shared(element::dynamic, filters_shape); + auto param0 = make_shared(element::Type_t::dynamic, data_batch_shape); + auto param1 = make_shared(element::Type_t::dynamic, filters_shape); auto conv = make_shared(param0, param1, @@ -2500,11 +2509,11 @@ TEST(type_prop, conv_partial_dynamic_et) TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic) { Shape shape_filter{6, 3, 3, 3}; - auto filters = make_shared(element::f32, shape_filter); + auto filters = make_shared(element::Type_t::f32, shape_filter); Shape shape_delta{2, 6, 3, 3}; - auto deltas = make_shared(element::f32, shape_delta); + auto deltas = make_shared(element::Type_t::f32, shape_delta); Shape shape_data_batch_shape{2, 3, 5, 5}; - auto data_batch_shape = make_shared(element::i64, Shape{2, 3, 5, 5}); + auto data_batch_shape = make_shared(element::Type_t::i64, Shape{2, 3, 5, 5}); auto strides = Strides{1, 1}; auto dilations = Strides{1, 1}; auto padding_begin = CoordinateDiff{0, 0}; @@ -2519,9 +2528,9 @@ TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic) TEST(type_prop, conv_bprop_data_v1_output_partial_shape_dynamic_static_rank) { PartialShape 
shape_filter{20, 10, 3, 3}; - auto filters = make_shared(element::f32, shape_filter); + auto filters = make_shared(element::Type_t::f32, shape_filter); PartialShape shape_delta{Dimension(), 20, 224, 224}; - auto deltas = make_shared(element::f32, shape_delta); + auto deltas = make_shared(element::Type_t::f32, shape_delta); auto strides = Strides{2, 2}; auto dilations = Strides{1, 1}; auto padding_begin = CoordinateDiff{1, 1}; @@ -2546,8 +2555,8 @@ TEST(type_prop, conv_v1_partial_rank) CoordinateDiff padding_below{0, 0}; CoordinateDiff padding_above{0, 0}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared(param0, param1, @@ -2569,8 +2578,8 @@ TEST(type_prop, conv_v1_partial_auto_padding_same) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2590,8 +2599,8 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_lower) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2611,8 +2620,8 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_nc_dims_dynamic_same_upper) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2632,8 +2641,8 @@ TEST(type_prop, conv_v1_partial_auto_padding_same_spatial_dims_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2654,8 +2663,8 @@ TEST(type_prop, conv_v1_partial_data_shape_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -2676,10 +2685,10 @@ TEST(type_prop, conv_bprop_v1_partial_auto_padding_upper) 
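The SAME_UPPER/SAME_LOWER cases here and in the conv_v1_partial_auto_padding tests above derive the padding from the target output size instead of taking it as an input. A hedged sketch of the usual convention (out = ceil(in / stride), with the total padding chosen to make the window arithmetic land on that size and SAME_LOWER placing the odd unit at the beginning; treat the exact rounding as an assumption rather than a statement of ngraph's implementation):

```C++
#include <algorithm>
#include <cassert>
#include <cstdint>

// SAME_* auto-padding for one spatial axis. `window` is the already-dilated
// window size; `lower` selects SAME_LOWER (extra pad at the start) versus
// SAME_UPPER (extra pad at the end).
void same_padding(int64_t in, int64_t window, int64_t stride, bool lower,
                  int64_t& pad_begin, int64_t& pad_end)
{
    const int64_t out = (in + stride - 1) / stride; // ceil(in / stride)
    const int64_t total =
        std::max<int64_t>(0, (out - 1) * stride + window - in);
    pad_begin = lower ? (total + 1) / 2 : total / 2;
    pad_end = total - pad_begin;
}

int main()
{
    int64_t pb, pe;
    same_padding(10, 3, 2, /*lower=*/false, pb, pe); // out = 5, total pad = 1
    assert(pb == 0 && pe == 1);                      // SAME_UPPER
    same_padding(10, 3, 2, /*lower=*/true, pb, pe);
    assert(pb == 1 && pe == 0);                      // SAME_LOWER
    return 0;
}
```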
Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto in1 = make_shared(element::f32, shape1); - auto in2 = make_shared(element::f32, shape2); + auto in1 = make_shared(element::Type_t::f32, shape1); + auto in2 = make_shared(element::Type_t::f32, shape2); std::vector data = {1, 74}; - element::Type type = element::i64; + element::Type type = element::Type_t::i64; auto in3 = make_shared(type, shape3, data); auto conv = make_shared( @@ -2701,10 +2710,10 @@ TEST(type_prop, conv_bprop_v1_partial_auto_padding_lower) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto in1 = make_shared(element::f32, shape1); - auto in2 = make_shared(element::f32, shape2); + auto in1 = make_shared(element::Type_t::f32, shape1); + auto in2 = make_shared(element::Type_t::f32, shape2); std::vector data = {1, 74}; - element::Type type = element::i64; + element::Type type = element::Type_t::i64; auto in3 = make_shared(type, shape3, data); auto conv = make_shared( @@ -2721,9 +2730,9 @@ TEST(type_prop, deformable_conv_incorrect_group) const PartialShape deformable_values_shape{1, 50, 5, 5}; const PartialShape filters_shape{4, 3, 5, 5}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, deformable_values_shape); - auto param2 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, deformable_values_shape); + auto param2 = make_shared(element::Type_t::f32, filters_shape); try { @@ -2770,9 +2779,9 @@ TEST(type_prop, deformable_conv_incorrect_deformable_group) const PartialShape deformable_values_shape{1, 50, 5, 5}; const PartialShape filters_shape{3, 3, 5, 5}; - auto param0 = make_shared(element::f32, data_batch_shape); - auto param1 = make_shared(element::f32, deformable_values_shape); - auto param2 = make_shared(element::f32, filters_shape); + auto param0 = make_shared(element::Type_t::f32, data_batch_shape); + auto param1 = make_shared(element::Type_t::f32, deformable_values_shape); + auto param2 = make_shared(element::Type_t::f32, filters_shape); try { diff --git a/ngraph/test/type_prop/ctc_greedy_decoder.cpp b/ngraph/test/type_prop/ctc_greedy_decoder.cpp index b02593244de..119c5ceb3ec 100644 --- a/ngraph/test/type_prop/ctc_greedy_decoder.cpp +++ b/ngraph/test/type_prop/ctc_greedy_decoder.cpp @@ -26,10 +26,10 @@ TEST(type_prop, ctc_greedy_decoder_static_shapes) PartialShape logits_shape{100, 3, 1200}; PartialShape seq_mask_shape{100, 3}; Shape out_shape{3, 100, 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); } @@ -38,10 +38,10 @@ TEST(type_prop, ctc_greedy_decoder_output_static_shape1) PartialShape logits_shape{Dimension::dynamic(), 3, 1200}; PartialShape seq_mask_shape{100, 3}; Shape out_shape{3, 100, 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), 
element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); } @@ -50,10 +50,10 @@ TEST(type_prop, ctc_greedy_decoder_output_static_shape2) PartialShape logits_shape{Dimension::dynamic(), 3, 1200}; PartialShape seq_mask_shape{100, Dimension::dynamic()}; Shape out_shape{3, 100, 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); } @@ -62,10 +62,10 @@ TEST(type_prop, ctc_greedy_decoder_dynamic_shapes) PartialShape logits_shape{Dimension::dynamic(), Dimension::dynamic(), 1200}; PartialShape seq_mask_shape{Dimension::dynamic(), Dimension::dynamic()}; PartialShape out_shape{Dimension::dynamic(), Dimension::dynamic(), 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G->get_output_partial_shape(0).same_scheme(out_shape)); } @@ -74,10 +74,10 @@ TEST(type_prop, ctc_greedy_decoder_dynamic_ranks1) PartialShape logits_shape = PartialShape::dynamic(); PartialShape seq_mask_shape{100, Dimension::dynamic()}; PartialShape out_shape{Dimension::dynamic(), 100, 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G->get_output_partial_shape(0).same_scheme(out_shape)); } @@ -86,10 +86,10 @@ TEST(type_prop, ctc_greedy_decoder_dynamic_ranks2) PartialShape logits_shape = PartialShape::dynamic(); PartialShape seq_mask_shape = PartialShape::dynamic(); PartialShape out_shape{Dimension::dynamic(), Dimension::dynamic(), 1, 1}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); auto G = make_shared(P, I, false); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G->get_output_partial_shape(0).same_scheme(out_shape)); } @@ -97,8 +97,8 @@ TEST(type_prop, ctc_greedy_decoder_incorrect_rank) { PartialShape logits_shape{Dimension::dynamic(), 3, 1200, 5}; PartialShape seq_mask_shape{100, 3}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); try { @@ -121,8 +121,8 @@ TEST(type_prop, ctc_greedy_decoder_incorrect_rank2) { PartialShape logits_shape{Dimension::dynamic(), 3, 1200}; PartialShape seq_mask_shape{100, 3, 2}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I 
= make_shared(element::Type_t::f32, seq_mask_shape); try { @@ -145,8 +145,8 @@ TEST(type_prop, ctc_greedy_decoder_mismatched_dim1) { PartialShape logits_shape{100, 4, 1200}; PartialShape seq_mask_shape{100, 3}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); try { @@ -169,8 +169,8 @@ TEST(type_prop, ctc_greedy_decoder_mismatched_dim2) { PartialShape logits_shape{101, 3, 1200}; PartialShape seq_mask_shape{100, 3}; - auto P = make_shared(element::f32, logits_shape); - auto I = make_shared(element::f32, seq_mask_shape); + auto P = make_shared(element::Type_t::f32, logits_shape); + auto I = make_shared(element::Type_t::f32, seq_mask_shape); try { diff --git a/ngraph/test/type_prop/ctc_loss.cpp b/ngraph/test/type_prop/ctc_loss.cpp index 2b2cc6f1847..4933c1c24c6 100644 --- a/ngraph/test/type_prop/ctc_loss.cpp +++ b/ngraph/test/type_prop/ctc_loss.cpp @@ -24,91 +24,92 @@ using namespace ngraph; TEST(type_prop, ctc_loss) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length, blank_index); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f32); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f32); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_no_blank_index) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f32); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f32); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_output_type) { // create inputs - auto logits = make_shared(element::f64, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f64, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = 
make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length, blank_index); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f64); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f64); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_non_default_parameters) { // create inputs - auto logits = make_shared(element::f64, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f64, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared( logits, logit_length, labels, label_length, blank_index, true, false, false); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f64); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f64); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_dynamic_input) { // create inputs - auto logits = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 120, 28}); + auto logits = make_shared(element::Type_t::f32, + PartialShape{Dimension::dynamic(), 120, 28}); auto logit_length = - make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto labels = make_shared(element::i32, PartialShape{Dimension::dynamic(), 120}); + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto labels = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic(), 120}); auto label_length = - make_shared(element::i32, PartialShape{Dimension::dynamic()}); - auto blank_index = make_shared(element::i32, Shape{}); + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length, blank_index); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f32); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f32); EXPECT_TRUE( ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic()})); } @@ -116,31 +117,32 @@ TEST(type_prop, ctc_loss_dynamic_input) TEST(type_prop, ctc_loss_partly_dynamic_input) { // create inputs - auto logits = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 120, 28}); - auto logit_length = make_shared(element::i32, PartialShape{10}); - auto labels = make_shared(element::i32, PartialShape{Dimension::dynamic(), 120}); + auto logits = make_shared(element::Type_t::f32, + PartialShape{Dimension::dynamic(), 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, PartialShape{10}); + auto labels = + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic(), 120}); auto label_length = - make_shared(element::i32, PartialShape{Dimension::dynamic()}); - 
auto blank_index = make_shared(element::i32, Shape{}); + make_shared(element::Type_t::i32, PartialShape{Dimension::dynamic()}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); // create CTCLoss node auto ctc_loss = make_shared(logits, logit_length, labels, label_length, blank_index); // check type and shape infer - EXPECT_EQ(ctc_loss->get_element_type(), element::f32); + EXPECT_EQ(ctc_loss->get_element_type(), element::Type_t::f32); EXPECT_TRUE(ctc_loss->get_output_partial_shape(0).same_scheme(PartialShape{10})); } TEST(type_prop, ctc_loss_fail_inputs_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 40, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 40, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -164,11 +166,11 @@ TEST(type_prop, ctc_loss_fail_inputs_dim) TEST(type_prop, ctc_loss_fail_logit_length_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10, 20}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10, 20}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -192,11 +194,11 @@ TEST(type_prop, ctc_loss_fail_logit_length_dim) TEST(type_prop, ctc_loss_fail_labels_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -220,11 +222,11 @@ TEST(type_prop, ctc_loss_fail_labels_dim) TEST(type_prop, ctc_loss_fail_label_length_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10, 40}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10, 40}); + auto blank_index = 
make_shared(element::Type_t::i32, Shape{}); try { @@ -248,11 +250,11 @@ TEST(type_prop, ctc_loss_fail_label_length_dim) TEST(type_prop, ctc_loss_fail_blank_index_dim) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{10}); - auto blank_index = make_shared(element::i32, Shape{4}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{10}); + auto blank_index = make_shared(element::Type_t::i32, Shape{4}); try { @@ -276,11 +278,11 @@ TEST(type_prop, ctc_loss_fail_blank_index_dim) TEST(type_prop, ctc_loss_fail_batch_dim_mismatch) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 120}); - auto label_length = make_shared(element::i32, Shape{40}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 120}); + auto label_length = make_shared(element::Type_t::i32, Shape{40}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -307,11 +309,11 @@ TEST(type_prop, ctc_loss_fail_batch_dim_mismatch) TEST(type_prop, ctc_loss_fail_time_dim_mismatch) { // create inputs - auto logits = make_shared(element::f32, Shape{10, 120, 28}); - auto logit_length = make_shared(element::i32, Shape{10}); - auto labels = make_shared(element::i32, Shape{10, 130}); - auto label_length = make_shared(element::i32, Shape{40}); - auto blank_index = make_shared(element::i32, Shape{}); + auto logits = make_shared(element::Type_t::f32, Shape{10, 120, 28}); + auto logit_length = make_shared(element::Type_t::i32, Shape{10}); + auto labels = make_shared(element::Type_t::i32, Shape{10, 130}); + auto label_length = make_shared(element::Type_t::i32, Shape{40}); + auto blank_index = make_shared(element::Type_t::i32, Shape{}); try { diff --git a/ngraph/test/type_prop/deformable_convolution.cpp b/ngraph/test/type_prop/deformable_convolution.cpp index 508ce147176..83b97c12e9d 100644 --- a/ngraph/test/type_prop/deformable_convolution.cpp +++ b/ngraph/test/type_prop/deformable_convolution.cpp @@ -34,9 +34,9 @@ TEST(type_prop, deformable_conv_v1_partial_auto_padding_same) const int64_t group = 4; const int64_t deformable_group = 2; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto deformable_values = make_shared(element::f32, deformable_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto deformable_values = make_shared(element::Type_t::f32, deformable_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto deformable_conv = make_shared(data_batch, deformable_values, @@ -67,9 +67,9 @@ TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_nc_dims_dynamic_sam const int64_t group = 4; const int64_t deformable_group = 2; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto deformable_values = 
make_shared(element::f32, deformable_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto deformable_values = make_shared(element::Type_t::f32, deformable_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto deformable_conv = make_shared(data_batch, deformable_values, @@ -101,9 +101,9 @@ TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_nc_dims_dynamic_sam const int64_t group = 4; const int64_t deformable_group = 2; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto deformable_values = make_shared(element::f32, deformable_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto deformable_values = make_shared(element::Type_t::f32, deformable_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto deformable_conv = make_shared(data_batch, deformable_values, @@ -135,9 +135,9 @@ TEST(type_prop, deformable_conv_v1_partial_auto_padding_same_spatial_dims_dynami const int64_t group = 4; const int64_t deformable_group = 2; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto deformable_values = make_shared(element::f32, deformable_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto deformable_values = make_shared(element::Type_t::f32, deformable_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto deformable_conv = make_shared(data_batch, deformable_values, diff --git a/ngraph/test/type_prop/deformable_psroi_pooling.cpp b/ngraph/test/type_prop/deformable_psroi_pooling.cpp index d4b204763df..7d71de721a4 100644 --- a/ngraph/test/type_prop/deformable_psroi_pooling.cpp +++ b/ngraph/test/type_prop/deformable_psroi_pooling.cpp @@ -23,9 +23,9 @@ using namespace ngraph; TEST(type_prop, deformable_psroi_pooling_output_shape) { - auto input = make_shared(element::f32, Shape{1, 1024, 63, 38}); - auto coords = make_shared(element::f32, Shape{300, 5}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 1024, 63, 38}); + auto coords = make_shared(element::Type_t::f32, Shape{300, 5}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); const int64_t output_dim = 882; const float spatial_scale = 0.0625; const int64_t group_size = 3; @@ -38,9 +38,9 @@ TEST(type_prop, deformable_psroi_pooling_output_shape) TEST(type_prop, deformable_psroi_pooling_output_shape_2) { - auto input = make_shared(element::f32, Shape{1, 7938, 38, 38}); - auto coords = make_shared(element::f32, Shape{300, 5}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 7938, 38, 38}); + auto coords = make_shared(element::Type_t::f32, Shape{300, 5}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); const int64_t output_dim = 162; const float spatial_scale = 0.0625; const int64_t group_size = 7; @@ -53,9 +53,9 @@ TEST(type_prop, deformable_psroi_pooling_output_shape_2) TEST(type_prop, deformable_psroi_pooling_invalid_input_rank) { - auto input = make_shared(element::f32, Shape{1, 2, 3}); - auto coords = make_shared(element::f32, Shape{1, 2}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + 
auto coords = make_shared(element::Type_t::f32, Shape{1, 2}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); const int64_t output_dim = 4; const float spatial_scale = 0.9; const int64_t group_size = 7; @@ -79,9 +79,9 @@ TEST(type_prop, deformable_psroi_pooling_invalid_input_rank) TEST(type_prop, deformable_psroi_pooling_invalid_box_coordinates_rank) { - auto input = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto coords = make_shared(element::f32, Shape{1, 2, 3}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto coords = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); const int64_t output_dim = 4; const float spatial_scale = 0.9; const int64_t group_size = 7; @@ -106,9 +106,9 @@ TEST(type_prop, deformable_psroi_pooling_invalid_box_coordinates_rank) TEST(type_prop, deformable_psroi_pooling_invalid_offstes_rank) { - auto input = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto coords = make_shared(element::f32, Shape{1, 2}); - auto offsets = make_shared(element::f32, Shape{1, 2, 3, 4, 5}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto coords = make_shared(element::Type_t::f32, Shape{1, 2}); + auto offsets = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4, 5}); const int64_t output_dim = 4; const float spatial_scale = 0.9; const int64_t group_size = 7; diff --git a/ngraph/test/type_prop/depth_to_space.cpp b/ngraph/test/type_prop/depth_to_space.cpp index 4375b9ab818..779ddd13d92 100644 --- a/ngraph/test/type_prop/depth_to_space.cpp +++ b/ngraph/test/type_prop/depth_to_space.cpp @@ -23,57 +23,57 @@ using namespace ngraph; TEST(type_prop, depth_to_space_output_shape_block_first_4D) { - auto A = make_shared(element::f32, Shape{1, 128, 8, 8}); + auto A = make_shared(element::Type_t::f32, Shape{1, 128, 8, 8}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 8); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 2, 64, 64})); } TEST(type_prop, depth_to_space_output_shape_block_first_4D_2) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 1080, 1616}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 3, 2 * 1080, 2 * 1616})); } TEST(type_prop, depth_to_space_output_shape_block_first_5D) { - auto A = make_shared(element::f32, Shape{1, 16, 3, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 16, 3, 1080, 1616}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::BLOCKS_FIRST, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 2, 2 * 3, 2 * 1080, 2 * 1616})); } TEST(type_prop, depth_to_space_output_shape_depth_first_4D) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 1080, 1616}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2); 
- ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 3, 2 * 1080, 2 * 1616})); } TEST(type_prop, depth_to_space_output_shape_depth_first_5D) { - auto A = make_shared(element::f32, Shape{1, 16, 3, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 16, 3, 1080, 1616}); auto space_to_depth = make_shared(A, op::DepthToSpace::DepthToSpaceMode::DEPTH_FIRST, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 2, 2 * 3, 2 * 1080, 2 * 1616})); } TEST(type_prop, depth_to_space_input_rank_not_supported) { - auto A = make_shared(element::f32, Shape{1, 8}); + auto A = make_shared(element::Type_t::f32, Shape{1, 8}); try { auto space_to_depth = @@ -94,7 +94,7 @@ TEST(type_prop, depth_to_space_input_rank_not_supported) TEST(type_prop, depth_to_space_blocksize_not_matched) { - auto A = make_shared(element::f32, Shape{1, 7, 4, 4}); + auto A = make_shared(element::Type_t::f32, Shape{1, 7, 4, 4}); try { auto space_to_depth = diff --git a/ngraph/test/type_prop/dyn_reshape.cpp b/ngraph/test/type_prop/dyn_reshape.cpp index 760ccf9917f..a8b571ffac2 100644 --- a/ngraph/test/type_prop/dyn_reshape.cpp +++ b/ngraph/test/type_prop/dyn_reshape.cpp @@ -23,20 +23,22 @@ using namespace ngraph; TEST(type_prop, reshape_v1_arg_rank_static_pattern_zero) { - auto arg = make_shared(element::f32, Shape{2, 0, 2, 8}); - auto pattern = op::Constant::create(element::i64, Shape{4}, {1, 2, 0, 32}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 0, 2, 8}); + auto pattern = op::Constant::create(element::Type_t::i64, Shape{4}, {1, 2, 0, 32}); auto reshape_v1_static = make_shared(arg, pattern, true); EXPECT_EQ(reshape_v1_static->get_output_shape(0), Shape({1, 2, 2, 32})); - auto dynamic_arg = make_shared(element::f32, PartialShape::dynamic()); + auto dynamic_arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto reshape_v1_dynamic = make_shared(dynamic_arg, pattern, true); EXPECT_TRUE(reshape_v1_dynamic->get_output_partial_shape(0).same_scheme( PartialShape{1, 2, Dimension::dynamic(), 32})); try { - auto static_shape_parameter = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto reshape_output_pattern = op::Constant::create(element::i64, Shape{4}, {2, 2, 3, 4}); + auto static_shape_parameter = + make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto reshape_output_pattern = + op::Constant::create(element::Type_t::i64, Shape{4}, {2, 2, 3, 4}); auto reshape = make_shared(static_shape_parameter, reshape_output_pattern, true); FAIL() << "Expected failure on reshape construction"; diff --git a/ngraph/test/type_prop/elu.cpp b/ngraph/test/type_prop/elu.cpp index 3d2bf279594..82e29aeda75 100644 --- a/ngraph/test/type_prop/elu.cpp +++ b/ngraph/test/type_prop/elu.cpp @@ -24,8 +24,8 @@ using namespace ngraph; TEST(type_prop, elu) { Shape data_shape{2, 4}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); auto elu = make_shared(data, 1); - ASSERT_EQ(elu->get_element_type(), element::f32); + ASSERT_EQ(elu->get_element_type(), element::Type_t::f32); ASSERT_EQ(elu->get_shape(), data_shape); } diff --git a/ngraph/test/type_prop/embedding_segments_sum.cpp b/ngraph/test/type_prop/embedding_segments_sum.cpp index 58f28d3a0d0..dc118c78058 100644 --- 
a/ngraph/test/type_prop/embedding_segments_sum.cpp +++ b/ngraph/test/type_prop/embedding_segments_sum.cpp @@ -25,19 +25,19 @@ using namespace ngraph; TEST(type_prop, ess) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ess = make_shared( emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights); EXPECT_TRUE( ess->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 2})); EXPECT_TRUE(indices->get_partial_shape().same_scheme(per_sample_weights->get_partial_shape())); - EXPECT_EQ(ess->get_output_element_type(0), element::f32); + EXPECT_EQ(ess->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(segment_ids->get_partial_shape().rank().get_length(), 1); } @@ -45,12 +45,12 @@ TEST(type_prop, ess) TEST(type_prop, ess_dynamic_emb_table_number_segment) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ess = make_shared( emb_table, indices, segment_ids, num_segments, default_index, per_sample_weights); @@ -61,12 +61,12 @@ TEST(type_prop, ess_dynamic_emb_table_number_segment) TEST(type_prop, ess_fail_indices_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -86,12 +86,12 @@ TEST(type_prop, ess_fail_indices_element_type) TEST(type_prop, ess_fail_segment_ids_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto 
indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::f32, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::f32, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -111,12 +111,12 @@ TEST(type_prop, ess_fail_segment_ids_element_type) TEST(type_prop, ess_fail_number_segments_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::f32, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::f32, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -136,12 +136,12 @@ TEST(type_prop, ess_fail_number_segments_element_type) TEST(type_prop, ess_fail_default_index_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::f32, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::f32, Shape{}); try { @@ -161,12 +161,12 @@ TEST(type_prop, ess_fail_default_index_element_type) TEST(type_prop, ess_fail_mismatch_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i32, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i32, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -188,12 +188,12 @@ TEST(type_prop, ess_fail_mismatch_element_type) TEST(type_prop, ess_fail_mismatch_element_type_1) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto 
indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i32, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -215,12 +215,12 @@ TEST(type_prop, ess_fail_mismatch_element_type_1) TEST(type_prop, ess_fail_mismatch_element_type_2) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::i64, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::i64, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -242,12 +242,12 @@ TEST(type_prop, ess_fail_mismatch_element_type_2) TEST(type_prop, ess_fail_mismatch_element_type_3) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i32, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i32, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -270,12 +270,12 @@ TEST(type_prop, ess_fail_mismatch_element_type_3) TEST(type_prop, ess_fail_mismatch_shape) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{3}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -296,12 +296,12 @@ TEST(type_prop, ess_fail_mismatch_shape) TEST(type_prop, ess_fail_num_segments_scalar) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, 
Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{2}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{2}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -321,12 +321,12 @@ TEST(type_prop, ess_fail_num_segments_scalar) TEST(type_prop, ess_fail_default_index_scalar) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{2}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{2}); try { @@ -346,12 +346,12 @@ TEST(type_prop, ess_fail_default_index_scalar) TEST(type_prop, ess_fail_indices_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4, 2}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4, 2}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -371,12 +371,12 @@ TEST(type_prop, ess_fail_indices_1d) TEST(type_prop, ess_fail_segment_ids_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{3, 2}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{3, 2}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -396,12 +396,12 @@ TEST(type_prop, ess_fail_segment_ids_1d) TEST(type_prop, ess_fail_per_sample_weights_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, 
Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); - auto per_sample_weights = make_shared(element::f32, Shape{4, 2}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4, 2}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -421,26 +421,26 @@ TEST(type_prop, ess_fail_per_sample_weights_1d) TEST(type_prop, ess_4_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); auto ess = make_shared(emb_table, indices, segment_ids, num_segments); EXPECT_TRUE( ess->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 2})); - EXPECT_EQ(ess->get_output_element_type(0), element::f32); + EXPECT_EQ(ess->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(segment_ids->get_partial_shape().rank().get_length(), 1); } TEST(type_prop, ess_fail_indices_element_type_4_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = make_shared(element::Type_t::i64, Shape{}); try { @@ -460,15 +460,15 @@ TEST(type_prop, ess_fail_indices_element_type_4_args_api) TEST(type_prop, ess_num_segment_const) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto segment_ids = make_shared(element::i64, Shape{4}); - auto num_segments = opset3::Constant::create(element::i64, Shape{}, {3}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto segment_ids = make_shared(element::Type_t::i64, Shape{4}); + auto num_segments = opset3::Constant::create(element::Type_t::i64, Shape{}, {3}); auto ess = make_shared(emb_table, indices, segment_ids, num_segments); EXPECT_TRUE(ess->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); - EXPECT_EQ(ess->get_output_element_type(0), element::f32); + EXPECT_EQ(ess->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(segment_ids->get_partial_shape().rank().get_length(), 1); -} \ No newline at end of file +} diff --git a/ngraph/test/type_prop/embeddingbag_offsetssum.cpp b/ngraph/test/type_prop/embeddingbag_offsetssum.cpp index 5b74d18d4c7..6d4a71b1f4d 100644 --- a/ngraph/test/type_prop/embeddingbag_offsetssum.cpp +++ b/ngraph/test/type_prop/embeddingbag_offsetssum.cpp @@ -23,17 
+23,17 @@ using namespace ngraph; TEST(type_prop, ebos) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebos = make_shared( emb_table, indices, offsets, default_index, per_sample_weights); EXPECT_TRUE(ebos->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); EXPECT_TRUE(indices->get_partial_shape().same_scheme(per_sample_weights->get_partial_shape())); - EXPECT_EQ(ebos->get_output_element_type(0), element::f32); + EXPECT_EQ(ebos->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(offsets->get_partial_shape().rank().get_length(), 1); } @@ -41,11 +41,11 @@ TEST(type_prop, ebos) TEST(type_prop, ebos_dynamic_emb_table) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebos = make_shared( emb_table, indices, offsets, default_index, per_sample_weights); @@ -56,11 +56,12 @@ TEST(type_prop, ebos_dynamic_emb_table) TEST(type_prop, ebos_dynamic_offsets) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, PartialShape{Dimension::dynamic()}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebos = make_shared( emb_table, indices, offsets, default_index, per_sample_weights); @@ -72,11 +73,12 @@ TEST(type_prop, ebos_dynamic_offsets) TEST(type_prop, ebos_dynamic_emb_table_offsets) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, PartialShape{Dimension::dynamic()}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = + make_shared(element::Type_t::i64, 
PartialShape{Dimension::dynamic()}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebos = make_shared( emb_table, indices, offsets, default_index, per_sample_weights); @@ -87,11 +89,11 @@ TEST(type_prop, ebos_dynamic_emb_table_offsets) TEST(type_prop, ebos_fail_indices_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -111,11 +113,11 @@ TEST(type_prop, ebos_fail_indices_element_type) TEST(type_prop, ebos_fail_offsets_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::f32, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::f32, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -135,11 +137,11 @@ TEST(type_prop, ebos_fail_offsets_element_type) TEST(type_prop, ebos_fail_default_index_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::f32, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::f32, Shape{}); try { @@ -159,11 +161,11 @@ TEST(type_prop, ebos_fail_default_index_element_type) TEST(type_prop, ebos_fail_mismatch_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i32, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i32, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -185,11 +187,11 @@ TEST(type_prop, ebos_fail_mismatch_element_type) TEST(type_prop, ebos_fail_mismatch_element_type_1) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, 
Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i32, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i32, Shape{}); try { @@ -211,11 +213,11 @@ TEST(type_prop, ebos_fail_mismatch_element_type_1) TEST(type_prop, ebos_fail_mismatch_element_type_2) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::i64, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::i64, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -237,11 +239,11 @@ TEST(type_prop, ebos_fail_mismatch_element_type_2) TEST(type_prop, ebos_fail_mismatch_shape) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{3}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -262,11 +264,11 @@ TEST(type_prop, ebos_fail_mismatch_shape) TEST(type_prop, ebos_fail_default_index_scalar) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{2}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{2}); try { @@ -286,11 +288,11 @@ TEST(type_prop, ebos_fail_default_index_scalar) TEST(type_prop, ebos_fail_indices_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4, 2}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4, 2}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -310,11 +312,11 @@ TEST(type_prop, ebos_fail_indices_1d) TEST(type_prop, 
ebos_fail_offsets_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3, 2}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3, 2}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -334,11 +336,11 @@ TEST(type_prop, ebos_fail_offsets_1d) TEST(type_prop, ebos_fail_per_sample_weights_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); - auto per_sample_weights = make_shared(element::f32, Shape{4, 2}); - auto default_index = make_shared(element::i64, Shape{}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4, 2}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); try { @@ -358,22 +360,22 @@ TEST(type_prop, ebos_fail_per_sample_weights_1d) TEST(type_prop, ebos_3_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); auto ebos = make_shared(emb_table, indices, offsets); EXPECT_TRUE(ebos->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); - EXPECT_EQ(ebos->get_output_element_type(0), element::f32); + EXPECT_EQ(ebos->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 1); EXPECT_EQ(offsets->get_partial_shape().rank().get_length(), 1); } TEST(type_prop, ebos_fail_indices_element_type_3_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{4}); - auto offsets = make_shared(element::i64, Shape{3}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{4}); + auto offsets = make_shared(element::Type_t::i64, Shape{3}); try { diff --git a/ngraph/test/type_prop/embeddingbag_packedsum.cpp b/ngraph/test/type_prop/embeddingbag_packedsum.cpp index 2ff631b51b3..6cb35339962 100644 --- a/ngraph/test/type_prop/embeddingbag_packedsum.cpp +++ b/ngraph/test/type_prop/embeddingbag_packedsum.cpp @@ -23,24 +23,24 @@ using namespace ngraph; TEST(type_prop, ebps) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3, 4}); auto ebps = make_shared(emb_table, indices, per_sample_weights); EXPECT_TRUE(ebps->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); 
EXPECT_TRUE(indices->get_partial_shape().same_scheme(per_sample_weights->get_partial_shape())); - EXPECT_EQ(ebps->get_output_element_type(0), element::f32); + EXPECT_EQ(ebps->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 2); } TEST(type_prop, ebps_dynamic_emb_table) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{3, 4}); - auto default_index = make_shared(element::i64, Shape{}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3, 4}); + auto default_index = make_shared(element::Type_t::i64, Shape{}); auto ebps = make_shared(emb_table, indices, per_sample_weights); @@ -50,10 +50,11 @@ TEST(type_prop, ebps_dynamic_emb_table) TEST(type_prop, ebps_dynamic_indices) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, PartialShape{Dimension::dynamic(), 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic(), 4}); auto per_sample_weights = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 4}); + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 4}); auto ebps = make_shared(emb_table, indices, per_sample_weights); @@ -64,10 +65,11 @@ TEST(type_prop, ebps_dynamic_indices) TEST(type_prop, ebps_dynamic_emb_table_indices) { auto emb_table = - make_shared(element::f32, PartialShape{5, Dimension::dynamic()}); - auto indices = make_shared(element::i64, PartialShape{Dimension::dynamic(), 4}); + make_shared(element::Type_t::f32, PartialShape{5, Dimension::dynamic()}); + auto indices = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic(), 4}); auto per_sample_weights = - make_shared(element::f32, PartialShape{Dimension::dynamic(), 4}); + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), 4}); auto ebps = make_shared(emb_table, indices, per_sample_weights); @@ -77,9 +79,9 @@ TEST(type_prop, ebps_dynamic_emb_table_indices) TEST(type_prop, ebps_fail_indices_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3, 4}); try { @@ -99,9 +101,9 @@ TEST(type_prop, ebps_fail_indices_element_type) TEST(type_prop, ebps_fail_mismatch_element_type) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::i64, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::i64, Shape{3, 4}); try { @@ -123,9 +125,9 @@ TEST(type_prop, ebps_fail_mismatch_element_type) TEST(type_prop, ebps_fail_mismatch_shape) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights 
= make_shared(element::f32, Shape{4, 3}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4, 3}); try { @@ -146,9 +148,9 @@ TEST(type_prop, ebps_fail_mismatch_shape) TEST(type_prop, ebps_fail_indices_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{4}); - auto per_sample_weights = make_shared(element::f32, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{3, 4}); try { @@ -168,9 +170,9 @@ TEST(type_prop, ebps_fail_indices_1d) TEST(type_prop, ebps_fail_per_sample_weights_1d) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); - auto per_sample_weights = make_shared(element::f32, Shape{4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); + auto per_sample_weights = make_shared(element::Type_t::f32, Shape{4}); try { @@ -190,19 +192,19 @@ TEST(type_prop, ebps_fail_per_sample_weights_1d) TEST(type_prop, ebps_2_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::i64, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::i64, Shape{3, 4}); auto ebps = make_shared(emb_table, indices); EXPECT_TRUE(ebps->get_output_partial_shape(0).same_scheme(PartialShape{3, 2})); - EXPECT_EQ(ebps->get_output_element_type(0), element::f32); + EXPECT_EQ(ebps->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(indices->get_partial_shape().rank().get_length(), 2); } TEST(type_prop, ebps_fail_indices_element_type_2_args_api) { - auto emb_table = make_shared(element::f32, Shape{5, 2}); - auto indices = make_shared(element::f32, Shape{3, 4}); + auto emb_table = make_shared(element::Type_t::f32, Shape{5, 2}); + auto indices = make_shared(element::Type_t::f32, Shape{3, 4}); try { @@ -217,4 +219,4 @@ TEST(type_prop, ebps_fail_indices_element_type_2_args_api) { FAIL() << "INDICES type check failed for unexpected reason"; } -} \ No newline at end of file +} diff --git a/ngraph/test/type_prop/extractimagepatches.cpp b/ngraph/test/type_prop/extractimagepatches.cpp index de01b066f83..1427dc74f1c 100644 --- a/ngraph/test/type_prop/extractimagepatches.cpp +++ b/ngraph/test/type_prop/extractimagepatches.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, extractimagepatches_i32) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -31,13 +31,13 @@ TEST(type_prop, extractimagepatches_i32) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_i64) { - auto data = make_shared(element::i64, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i64, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 
3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -45,13 +45,13 @@ TEST(type_prop, extractimagepatches_i64) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i64); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i64); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_rates_change) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{2, 2}; @@ -59,13 +59,13 @@ TEST(type_prop, extractimagepatches_rates_change) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_input_shape_change) { - auto data = make_shared(element::i32, Shape{64, 3, 9, 9}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 9, 9}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{2, 2}; @@ -73,13 +73,13 @@ TEST(type_prop, extractimagepatches_input_shape_change) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 1, 1})); } TEST(type_prop, extractimagepatches_dynamic_shape) { - auto data = make_shared(element::i32, PartialShape::dynamic(4)); + auto data = make_shared(element::Type_t::i32, PartialShape::dynamic(4)); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{2, 2}; @@ -87,15 +87,15 @@ TEST(type_prop, extractimagepatches_dynamic_shape) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE( extractimagepatches->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, extractimagepatches_dynamic_batch_shape) { - auto data = - make_shared(element::i32, PartialShape{Dimension::dynamic(), 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, + PartialShape{Dimension::dynamic(), 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -103,14 +103,14 @@ TEST(type_prop, extractimagepatches_dynamic_batch_shape) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE(extractimagepatches->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), 27, 2, 2})); } TEST(type_prop, extractimagepatches_padding_same_lower1) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -118,13 +118,13 @@ 
TEST(type_prop, extractimagepatches_padding_same_lower1) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_padding_same_lower2) { - auto data = make_shared(element::i32, Shape{64, 3, 9, 9}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 9, 9}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -132,12 +132,12 @@ TEST(type_prop, extractimagepatches_padding_same_lower2) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 2})); } TEST(type_prop, extractimagepatches_padding_same_upper) { - auto data = make_shared(element::i32, Shape{64, 3, 11, 11}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 11, 11}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -145,13 +145,13 @@ TEST(type_prop, extractimagepatches_padding_same_upper) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 3, 3})); } TEST(type_prop, extractimagepatches_padding_same_upper2) { - auto data = make_shared(element::i32, Shape{64, 3, 6, 11}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 6, 11}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -159,13 +159,13 @@ TEST(type_prop, extractimagepatches_padding_same_upper2) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 2, 3})); } TEST(type_prop, extractimagepatches_zero_dim_inputs) { - auto data = make_shared(element::i32, Shape{64, 0, 0, 0}); + auto data = make_shared(element::Type_t::i32, Shape{64, 0, 0, 0}); auto sizes = Shape{3, 3}; auto strides = Strides{5, 5}; auto rates = Shape{1, 1}; @@ -173,13 +173,13 @@ TEST(type_prop, extractimagepatches_zero_dim_inputs) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 0, 0, 0})); } TEST(type_prop, extractimagepatches_large_stride_valid_padding) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{15, 15}; auto rates = Shape{1, 1}; @@ -187,13 +187,13 @@ TEST(type_prop, extractimagepatches_large_stride_valid_padding) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - 
EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 1, 1})); } TEST(type_prop, extractimagepatches_large_stride_same_padding) { - auto data = make_shared(element::i32, Shape{64, 3, 10, 10}); + auto data = make_shared(element::Type_t::i32, Shape{64, 3, 10, 10}); auto sizes = Shape{3, 3}; auto strides = Strides{15, 15}; auto rates = Shape{1, 1}; @@ -201,6 +201,6 @@ TEST(type_prop, extractimagepatches_large_stride_same_padding) auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); - EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::i32); + EXPECT_EQ(extractimagepatches->get_output_element_type(0), element::Type_t::i32); EXPECT_EQ(extractimagepatches->get_output_shape(0), (Shape{64, 27, 1, 1})); } diff --git a/ngraph/test/type_prop/fake_quantize.cpp b/ngraph/test/type_prop/fake_quantize.cpp index 9af0464d2df..6fba5c2b5c7 100644 --- a/ngraph/test/type_prop/fake_quantize.cpp +++ b/ngraph/test/type_prop/fake_quantize.cpp @@ -23,41 +23,41 @@ using namespace ngraph; TEST(type_prop, fake_quantize) { - const auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto input_low = make_shared(element::f32, Shape{}); - const auto input_high = make_shared(element::f32, Shape{}); - const auto output_low = make_shared(element::f32, Shape{}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto input_low = make_shared(element::Type_t::f32, Shape{}); + const auto input_high = make_shared(element::Type_t::f32, Shape{}); + const auto output_low = make_shared(element::Type_t::f32, Shape{}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); const int levels = 5; const auto fake_quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); - EXPECT_EQ(fake_quantize->get_element_type(), element::f32); + EXPECT_EQ(fake_quantize->get_element_type(), element::Type_t::f32); EXPECT_EQ(fake_quantize->get_shape(), (Shape{1, 2, 3, 4})); } TEST(type_prop, fake_quantize_autob) { - const auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto input_low = make_shared(element::f32, Shape{3, 1}); - const auto input_high = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto output_low = make_shared(element::f32, Shape{4}); - const auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto input_low = make_shared(element::Type_t::f32, Shape{3, 1}); + const auto input_high = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto output_low = make_shared(element::Type_t::f32, Shape{4}); + const auto output_high = make_shared(element::Type_t::f32, Shape{}); const int levels = 5; const auto fake_quantize = make_shared(data, input_low, input_high, output_low, output_high, levels); - EXPECT_EQ(fake_quantize->get_element_type(), element::f32); + EXPECT_EQ(fake_quantize->get_element_type(), element::Type_t::f32); EXPECT_EQ(fake_quantize->get_shape(), (Shape{1, 2, 3, 4})); } TEST(type_prop, fake_quantize_invalid_autob) { - const auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto input_low = make_shared(element::f32, Shape{3}); - auto input_high = make_shared(element::f32, Shape{}); - auto output_low = make_shared(element::f32, Shape{}); - 
auto output_high = make_shared(element::f32, Shape{}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto input_low = make_shared(element::Type_t::f32, Shape{3}); + auto input_high = make_shared(element::Type_t::f32, Shape{}); + auto output_low = make_shared(element::Type_t::f32, Shape{}); + auto output_high = make_shared(element::Type_t::f32, Shape{}); const int levels = 5; try diff --git a/ngraph/test/type_prop/gather.cpp b/ngraph/test/type_prop/gather.cpp index 7d74cdf196e..48e28a10645 100644 --- a/ngraph/test/type_prop/gather.cpp +++ b/ngraph/test/type_prop/gather.cpp @@ -28,11 +28,11 @@ TEST(type_prop, gather_axis_0) Shape params_shape{3, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {0}); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {0}); auto G = make_shared(P, I, A); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); ASSERT_EQ(G->get_axis(), 0); } @@ -42,20 +42,20 @@ TEST(type_prop, gather_axis_1) Shape params_shape{3, 3}; Shape indices_shape{1, 2}; Shape out_shape{3, 1, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); - auto A = op::Constant::create(element::i64, Shape{}, {1}); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {1}); auto G = make_shared(P, I, A); - ASSERT_EQ(G->get_element_type(), element::f32); + ASSERT_EQ(G->get_element_type(), element::Type_t::f32); ASSERT_EQ(G->get_shape(), out_shape); ASSERT_EQ(G->get_axis(), 1); } TEST(type_prop, gather_v1_incorrect_axis_shape) { - auto params = make_shared(element::f32, Shape{5, 6}); - auto indices = make_shared(element::i64, Shape{4}); - auto axis = make_shared(element::i64, Shape{2}); + auto params = make_shared(element::Type_t::f32, Shape{5, 6}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto axis = make_shared(element::Type_t::i64, Shape{2}); try { auto G = make_shared(params, indices, axis); @@ -75,9 +75,9 @@ TEST(type_prop, gather_v1_incorrect_axis_shape) TEST(type_prop, gather_v1_axis_out_of_input_rank) { - auto params = make_shared(element::f32, Shape{5, 6}); - auto indices = make_shared(element::i64, Shape{4}); - auto axis = make_shared(element::i64, Shape{1}, vector{2}); + auto params = make_shared(element::Type_t::f32, Shape{5, 6}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); + auto axis = make_shared(element::Type_t::i64, Shape{1}, vector{2}); try { auto G = make_shared(params, indices, axis); @@ -97,10 +97,11 @@ TEST(type_prop, gather_v1_axis_out_of_input_rank) TEST(type_prop, gather_v1_negative_axis) { - auto params = make_shared(element::f32, Shape{5, 6, 7}); - auto indices = make_shared(element::i64, Shape{4}); + auto params = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto indices = make_shared(element::Type_t::i64, Shape{4}); int64_t axis = -2; - auto axis_node = make_shared(element::i64, Shape{1}, vector{axis}); + auto axis_node = + make_shared(element::Type_t::i64, Shape{1}, vector{axis}); auto gather_v1 = make_shared(params, indices, 
axis_node); ASSERT_EQ(gather_v1->get_axis(), 1); } diff --git a/ngraph/test/type_prop/gather_nd.cpp b/ngraph/test/type_prop/gather_nd.cpp index ea4c6d23070..6501d7f0e4e 100644 --- a/ngraph/test/type_prop/gather_nd.cpp +++ b/ngraph/test/type_prop/gather_nd.cpp @@ -28,10 +28,10 @@ TEST(type_prop, gather_nd_slices_from_4d_batch_dims0) Shape params_shape{2, 3, 11, 12}; Shape indices_shape{2, 3, 2}; Shape out_shape{2, 3, 11, 12}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 0); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -40,10 +40,10 @@ TEST(type_prop, gather_nd_scalars_from_4d_batch_dims2) Shape params_shape{2, 3, 11, 12}; Shape indices_shape{2, 3, 2}; Shape out_shape{6}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -52,10 +52,10 @@ TEST(type_prop, gather_nd_slices_from_5d_batch_dims2) Shape params_shape{7, 5, 11, 12, 32}; Shape indices_shape{7, 5, 3, 1}; Shape out_shape{35, 3, 12, 32}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -64,10 +64,10 @@ TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim) PartialShape params_shape{7, Dimension::dynamic(), 11, 12, 32}; Shape indices_shape{7, 5, 3, 1}; Shape out_shape{35, 3, 12, 32}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -76,10 +76,10 @@ TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim2) PartialShape params_shape{7, Dimension::dynamic(), Dimension::dynamic(), 12, 32}; Shape indices_shape{7, 5, 3, 1}; Shape out_shape{35, 3, 12, 32}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -89,10 +89,10 @@ TEST(type_prop, gather_nd_batch_dim2_with_dyn_dim3) 7, Dimension::dynamic(), Dimension::dynamic(), 12, Dimension::dynamic()}; Shape indices_shape{7, 5, 3, 1}; PartialShape out_shape{35, 3, 12, Dimension::dynamic()}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, 
indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 2); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G5->get_output_partial_shape(0).same_scheme(out_shape)); } @@ -101,10 +101,10 @@ TEST(type_prop, gather_nd_batch_dim0_with_dyn_ind_dim) PartialShape params_shape{ 7, Dimension::dynamic(), Dimension::dynamic(), 12, Dimension::dynamic()}; PartialShape indices_shape{7, 5, 3, Dimension::dynamic()}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I, 0); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_TRUE(G5->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } @@ -112,8 +112,8 @@ TEST(type_prop, gather_nd_fail_batch_dims_greater_indices_rank) { Shape params_shape{2, 3, 4, 5}; Shape indices_shape{2, 1}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -137,8 +137,8 @@ TEST(type_prop, gather_nd_fail_unequal_batch_dims) { Shape params_shape{2, 3, 4, 5}; Shape indices_shape{2, 1, 4}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -161,8 +161,8 @@ TEST(type_prop, gather_nd_fail_indices_tuple_greater_data_rank_batch_dims2) { Shape params_shape{2, 1, 4, 5}; Shape indices_shape{2, 1, 5, 3}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -189,11 +189,11 @@ TEST(type_prop, gather_nd_scalar_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 2}; Shape out_shape{2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -202,11 +202,11 @@ TEST(type_prop, gather_nd_1d_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -215,11 +215,11 @@ TEST(type_prop, gather_nd_scalar_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 3}; Shape out_shape{2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, 
params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -228,11 +228,11 @@ TEST(type_prop, gather_nd_1d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -241,11 +241,11 @@ TEST(type_prop, gather_nd_2d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{1, 1}; Shape out_shape{1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -254,11 +254,11 @@ TEST(type_prop, gather_nd_batch_scalar_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1, 2}; Shape out_shape{2, 1}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -267,11 +267,11 @@ TEST(type_prop, gather_nd_batch_1d_from_2d) Shape params_shape{2, 2}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -280,11 +280,11 @@ TEST(type_prop, gather_nd_batch_scalar_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2, 3}; Shape out_shape{2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -293,11 +293,11 @@ TEST(type_prop, gather_nd_batch_1d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 2, 2}; Shape out_shape{2, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); 
ASSERT_EQ(G5->get_shape(), out_shape); } @@ -306,11 +306,11 @@ TEST(type_prop, gather_nd_batch_2d_from_3d) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); auto G5 = make_shared(P, I); - ASSERT_EQ(G5->get_element_type(), element::f32); + ASSERT_EQ(G5->get_element_type(), element::Type_t::f32); ASSERT_EQ(G5->get_shape(), out_shape); } @@ -319,8 +319,8 @@ TEST(type_prop, gather_nd_fail_params_rank) Shape params_shape{}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -343,8 +343,8 @@ TEST(type_prop, gather_nd_fail_indices_rank) Shape params_shape{2, 2, 2}; Shape indices_shape{}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::i32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); try { @@ -367,8 +367,8 @@ TEST(type_prop, gather_nd_fail_indices_element_type) Shape params_shape{2, 2, 2}; Shape indices_shape{2, 1, 1}; Shape out_shape{2, 1, 2, 2}; - auto P = make_shared(element::f32, params_shape); - auto I = make_shared(element::f32, indices_shape); + auto P = make_shared(element::Type_t::f32, params_shape); + auto I = make_shared(element::Type_t::f32, indices_shape); try { diff --git a/ngraph/test/type_prop/gather_tree.cpp b/ngraph/test/type_prop/gather_tree.cpp index 7cca7206a94..eb3e71200e4 100644 --- a/ngraph/test/type_prop/gather_tree.cpp +++ b/ngraph/test/type_prop/gather_tree.cpp @@ -23,24 +23,24 @@ using namespace ngraph; TEST(type_prop, gather_tree_output_shape) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1}); + auto end_token = make_shared(element::Type_t::i64, Shape{}); auto gather_tree = make_shared(step_ids, parent_idx, max_seq_len, end_token); ASSERT_EQ(gather_tree->get_output_shape(0), (Shape{1, 2, 3})); - ASSERT_EQ(gather_tree->get_output_element_type(0), element::i64); + ASSERT_EQ(gather_tree->get_output_element_type(0), element::Type_t::i64); } TEST(type_prop, gather_tree_pooling_step_ids_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3, 4}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1}); + auto end_token = make_shared(element::Type_t::i64, Shape{}); try { auto gather_tree = @@ -61,10 +61,10 @@ TEST(type_prop, gather_tree_pooling_step_ids_invalid_rank) TEST(type_prop, 
gather_tree_parent_idx_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3, 4}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3, 4}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1}); + auto end_token = make_shared(element::Type_t::i64, Shape{}); try { auto gather_tree = @@ -86,10 +86,10 @@ TEST(type_prop, gather_tree_parent_idx_invalid_rank) TEST(type_prop, gather_tree_max_seq_len_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1, 2}); - auto end_token = make_shared(element::i64, Shape{}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1, 2}); + auto end_token = make_shared(element::Type_t::i64, Shape{}); try { auto gather_tree = @@ -111,10 +111,10 @@ TEST(type_prop, gather_tree_max_seq_len_invalid_rank) TEST(type_prop, gather_tree_end_token_invalid_rank) { - auto step_ids = make_shared(element::i64, Shape{1, 2, 3}); - auto parent_idx = make_shared(element::i64, Shape{1, 2, 3}); - auto max_seq_len = make_shared(element::i64, Shape{1}); - auto end_token = make_shared(element::i64, Shape{1}); + auto step_ids = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto parent_idx = make_shared(element::Type_t::i64, Shape{1, 2, 3}); + auto max_seq_len = make_shared(element::Type_t::i64, Shape{1}); + auto end_token = make_shared(element::Type_t::i64, Shape{1}); try { auto gather_tree = diff --git a/ngraph/test/type_prop/grn.cpp b/ngraph/test/type_prop/grn.cpp index ba91245c451..4a5441c4795 100644 --- a/ngraph/test/type_prop/grn.cpp +++ b/ngraph/test/type_prop/grn.cpp @@ -25,17 +25,17 @@ TEST(type_prop, grn) { float bias = 1.25f; Shape data_shape{2, 3, 4, 5}; - auto A = make_shared(element::f32, data_shape); + auto A = make_shared(element::Type_t::f32, data_shape); auto grn = make_shared(A, bias); - ASSERT_EQ(grn->get_element_type(), element::f32); + ASSERT_EQ(grn->get_element_type(), element::Type_t::f32); ASSERT_EQ(grn->get_shape(), data_shape); } TEST(type_prop, grn_invalid_data_rank) { float bias = 1.25f; - auto A = make_shared(element::f32, Shape{4}); + auto A = make_shared(element::Type_t::f32, Shape{4}); try { @@ -53,7 +53,7 @@ TEST(type_prop, grn_invalid_data_rank) FAIL() << "Deduced type check failed for unexpected reason"; } - A = make_shared(element::f32, Shape{1, 2, 3, 4, 5}); + A = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4, 5}); try { diff --git a/ngraph/test/type_prop/group_convolution.cpp b/ngraph/test/type_prop/group_convolution.cpp index 054c1864a2d..62ec12d83fd 100644 --- a/ngraph/test/type_prop/group_convolution.cpp +++ b/ngraph/test/type_prop/group_convolution.cpp @@ -31,8 +31,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_lower) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); 
auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -52,8 +52,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_upper) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -73,8 +73,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_lower_nc_dims_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -94,8 +94,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_upper_nc_dims_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_UPPER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); @@ -115,8 +115,8 @@ TEST(type_prop, group_conv_v1_partial_auto_padding_same_spatial_dims_dynamic) Strides dilations{1, 1}; const auto auto_pad = op::PadType::SAME_LOWER; - auto data_batch = make_shared(element::f32, data_batch_shape); - auto filters = make_shared(element::f32, filters_shape); + auto data_batch = make_shared(element::Type_t::f32, data_batch_shape); + auto filters = make_shared(element::Type_t::f32, filters_shape); auto conv = make_shared( data_batch, filters, strides, pads_begin, pads_end, dilations, auto_pad); diff --git a/ngraph/test/type_prop/group_convolution_backprop_data.cpp b/ngraph/test/type_prop/group_convolution_backprop_data.cpp index dc5422bc155..b792a489350 100644 --- a/ngraph/test/type_prop/group_convolution_backprop_data.cpp +++ b/ngraph/test/type_prop/group_convolution_backprop_data.cpp @@ -24,12 +24,12 @@ using namespace ngraph; TEST(type_prop, group_conv_backprop_data) { // GROUPS x C_IN x C_OUT x kH x kW - const auto weights = make_shared(element::f32, Shape{2, 8, 2, 3, 3}); + const auto weights = make_shared(element::Type_t::f32, Shape{2, 8, 2, 3, 3}); // N x C_IN * GROUPS x H x W - const auto data = make_shared(element::f32, Shape{1, 16, 6, 6}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 16, 6, 6}); const auto gcbd = make_shared( data, weights, Strides{}, CoordinateDiff{}, CoordinateDiff{}, Strides{}); - EXPECT_EQ(gcbd->get_element_type(), element::f32); + EXPECT_EQ(gcbd->get_element_type(), element::Type_t::f32); EXPECT_EQ(gcbd->get_output_shape(0), (Shape{1, 4, 8, 8})); EXPECT_EQ(gcbd->get_strides(), (Strides{1, 1})); EXPECT_EQ(gcbd->get_dilations(), (Strides{1, 1})); @@ -42,14 +42,14 @@ TEST(type_prop, group_conv_backprop_data) TEST(type_prop, group_conv_backprop_data_output_shape) { // N x C_IN * GROUPS x H x W - const auto data = make_shared(element::f32, Shape{1, 16, 5, 5}); + 
const auto data = make_shared(element::Type_t::f32, Shape{1, 16, 5, 5}); // GROUPS x C_IN x C_OUT x kH x kW - const auto weights = make_shared(element::f32, Shape{1, 16, 2, 3, 3}); - const auto output_shape = op::Constant::create(element::i64, Shape{2}, {3, 3}); + const auto weights = make_shared(element::Type_t::f32, Shape{1, 16, 2, 3, 3}); + const auto output_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {3, 3}); const auto gcbd = make_shared( data, weights, output_shape, Strides{}, Strides{}, op::PadType::SAME_UPPER); - EXPECT_EQ(gcbd->get_element_type(), element::f32); + EXPECT_EQ(gcbd->get_element_type(), element::Type_t::f32); EXPECT_EQ(gcbd->get_output_shape(0), (Shape{1, 2, 3, 3})); EXPECT_EQ(gcbd->get_strides(), (Strides{1, 1})); EXPECT_EQ(gcbd->get_dilations(), (Strides{1, 1})); @@ -62,9 +62,9 @@ TEST(type_prop, group_conv_backprop_data_output_shape) TEST(type_prop, group_conv_bprop_data_v1_output_partial_shape_dynamic_static_rank) { PartialShape shape_filter{4, 5, 2, 3, 3}; - auto filters = make_shared(element::f32, shape_filter); + auto filters = make_shared(element::Type_t::f32, shape_filter); PartialShape shape_data{Dimension(), 20, 224, 224}; - auto data = make_shared(element::f32, shape_data); + auto data = make_shared(element::Type_t::f32, shape_data); auto strides = Strides{2, 2}; auto dilations = Strides{1, 1}; auto padding_begin = CoordinateDiff{1, 1}; @@ -83,9 +83,9 @@ TEST(type_prop, group_conv_bprop_data_v1_output_partial_shape_dynamic_static_ran TEST(type_prop, group_conv_backprop_data_invalid_params) { // GROUPS x C_IN x C_OUT x kH x kW - auto weights = make_shared(element::f32, Shape{21, 16, 20, 3, 3}); + auto weights = make_shared(element::Type_t::f32, Shape{21, 16, 20, 3, 3}); // N x C_IN * GROUPS x H x W - const auto data = make_shared(element::f32, Shape{1, 16, 5, 5}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 16, 5, 5}); try { @@ -105,7 +105,7 @@ TEST(type_prop, group_conv_backprop_data_invalid_params) } // GROUPS x C_IN x C_OUT x kH x kW - weights = make_shared(element::f32, Shape{4, 16, 20, 3, 3}); + weights = make_shared(element::Type_t::f32, Shape{4, 16, 20, 3, 3}); try { @@ -126,7 +126,7 @@ TEST(type_prop, group_conv_backprop_data_invalid_params) } // GROUPS x C_IN x C_OUT x kH x kW - weights = make_shared(element::f32, Shape{4, 4, 20, 3, 3}); + weights = make_shared(element::Type_t::f32, Shape{4, 4, 20, 3, 3}); try { diff --git a/ngraph/test/type_prop/gru_cell.cpp b/ngraph/test/type_prop/gru_cell.cpp index a7b3558b908..ef2969e3146 100644 --- a/ngraph/test/type_prop/gru_cell.cpp +++ b/ngraph/test/type_prop/gru_cell.cpp @@ -29,15 +29,16 @@ TEST(type_prop, gru_cell) const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); - 
EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(gru_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(gru_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); } @@ -48,13 +49,13 @@ TEST(type_prop, gru_cell_invalid_input) const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); // Invalid W tensor shape. - auto W = make_shared(element::f32, Shape{hidden_size, input_size}); + auto W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); try { const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); @@ -67,8 +68,9 @@ TEST(type_prop, gru_cell_invalid_input) } // Invalid R tensor shape. - W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - R = make_shared(element::f32, Shape{hidden_size, 1}); + W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + R = make_shared(element::Type_t::f32, Shape{hidden_size, 1}); try { const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); @@ -83,8 +85,9 @@ TEST(type_prop, gru_cell_invalid_input) } // Invalid H_t tensor shape. - R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - H_t = make_shared(element::f32, Shape{4, hidden_size}); + R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{4, hidden_size}); try { const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); @@ -98,8 +101,8 @@ TEST(type_prop, gru_cell_invalid_input) } // Invalid B tensor shape. 
- H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto B = make_shared(element::f32, Shape{hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + auto B = make_shared(element::Type_t::f32, Shape{hidden_size}); try { const auto gru_cell = make_shared(X, H_t, W, R, B, hidden_size); @@ -119,16 +122,17 @@ TEST(type_prop, gru_cell_dynamic_batch_size) const size_t hidden_size = 3; const size_t gates_count = 3; - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, + const auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto gru_cell = make_shared(X, H_t, W, R, hidden_size); - EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(gru_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -139,16 +143,17 @@ TEST(type_prop, gru_cell_dynamic_hidden_size) const auto hidden_size = Dimension::dynamic(); const size_t gates_count = 3; - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, + const auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, PartialShape{hidden_size * gates_count, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::Type_t::f32, PartialShape{hidden_size * gates_count, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto gru_cell = make_shared(X, H_t, W, R, 3); - EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(gru_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -158,16 +163,19 @@ TEST(type_prop, gru_cell_dynamic_inputs) const auto input_size = Dimension::dynamic(); const auto hidden_size = Dimension::dynamic(); - const auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - const auto R = make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + const auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + const auto W = + make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, PartialShape{hidden_size, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto gru_cell = make_shared(X, H_t, W, R, 2); EXPECT_EQ(gru_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(gru_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(gru_cell->get_output_element_type(0), 
element::Type_t::f32); } TEST(type_prop, gru_cell_invalid_input_rank0) @@ -177,43 +185,44 @@ TEST(type_prop, gru_cell_invalid_input_rank0) const size_t hidden_size = 3; const size_t gates_count = 3; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto R = make_shared(element::f32, + auto X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); // Invalid rank0 for W tensor. - auto W = make_shared(element::f32, PartialShape{}); + auto W = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for X tensor. - W = make_shared(element::f32, + W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - X = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for H_t tensor. - X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for R tensor. - H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape{}); + H_t = make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; // Invalid rank0 for B tensor. - R = make_shared(element::f32, + R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - auto B = make_shared(element::f32, PartialShape{}); + auto B = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, B, hidden_size), ngraph::NodeValidationFailure) << "GRUCell node was created with invalid data."; @@ -226,10 +235,11 @@ TEST(type_prop, gru_cell_invalid_input_dynamic_rank) const size_t hidden_size = 3; const size_t gates_count = 3; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto R = make_shared(element::f32, + auto X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); auto check_dynamic_gru = [](const shared_ptr& gru) -> bool { return gru->output(0).get_partial_shape() == PartialShape::dynamic() && @@ -237,32 +247,34 @@ TEST(type_prop, gru_cell_invalid_input_dynamic_rank) }; // Invalid dynamic rank for W tensor. 
- auto W = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto W = + make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_w = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_w), true); // Invalid dynamic rank for X tensor. - W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - X = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + W = make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + X = make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_x = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_x), true); // Invalid dynamic rank for H_t tensor. - X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_h = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_h), true); // Invalid dynamic rank for R tensor. - H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + H_t = make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_r = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_r), true); // Invalid dynamic rank for B tensor. - R = make_shared(element::f32, + R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto B = + make_shared(element::Type_t::f32, PartialShape::dynamic(Rank::dynamic())); auto gru_b = make_shared(X, H_t, W, R, B, hidden_size); EXPECT_EQ(check_dynamic_gru(gru_b), true); } diff --git a/ngraph/test/type_prop/gru_sequence.cpp b/ngraph/test/type_prop/gru_sequence.cpp index 47cc47fa89a..d8c6665cccd 100644 --- a/ngraph/test/type_prop/gru_sequence.cpp +++ b/ngraph/test/type_prop/gru_sequence.cpp @@ -30,17 +30,18 @@ TEST(type_prop, gru_sequence_forward) const size_t input_size = 4; const size_t hidden_size = 128; - const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::Type_t::f32, + Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); const auto W = make_shared( - element::f32, Shape{num_directions, 3 * hidden_size, input_size}); + element::Type_t::f32, Shape{num_directions, 3 * hidden_size, input_size}); const auto R = make_shared( - element::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); - const auto B = - make_shared(element::f32, Shape{num_directions, 3 * hidden_size}); + element::Type_t::f32, Shape{num_directions, 3 * hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, + Shape{num_directions, 3 * hidden_size}); const auto direction = op::RecurrentSequenceDirection::FORWARD; @@ -55,10 +56,10 @@ TEST(type_prop, gru_sequence_forward) 
EXPECT_EQ(sequence->get_activations()[1], "tanh"); EXPECT_EQ(sequence->get_clip(), 0.f); EXPECT_EQ(sequence->get_linear_before_reset(), false); - EXPECT_EQ(sequence->get_output_element_type(0), element::f32); + EXPECT_EQ(sequence->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(sequence->outputs().size(), 2); EXPECT_EQ(sequence->get_output_shape(0), (Shape{batch_size, num_directions, seq_length, hidden_size})); - EXPECT_EQ(sequence->get_output_element_type(1), element::f32); + EXPECT_EQ(sequence->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(sequence->get_output_shape(1), (Shape{batch_size, num_directions, hidden_size})); } diff --git a/ngraph/test/type_prop/hard_sigmoid.cpp b/ngraph/test/type_prop/hard_sigmoid.cpp index b213e1f0bfe..dc59fc97e29 100644 --- a/ngraph/test/type_prop/hard_sigmoid.cpp +++ b/ngraph/test/type_prop/hard_sigmoid.cpp @@ -25,10 +25,10 @@ TEST(type_prop, hardsigmoid) { const Shape data_shape{3, 5}; - const auto P = make_shared(element::f32, data_shape); + const auto P = make_shared(element::Type_t::f32, data_shape); const auto alpha = op::Constant::create(P->get_element_type(), Shape{}, {0.1f}); const auto beta = op::Constant::create(P->get_element_type(), Shape{}, {1.2f}); const auto H = make_shared(P, alpha, beta); - ASSERT_EQ(H->get_element_type(), element::f32); + ASSERT_EQ(H->get_element_type(), element::Type_t::f32); ASSERT_EQ(H->get_shape(), data_shape); } diff --git a/ngraph/test/type_prop/hsigmoid.cpp b/ngraph/test/type_prop/hsigmoid.cpp index 9ef8e4833a7..15e116f6aff 100644 --- a/ngraph/test/type_prop/hsigmoid.cpp +++ b/ngraph/test/type_prop/hsigmoid.cpp @@ -23,31 +23,33 @@ using namespace ngraph; TEST(type_prop, hsigmoid) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto hsigmoid_func = make_shared(data); - EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); + EXPECT_EQ(hsigmoid_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(hsigmoid_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, hsigmoid_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hsigmoid_func = make_shared(data); - EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); + EXPECT_EQ(hsigmoid_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( hsigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown auto hsigmoid_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE(hsigmoid_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, hsigmoid_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hsigmoid_func = make_shared(data); - EXPECT_EQ(hsigmoid_func->get_element_type(), element::f32); + EXPECT_EQ(hsigmoid_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( hsigmoid_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); ASSERT_TRUE(hsigmoid_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/hswish.cpp b/ngraph/test/type_prop/hswish.cpp index 9df6d19b86a..053ca1609e2 100644 --- 
a/ngraph/test/type_prop/hswish.cpp +++ b/ngraph/test/type_prop/hswish.cpp @@ -23,31 +23,33 @@ using namespace ngraph; TEST(type_prop, hswish) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto hswish_func = make_shared(data); - EXPECT_EQ(hswish_func->get_element_type(), element::f32); + EXPECT_EQ(hswish_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(hswish_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, hswish_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hswish_func = make_shared(data); - EXPECT_EQ(hswish_func->get_element_type(), element::f32); + EXPECT_EQ(hswish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( hswish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown auto hswish_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE(hswish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, hswish_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto hswish_func = make_shared(data); - EXPECT_EQ(hswish_func->get_element_type(), element::f32); + EXPECT_EQ(hswish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( hswish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); ASSERT_TRUE(hswish_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/interpolate.cpp b/ngraph/test/type_prop/interpolate.cpp index 22c795bc563..e0050f999ce 100644 --- a/ngraph/test/type_prop/interpolate.cpp +++ b/ngraph/test/type_prop/interpolate.cpp @@ -28,10 +28,10 @@ using ShapeCalcMode = op::v4::Interpolate::ShapeCalcMode; TEST(type_prop, interpolate_v4) { - auto image = std::make_shared(element::f32, Shape{2, 2, 30, 60}); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, Shape{2, 2, 30, 60}); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 15, 30}); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -44,7 +44,7 @@ TEST(type_prop, interpolate_v4) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); EXPECT_EQ(interp->get_shape(), (Shape{2, 2, 15, 30})); } @@ -52,10 +52,10 @@ TEST(type_prop, interpolate_v4_partial) { auto partial_shape = PartialShape{2, 2, Dimension::dynamic(), Dimension::dynamic()}; - auto image = std::make_shared(element::f32, partial_shape); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 
3}); + auto image = std::make_shared(element::Type_t::f32, partial_shape); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 15, 30}); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -68,11 +68,12 @@ TEST(type_prop, interpolate_v4_partial) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); ASSERT_TRUE(interp->get_output_partial_shape(0).same_scheme(partial_shape)); // rank unknown - auto partial_param = std::make_shared(element::f32, PartialShape::dynamic()); + auto partial_param = + std::make_shared(element::Type_t::f32, PartialShape::dynamic()); auto interp_part = std::make_shared(partial_param, target_shape, scales, axes, attrs); ASSERT_TRUE(interp_part->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); @@ -82,10 +83,10 @@ TEST(type_prop, interpolate_v4_partial_static_rank) { auto partial_shape = PartialShape{2, 2, Dimension::dynamic(), Dimension::dynamic()}; - auto image = std::make_shared(element::f32, partial_shape); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, partial_shape); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 15, 30}); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -98,7 +99,7 @@ TEST(type_prop, interpolate_v4_partial_static_rank) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); ASSERT_TRUE(interp->get_output_partial_shape(0).same_scheme(partial_shape)); ASSERT_TRUE(interp->get_output_partial_shape(0).rank().is_static()); } @@ -108,10 +109,10 @@ TEST(type_prop, interpolate_v4_partial_static_rank2) auto partial_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 10, 20}; auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 5, 10}; - auto image = std::make_shared(element::f32, partial_shape); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 15, 30}); - auto scales = op::Constant::create(element::f32, Shape{2}, {0.5f, 0.5f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, partial_shape); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 15, 30}); + auto scales = op::Constant::create(element::Type_t::f32, Shape{2}, {0.5f, 0.5f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -124,7 +125,7 @@ TEST(type_prop, interpolate_v4_partial_static_rank2) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); 
ASSERT_TRUE(interp->get_output_partial_shape(0).same_scheme(out_shape)); ASSERT_TRUE(interp->get_output_partial_shape(0).rank().is_static()); } @@ -134,10 +135,11 @@ TEST(type_prop, interpolate_v4_partial_static_rank3) auto partial_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, 3}; auto out_shape = PartialShape{Dimension::dynamic(), Dimension::dynamic(), 1, 1}; - auto image = std::make_shared(element::f32, partial_shape); - auto target_shape = std::make_shared(element::f32, Shape{2, 2, 1, 1}); - auto scales = op::Constant::create(element::f32, Shape{2}, {1.0f / 3.0f, 1.0f / 3.0f}); - auto axes = op::Constant::create(element::i64, Shape{2}, {2, 3}); + auto image = std::make_shared(element::Type_t::f32, partial_shape); + auto target_shape = std::make_shared(element::Type_t::f32, Shape{2, 2, 1, 1}); + auto scales = + op::Constant::create(element::Type_t::f32, Shape{2}, {1.0f / 3.0f, 1.0f / 3.0f}); + auto axes = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 3}); InterpolateAttrs attrs; attrs.mode = InterpolateMode::nearest; @@ -150,7 +152,7 @@ TEST(type_prop, interpolate_v4_partial_static_rank3) attrs.cube_coeff = -0.75; auto interp = std::make_shared(image, target_shape, scales, axes, attrs); - EXPECT_EQ(interp->get_element_type(), element::f32); + EXPECT_EQ(interp->get_element_type(), element::Type_t::f32); ASSERT_TRUE(interp->get_output_partial_shape(0).same_scheme(out_shape)); ASSERT_TRUE(interp->get_output_partial_shape(0).rank().is_static()); } diff --git a/ngraph/test/type_prop/log_softmax.cpp b/ngraph/test/type_prop/log_softmax.cpp index 5fe7caad4ca..82fb61d87ee 100644 --- a/ngraph/test/type_prop/log_softmax.cpp +++ b/ngraph/test/type_prop/log_softmax.cpp @@ -23,15 +23,15 @@ using namespace ngraph; TEST(type_prop, log_softmax) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto log_softmax_func = make_shared(data, 1); - EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); + EXPECT_EQ(log_softmax_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(log_softmax_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, log_softmax_incorrect_axis) { - const auto data = make_shared(element::f32, Shape{1, 3, 6}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); try { @@ -48,24 +48,26 @@ TEST(type_prop, log_softmax_incorrect_axis) TEST(type_prop, log_softmax_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto log_softmax_func = make_shared(data, 1); - EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); + EXPECT_EQ(log_softmax_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(log_softmax_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto log_softmax_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE( log_softmax_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, log_softmax_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto log_softmax_func = make_shared(data, 1); - EXPECT_EQ(log_softmax_func->get_element_type(), element::f32); + 
EXPECT_EQ(log_softmax_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(log_softmax_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); ASSERT_TRUE(log_softmax_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/loop.cpp b/ngraph/test/type_prop/loop.cpp index f4dfe846d88..55356511a96 100644 --- a/ngraph/test/type_prop/loop.cpp +++ b/ngraph/test/type_prop/loop.cpp @@ -29,23 +29,23 @@ using namespace ngraph; TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto body_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -134,23 +134,23 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto body_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, false); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, false); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 
10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -239,24 +239,24 @@ TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{1}); - auto Y = make_shared(element::f32, Shape{1}); - auto M = make_shared(element::f32, Shape{1}); + auto X = make_shared(element::Type_t::f32, Shape{1}); + auto Y = make_shared(element::Type_t::f32, Shape{1}); + auto M = make_shared(element::Type_t::f32, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto condition_const = - std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto condition_const = std::make_shared( + ngraph::element::Type_t::f32, ngraph::Shape{1}, 10); auto body_condition = std::make_shared(M_body, condition_const); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -338,24 +338,24 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{1}); - auto Y = make_shared(element::f32, Shape{1}); - auto M = make_shared(element::f32, Shape{1}); + auto X = make_shared(element::Type_t::f32, Shape{1}); + auto Y = make_shared(element::Type_t::f32, Shape{1}); + auto M = make_shared(element::Type_t::f32, Shape{1}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto condition_const = - std::make_shared(ngraph::element::f32, ngraph::Shape{1}, 10); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto condition_const = std::make_shared( + ngraph::element::Type_t::f32, ngraph::Shape{1}, 10); auto body_condition = std::make_shared(M_body, condition_const); - auto 
trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -442,23 +442,23 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shape TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto body_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{1}, -1); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, -1); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -548,23 +548,23 @@ TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{1}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto current_iteration = make_shared(element::Type_t::i64, Shape{1}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto body_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); - auto trip_count = - 
std::make_shared(ngraph::element::i64, ngraph::Shape{1}, 10); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{1}, 10); auto exec_condition = std::make_shared( - ngraph::element::boolean, ngraph::Shape{1}, true); + ngraph::element::Type_t::boolean, ngraph::Shape{1}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -654,23 +654,23 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports_scalars) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 1, 10}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto current_iteration = make_shared(element::Type_t::i64, Shape{}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto body_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); - auto exec_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{}, 10); + auto exec_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); @@ -760,23 +760,23 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 1, 10}); - auto Y = make_shared(element::f32, Shape{32, 10, 1}); - auto M = make_shared(element::f32, Shape{32, 1, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 1, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 10, 1}); + auto M = make_shared(element::Type_t::f32, Shape{32, 1, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto current_iteration = make_shared(element::i64, Shape{}); - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); - auto body_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto current_iteration = make_shared(element::Type_t::i64, Shape{}); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto body_condition = 
std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); - auto trip_count = - std::make_shared(ngraph::element::i64, ngraph::Shape{}, 10); - auto exec_condition = - std::make_shared(ngraph::element::boolean, ngraph::Shape{}, true); + auto trip_count = std::make_shared( + ngraph::element::Type_t::i64, ngraph::Shape{}, 10); + auto exec_condition = std::make_shared( + ngraph::element::Type_t::boolean, ngraph::Shape{}, true); // Body auto sum = make_shared(Xi, Yi); auto Zo = make_shared(sum, M_body); diff --git a/ngraph/test/type_prop/lrn.cpp b/ngraph/test/type_prop/lrn.cpp index d4f5b8f162a..354506e0f7a 100644 --- a/ngraph/test/type_prop/lrn.cpp +++ b/ngraph/test/type_prop/lrn.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, lrn_invalid_axes_rank) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto axes = make_shared(element::f32, Shape{1, 2}); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto axes = make_shared(element::Type_t::f32, Shape{1, 2}); double alpha = 0.1, beta = 0.2, bias = 0.3; size_t size = 3; try @@ -42,7 +42,7 @@ TEST(type_prop, lrn_invalid_axes_rank) FAIL() << "Deduced type check failed for unexpected reason"; } - axes = make_shared(element::f32, Shape{5}); + axes = make_shared(element::Type_t::f32, Shape{5}); try { auto lrn = make_shared(data, axes, alpha, beta, bias, size); @@ -63,8 +63,8 @@ TEST(type_prop, lrn_invalid_axes_rank) TEST(type_prop, lrn_incorrect_axes_value) { - auto data = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{3, 4}); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{3, 4}); double alpha = 0.1, beta = 0.2, bias = 0.3; size_t size = 3; try diff --git a/ngraph/test/type_prop/lstm_cell.cpp b/ngraph/test/type_prop/lstm_cell.cpp index e8275d8973f..f56d31e7bb1 100644 --- a/ngraph/test/type_prop/lstm_cell.cpp +++ b/ngraph/test/type_prop/lstm_cell.cpp @@ -29,13 +29,16 @@ TEST(type_prop, lstm_cell) const size_t hidden_size = 3; const size_t gates_count = 4; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto W = - make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - const auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + const auto X = + make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto C_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(lstm_cell->get_hidden_size(), hidden_size); @@ -45,9 +48,9 @@ TEST(type_prop, lstm_cell) EXPECT_EQ(lstm_cell->get_activations()[0], "sigmoid"); EXPECT_EQ(lstm_cell->get_activations()[1], "tanh"); EXPECT_EQ(lstm_cell->get_activations()[2], "tanh"); - EXPECT_EQ(lstm_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(lstm_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); - 
EXPECT_EQ(lstm_cell->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(lstm_cell->get_output_shape(1), (Shape{batch_size, hidden_size})); } @@ -58,14 +61,15 @@ TEST(type_prop, lstm_cell_invalid_input) const size_t hidden_size = 3; const size_t gates_count = 4; - auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = - make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + auto C_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); // Invalid W tensor shape. - auto W = make_shared(element::f32, Shape{1 * hidden_size, input_size}); + auto W = + make_shared(element::Type_t::f32, Shape{1 * hidden_size, input_size}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); @@ -78,8 +82,9 @@ TEST(type_prop, lstm_cell_invalid_input) } // Invalid R tensor shape. - W = make_shared(element::f32, Shape{gates_count * hidden_size, input_size}); - R = make_shared(element::f32, Shape{gates_count * hidden_size, 1}); + W = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, input_size}); + R = make_shared(element::Type_t::f32, Shape{gates_count * hidden_size, 1}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); @@ -93,8 +98,9 @@ TEST(type_prop, lstm_cell_invalid_input) } // Invalid H_t tensor shape. - R = make_shared(element::f32, Shape{gates_count * hidden_size, hidden_size}); - H_t = make_shared(element::f32, Shape{4, hidden_size}); + R = make_shared(element::Type_t::f32, + Shape{gates_count * hidden_size, hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{4, hidden_size}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); @@ -108,8 +114,8 @@ TEST(type_prop, lstm_cell_invalid_input) } // Invalid C_t tensor shape. - H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - C_t = make_shared(element::f32, Shape{4, hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + C_t = make_shared(element::Type_t::f32, Shape{4, hidden_size}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); @@ -123,9 +129,10 @@ TEST(type_prop, lstm_cell_invalid_input) } // Invalid B tensor shape. 
- C_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto B = make_shared(element::f32, Shape{2 * gates_count * hidden_size}); - auto P = make_shared(element::f32, Shape{3 * hidden_size}); + C_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + auto B = + make_shared(element::Type_t::f32, Shape{2 * gates_count * hidden_size}); + auto P = make_shared(element::Type_t::f32, Shape{3 * hidden_size}); try { const auto lstm_cell = make_shared(X, H_t, C_t, W, R, B, hidden_size); @@ -146,22 +153,22 @@ TEST(type_prop, lstm_cell_dynamic_batch_size) const size_t gates_count = 4; const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto W = make_shared( - element::f32, PartialShape{gates_count * hidden_size, input_size}); + element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); const auto R = make_shared( - element::f32, PartialShape{gates_count * hidden_size, hidden_size}); + element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto C_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(lstm_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); EXPECT_EQ(lstm_cell->get_output_partial_shape(1), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(lstm_cell->get_output_element_type(0), element::f32); - EXPECT_EQ(lstm_cell->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(0), element::Type_t::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(1), element::Type_t::f32); } TEST(type_prop, lstm_cell_dynamic_hidden_size) @@ -172,22 +179,22 @@ TEST(type_prop, lstm_cell_dynamic_hidden_size) const size_t gates_count = 4; const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto W = make_shared( - element::f32, PartialShape{hidden_size * gates_count, input_size}); + element::Type_t::f32, PartialShape{hidden_size * gates_count, input_size}); const auto R = make_shared( - element::f32, PartialShape{hidden_size * gates_count, hidden_size}); + element::Type_t::f32, PartialShape{hidden_size * gates_count, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto C_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, 3); EXPECT_EQ(lstm_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); EXPECT_EQ(lstm_cell->get_output_partial_shape(1), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(lstm_cell->get_output_element_type(0), element::f32); - EXPECT_EQ(lstm_cell->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(0), element::Type_t::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(1), element::Type_t::f32); } TEST(type_prop, lstm_cell_dynamic_inputs) @@ -198,22 +205,22 @@ 
TEST(type_prop, lstm_cell_dynamic_inputs) const size_t gates_count = 4; const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto W = make_shared( - element::f32, PartialShape{hidden_size * gates_count, input_size}); + element::Type_t::f32, PartialShape{hidden_size * gates_count, input_size}); const auto R = make_shared( - element::f32, PartialShape{hidden_size * gates_count, hidden_size}); + element::Type_t::f32, PartialShape{hidden_size * gates_count, hidden_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto C_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto lstm_cell = make_shared(X, H_t, C_t, W, R, 3); EXPECT_EQ(lstm_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); EXPECT_EQ(lstm_cell->get_output_partial_shape(1), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(lstm_cell->get_output_element_type(0), element::f32); - EXPECT_EQ(lstm_cell->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(0), element::Type_t::f32); + EXPECT_EQ(lstm_cell->get_output_element_type(1), element::Type_t::f32); } TEST(type_prop, lstm_cell_invalid_input_rank0) @@ -223,53 +230,58 @@ TEST(type_prop, lstm_cell_invalid_input_rank0) const size_t hidden_size = 3; const size_t gates_count = 4; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto W = make_shared(element::f32, + auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + auto W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - auto R = make_shared(element::f32, + auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - auto C_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + auto C_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); // Invalid rank0 for W tensor. - W = make_shared(element::f32, PartialShape{}); + W = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for X tensor. - W = make_shared(element::f32, + W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - X = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for H_t tensor. - X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for C_t tensor. 
- H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - C_t = make_shared(element::f32, PartialShape{}); + H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + C_t = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for R tensor. - C_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape{}); + C_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; // Invalid rank0 for B tensor. - R = make_shared(element::f32, + R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape{}); + auto B = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, C_t, W, R, B, hidden_size), ngraph::NodeValidationFailure) << "LSTMCell node was created with invalid data."; @@ -282,13 +294,16 @@ TEST(type_prop, lstm_cell_invalid_input_dynamic_rank) const size_t hidden_size = 3; const size_t gates_count = 4; - auto X = make_shared(element::f32, PartialShape{batch_size, input_size}); - auto W = make_shared(element::f32, + auto X = + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + auto W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - auto R = make_shared(element::f32, + auto R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - auto C_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); + auto H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + auto C_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); auto check_dynamic_lstm = [](const shared_ptr& lstm) -> bool { return lstm->output(0).get_partial_shape() == PartialShape::dynamic() && @@ -297,39 +312,47 @@ TEST(type_prop, lstm_cell_invalid_input_dynamic_rank) }; // Invalid dynamic rank for W tensor. - W = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + W = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for X tensor. - W = make_shared(element::f32, + W = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, input_size}); - X = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + X = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for H_t tensor. 
- X = make_shared(element::f32, PartialShape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + X = make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for C_t tensor. - H_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - C_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + H_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + C_t = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for R tensor. - C_t = make_shared(element::f32, PartialShape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + C_t = + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); // Invalid dynamic rank for B tensor. - R = make_shared(element::f32, + R = make_shared(element::Type_t::f32, PartialShape{gates_count * hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto B = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); lstm = make_shared(X, H_t, C_t, W, R, B, hidden_size); EXPECT_EQ(check_dynamic_lstm(lstm), true); } diff --git a/ngraph/test/type_prop/lstm_sequence.cpp b/ngraph/test/type_prop/lstm_sequence.cpp index 756e7d9c90b..48631b366e3 100644 --- a/ngraph/test/type_prop/lstm_sequence.cpp +++ b/ngraph/test/type_prop/lstm_sequence.cpp @@ -35,7 +35,7 @@ struct recurrent_sequence_parameters Dimension seq_length = 12; Dimension input_size = 8; Dimension hidden_size = 256; - ngraph::element::Type et = element::f32; + ngraph::element::Type et = element::Type_t::f32; }; // @@ -86,19 +86,20 @@ TEST(type_prop, lstm_sequence_forward) const size_t input_size = 4; const size_t hidden_size = 128; - const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::Type_t::f32, + Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); const auto initial_cell_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); const auto W = make_shared( - element::f32, Shape{num_directions, 4 * hidden_size, input_size}); + element::Type_t::f32, Shape{num_directions, 4 * hidden_size, input_size}); const auto R = make_shared( - element::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); - const auto B = - make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); + element::Type_t::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, + Shape{num_directions, 4 * 
hidden_size}); const auto lstm_direction = op::RecurrentSequenceDirection::FORWARD; @@ -120,13 +121,13 @@ TEST(type_prop, lstm_sequence_forward) EXPECT_EQ(lstm_sequence->get_activations()[1], "tanh"); EXPECT_EQ(lstm_sequence->get_activations()[2], "tanh"); EXPECT_EQ(lstm_sequence->get_clip(), 0.f); - EXPECT_EQ(lstm_sequence->get_output_element_type(0), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(lstm_sequence->outputs().size(), 3); EXPECT_EQ(lstm_sequence->get_output_shape(0), (Shape{batch_size, num_directions, seq_length, hidden_size})); - EXPECT_EQ(lstm_sequence->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(1), (Shape{batch_size, num_directions, hidden_size})); - EXPECT_EQ(lstm_sequence->get_output_element_type(2), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(2), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(2), (Shape{batch_size, num_directions, hidden_size})); } @@ -138,19 +139,20 @@ TEST(type_prop, lstm_sequence_bidirectional) const size_t input_size = 8; const size_t hidden_size = 256; - const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::Type_t::f32, + Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); const auto initial_cell_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); const auto W = make_shared( - element::f32, Shape{num_directions, 4 * hidden_size, input_size}); + element::Type_t::f32, Shape{num_directions, 4 * hidden_size, input_size}); const auto R = make_shared( - element::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); - const auto B = - make_shared(element::f32, Shape{num_directions, 4 * hidden_size}); + element::Type_t::f32, Shape{num_directions, 4 * hidden_size, hidden_size}); + const auto B = make_shared(element::Type_t::f32, + Shape{num_directions, 4 * hidden_size}); const auto lstm_direction = opset5::LSTMSequence::direction::BIDIRECTIONAL; const std::vector activations_alpha = {2.7, 7.0, 32.367}; @@ -177,12 +179,12 @@ TEST(type_prop, lstm_sequence_bidirectional) EXPECT_EQ(lstm_sequence->get_activations()[1], "sigmoid"); EXPECT_EQ(lstm_sequence->get_activations()[2], "sigmoid"); EXPECT_EQ(lstm_sequence->get_clip(), 0.f); - EXPECT_EQ(lstm_sequence->get_output_element_type(0), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(0), (Shape{batch_size, num_directions, seq_length, hidden_size})); - EXPECT_EQ(lstm_sequence->get_output_element_type(1), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(1), (Shape{batch_size, num_directions, hidden_size})); - EXPECT_EQ(lstm_sequence->get_output_element_type(2), element::f32); + EXPECT_EQ(lstm_sequence->get_output_element_type(2), element::Type_t::f32); EXPECT_EQ(lstm_sequence->get_output_shape(2), (Shape{batch_size, 
num_directions, hidden_size})); } @@ -195,7 +197,7 @@ TEST(type_prop, lstm_sequence_dynamic_batch_size) param.seq_length = 12; param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -221,7 +223,7 @@ TEST(type_prop, lstm_sequence_dynamic_num_directions) param.seq_length = 12; param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -247,7 +249,7 @@ TEST(type_prop, lstm_sequence_dynamic_seq_length) param.seq_length = Dimension::dynamic(); param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -273,7 +275,7 @@ TEST(type_prop, lstm_sequence_dynamic_hidden_size) param.seq_length = 12; param.input_size = 8; param.hidden_size = Dimension::dynamic(); - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -299,7 +301,7 @@ TEST(type_prop, lstm_sequence_dynamic_inputs) param.hidden_size = Dimension::dynamic(); param.num_directions = Dimension::dynamic(); param.seq_length = Dimension::dynamic(); - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); lstm_sequence->validate_and_infer_types(); @@ -325,7 +327,7 @@ TEST(type_prop, lstm_sequence_invalid_input_dimension) param.seq_length = 12; param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto lstm_sequence = lstm_seq_tensor_initialization(param); auto invalid_rank0_tensor = make_shared(param.et, PartialShape{}); @@ -350,7 +352,7 @@ TEST(type_prop, lstm_sequence_invalid_input_dynamic_rank) param.seq_length = 12; param.input_size = 8; param.hidden_size = 256; - param.et = element::f32; + param.et = element::Type_t::f32; auto check_dynamic_lstm = [](const shared_ptr& lstm) -> bool { return lstm->output(0).get_partial_shape() == PartialShape::dynamic() && diff --git a/ngraph/test/type_prop/matmul.cpp b/ngraph/test/type_prop/matmul.cpp index eb452d77949..f5ec44b3e10 100644 --- a/ngraph/test/type_prop/matmul.cpp +++ b/ngraph/test/type_prop/matmul.cpp @@ -23,113 +23,114 @@ using namespace ngraph; TEST(type_prop, matmul_2D_same) { - auto A = make_shared(element::f32, Shape{2, 2}); - auto B = make_shared(element::f32, Shape{2, 2}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2})); } TEST(type_prop, matmul_4D_same) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 3}); - auto B = make_shared(element::f32, Shape{2, 2, 3, 3}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2, 3, 3}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2, 3, 3}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 3})); } TEST(type_prop, matmul_2D) { - auto A = 
make_shared(element::f32, Shape{3, 6}); - auto B = make_shared(element::f32, Shape{6, 4}); + auto A = make_shared(element::Type_t::f32, Shape{3, 6}); + auto B = make_shared(element::Type_t::f32, Shape{6, 4}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 6}); - auto B = make_shared(element::f32, Shape{2, 2, 6, 4}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2, 3, 6}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2, 6, 4}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); } TEST(type_prop, matmul_5D_x_3D_transpose_a_transpose_b) { - auto A = make_shared(element::f32, Shape{2, 1, 6, 3}); - auto B = make_shared(element::f32, Shape{7, 1, 5, 4, 6}); + auto A = make_shared(element::Type_t::f32, Shape{2, 1, 6, 3}); + auto B = make_shared(element::Type_t::f32, Shape{7, 1, 5, 4, 6}); auto matmul = make_shared(A, B, true, true); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{7, 2, 5, 3, 4})); } TEST(type_prop, matmul_2D_transpose_a) { - auto A = make_shared(element::f32, Shape{6, 3}); - auto B = make_shared(element::f32, Shape{6, 4}); + auto A = make_shared(element::Type_t::f32, Shape{6, 3}); + auto B = make_shared(element::Type_t::f32, Shape{6, 4}); auto matmul = make_shared(A, B, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D_transpose_a) { - auto A = make_shared(element::f32, Shape{2, 2, 6, 3}); - auto B = make_shared(element::f32, Shape{2, 2, 6, 4}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2, 6, 3}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2, 6, 4}); auto matmul = make_shared(A, B, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); } TEST(type_prop, matmul_2D_transpose_b) { - auto A = make_shared(element::f32, Shape{3, 6}); - auto B = make_shared(element::f32, Shape{4, 6}); + auto A = make_shared(element::Type_t::f32, Shape{3, 6}); + auto B = make_shared(element::Type_t::f32, Shape{4, 6}); auto matmul = make_shared(A, B, 0, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{3, 4})); } TEST(type_prop, matmul_4D_transpose_b) { - auto A = make_shared(element::f32, Shape{2, 2, 3, 6}); - auto B = make_shared(element::f32, Shape{2, 2, 4, 6}); + auto A = make_shared(element::Type_t::f32, Shape{2, 2, 3, 6}); + auto B = make_shared(element::Type_t::f32, Shape{2, 2, 4, 6}); auto matmul = make_shared(A, B, 0, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{2, 2, 3, 4})); } TEST(type_prop, matmul_dynamic_5D_transpose_b) { Dimension dynamic = Dimension::dynamic(); - auto A = - make_shared(element::f32, PartialShape{dynamic, 4, dynamic, dynamic, 6}); - auto B = make_shared(element::f32, 
PartialShape{1, dynamic, dynamic, 4, 6}); + auto A = make_shared(element::Type_t::f32, + PartialShape{dynamic, 4, dynamic, dynamic, 6}); + auto B = + make_shared(element::Type_t::f32, PartialShape{1, dynamic, dynamic, 4, 6}); auto matmul = make_shared(A, B, 0, 1); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{Dimension(1, -1), 4, dynamic, dynamic, 4})); } @@ -137,24 +138,24 @@ TEST(type_prop, matmul_dynamic_5D_transpose_b) TEST(type_prop, matmul_dynamic_2D_transpose_a) { Dimension dynamic = Dimension::dynamic(); - auto A = make_shared(element::f32, PartialShape{dynamic, 3}); - auto B = make_shared(element::f32, PartialShape{4, dynamic}); + auto A = make_shared(element::Type_t::f32, PartialShape{dynamic, 3}); + auto B = make_shared(element::Type_t::f32, PartialShape{4, dynamic}); auto matmul = make_shared(A, B, 1, 0); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{3, dynamic})); } TEST(type_prop, matmul_dynamic_1D_3D) { Dimension dynamic = Dimension::dynamic(); - auto A = make_shared(element::f32, PartialShape{dynamic}); - auto B = make_shared(element::f32, PartialShape{2, 4, dynamic}); + auto A = make_shared(element::Type_t::f32, PartialShape{dynamic}); + auto B = make_shared(element::Type_t::f32, PartialShape{2, 4, dynamic}); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{2, dynamic})); } @@ -162,52 +163,52 @@ TEST(type_prop, matmul_dynamic_1D_3D) // 1D x 1D TEST(type_prop, matmul_1D_x_1D_false_false) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::Type_t::f32, Shape{1}); + auto B = make_shared(element::Type_t::f32, Shape{1}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_false_true) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::Type_t::f32, Shape{1}); + auto B = make_shared(element::Type_t::f32, Shape{1}); auto matmul = make_shared(A, B, false, true); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_true_false) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::Type_t::f32, Shape{1}); + auto B = make_shared(element::Type_t::f32, Shape{1}); auto matmul = make_shared(A, B, true, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_true_true) { - auto A = make_shared(element::f32, Shape{1}); - auto B = make_shared(element::f32, Shape{1}); + auto A = make_shared(element::Type_t::f32, Shape{1}); + auto B = make_shared(element::Type_t::f32, Shape{1}); auto matmul = make_shared(A, B, true, true); - ASSERT_EQ(matmul->get_element_type(), element::f32); + 
ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{})); } TEST(type_prop, matmul_1D_x_1D_incompatible) { - auto A = make_shared(element::f32, Shape{3}); - auto B = make_shared(element::f32, Shape{4}); + auto A = make_shared(element::Type_t::f32, Shape{3}); + auto B = make_shared(element::Type_t::f32, Shape{4}); try { @@ -228,30 +229,30 @@ TEST(type_prop, matmul_1D_x_1D_incompatible) // 2D x 1D TEST(type_prop, matmul_2D_x_1D_false_false) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_2D_x_1D_false_true) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2}); auto matmul = make_shared(A, B, false, true); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_2D_x_1D_true_false) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2}); try { @@ -271,8 +272,8 @@ TEST(type_prop, matmul_2D_x_1D_true_false) TEST(type_prop, matmul_2D_x_1D_true_true) { - auto A = make_shared(element::f32, Shape{1, 2}); - auto B = make_shared(element::f32, Shape{2}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2}); + auto B = make_shared(element::Type_t::f32, Shape{2}); try { @@ -293,19 +294,19 @@ TEST(type_prop, matmul_2D_x_1D_true_true) // 1D x 2D TEST(type_prop, matmul_1D_x_2D_false_false) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::Type_t::f32, Shape{2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 1}); auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_1D_x_2D_false_true) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::Type_t::f32, Shape{2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 1}); try { @@ -325,18 +326,18 @@ TEST(type_prop, matmul_1D_x_2D_false_true) TEST(type_prop, matmul_1D_x_2D_true_false) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::Type_t::f32, Shape{2}); + auto B = make_shared(element::Type_t::f32, Shape{2, 1}); auto matmul = make_shared(A, B, true, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_shape(), (Shape{1})); } TEST(type_prop, matmul_1D_x_2D_true_true) { - auto A = make_shared(element::f32, Shape{2}); - auto B = make_shared(element::f32, Shape{2, 1}); + auto A = make_shared(element::Type_t::f32, Shape{2}); + auto B = 
 TEST(type_prop, matmul_1D_x_2D_true_true)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{2});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{2, 1});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{2});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{2, 1});

     try
     {
@@ -357,65 +358,65 @@ TEST(type_prop, matmul_1D_x_2D_true_true)
 // 1D x 4D
 TEST(type_prop, matmul_1D_x_4D_false_false)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{3});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{3});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 2, 3, 4});

     auto matmul = make_shared<op::MatMul>(A, B, false, false);

-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_shape(), (Shape{1, 2, 4}));
 }

 // 4D x 1D
 TEST(type_prop, matmul_4D_x_1D_false_false)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 2, 3, 4});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{4});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 2, 3, 4});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{4});

     auto matmul = make_shared<op::MatMul>(A, B, false, false);

-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_shape(), (Shape{1, 2, 3}));
 }

 // Batch broadcast
 TEST(type_prop, matmul_batch_broadcast)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{5, 1, 1, 4, 3});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{1, 1, 6, 3, 2});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{5, 1, 1, 4, 3});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 1, 6, 3, 2});

     auto matmul = make_shared<op::MatMul>(A, B, false, false);

-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_shape(), (Shape{5, 1, 6, 4, 2}));
 }

 TEST(type_prop, matmul_batch_broadcast_expand_to_A)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{1, 4, 3});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{7, 8, 5, 3, 2});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 4, 3});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{7, 8, 5, 3, 2});

     auto matmul = make_shared<op::MatMul>(A, B, false, false);

-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_shape(), (Shape{7, 8, 5, 4, 2}));
 }

 TEST(type_prop, matmul_batch_broadcast_expand_to_B)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{8, 7, 6, 1, 4, 3});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{1, 5, 3, 2});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{8, 7, 6, 1, 4, 3});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 5, 3, 2});

     auto matmul = make_shared<op::MatMul>(A, B, false, false);

-    ASSERT_EQ(matmul->get_element_type(), element::f32);
+    ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32);
     ASSERT_EQ(matmul->get_shape(), (Shape{8, 7, 6, 5, 4, 2}));
 }

 TEST(type_prop, matmul_incompatible_batch_dims)
 {
-    auto A = make_shared<op::Parameter>(element::f32, Shape{7, 4, 3});
-    auto B = make_shared<op::Parameter>(element::f32, Shape{6, 3, 2});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32, Shape{7, 4, 3});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32, Shape{6, 3, 2});

     try
     {
@@ -435,14 +436,14 @@ TEST(type_prop, matmul_incompatible_batch_dims)
 TEST(type_prop, matmul_matrix_dynamic_bounds)
 {
-    auto A =
-        make_shared<op::Parameter>(element::f32, PartialShape{Dimension(2, 5), Dimension(6, 10)});
-    auto B =
-        make_shared<op::Parameter>(element::f32, PartialShape{Dimension(7, 8), Dimension(15, 20)});
+    auto A = make_shared<op::Parameter>(element::Type_t::f32,
+                                        PartialShape{Dimension(2, 5), Dimension(6, 10)});
+    auto B = make_shared<op::Parameter>(element::Type_t::f32,
+                                        PartialShape{Dimension(7, 8), Dimension(15, 20)});

     auto matmul = make_shared<op::MatMul>(A, B, false, false);

-
ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), (PartialShape{Dimension(2, 5), Dimension(15, 20)})); } @@ -517,35 +518,35 @@ TEST(type_prop, matmul_batch_dynamic_bounds) 5, // 18 4}; // 19 - auto A = make_shared(element::f32, A_shape); - auto B = make_shared(element::f32, B_shape); + auto A = make_shared(element::Type_t::f32, A_shape); + auto B = make_shared(element::Type_t::f32, B_shape); auto matmul = make_shared(A, B); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape); } TEST(type_prop, matmul_incompatible_matrix_dim_bounds) { - auto A = - make_shared(element::f32, PartialShape{Dimension(2, 5), Dimension(3, 4)}); - auto B = - make_shared(element::f32, PartialShape{Dimension(1, 2), Dimension(15, 20)}); + auto A = make_shared(element::Type_t::f32, + PartialShape{Dimension(2, 5), Dimension(3, 4)}); + auto B = make_shared(element::Type_t::f32, + PartialShape{Dimension(1, 2), Dimension(15, 20)}); auto expected_output_shape = PartialShape{Dimension(2, 5), Dimension(15, 20)}; // No error for backward compatibility auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape); } TEST(type_prop, matmul_incompatible_batch_dim_bounds) { - auto A = make_shared(element::f32, PartialShape{Dimension(2, 5), 4, 3}); - auto B = make_shared(element::f32, PartialShape{Dimension(6, 10), 3, 2}); + auto A = make_shared(element::Type_t::f32, PartialShape{Dimension(2, 5), 4, 3}); + auto B = make_shared(element::Type_t::f32, PartialShape{Dimension(6, 10), 3, 2}); Dimension dynamic = Dimension::dynamic(); auto expected_output_shape = PartialShape{dynamic, 4, 2}; @@ -553,6 +554,6 @@ TEST(type_prop, matmul_incompatible_batch_dim_bounds) // No error for backward compatibility auto matmul = make_shared(A, B, false, false); - ASSERT_EQ(matmul->get_element_type(), element::f32); + ASSERT_EQ(matmul->get_element_type(), element::Type_t::f32); ASSERT_EQ(matmul->get_output_partial_shape(0), expected_output_shape); } diff --git a/ngraph/test/type_prop/max_pool.cpp b/ngraph/test/type_prop/max_pool.cpp index 0fb8bd7fc79..e274e733f1a 100644 --- a/ngraph/test/type_prop/max_pool.cpp +++ b/ngraph/test/type_prop/max_pool.cpp @@ -31,7 +31,7 @@ TEST(type_prop, max_pool_auto_padding) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); @@ -50,7 +50,7 @@ TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_lower) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_LOWER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto mp = make_shared( arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad); @@ -70,7 +70,7 @@ TEST(type_prop, max_pool_auto_padding_nc_dims_dynamic_same_upper) const auto rounding_mode = op::RoundingType::FLOOR; const auto auto_pad = op::PadType::SAME_UPPER; - auto arg = make_shared(element::f32, arg_shape); + auto arg = 
make_shared<op::Parameter>(element::Type_t::f32, arg_shape);
     auto mp = make_shared<op::v1::MaxPool>(
         arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);

@@ -90,7 +90,7 @@ TEST(type_prop, max_pool_auto_padding_spatial_dims_dynamic)
     const auto rounding_mode = op::RoundingType::FLOOR;
     const auto auto_pad = op::PadType::SAME_LOWER;

-    auto arg = make_shared<op::Parameter>(element::f32, arg_shape);
+    auto arg = make_shared<op::Parameter>(element::Type_t::f32, arg_shape);
     auto mp = make_shared<op::v1::MaxPool>(
         arg, strides, pads_begin, pads_end, kernel_shape, rounding_mode, auto_pad);

diff --git a/ngraph/test/type_prop/mish.cpp b/ngraph/test/type_prop/mish.cpp
index 68ec076374f..c28a9faceaf 100644
--- a/ngraph/test/type_prop/mish.cpp
+++ b/ngraph/test/type_prop/mish.cpp
@@ -23,31 +23,33 @@ using namespace ngraph;

 TEST(type_prop, mish)
 {
-    auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6});
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 3, 6});
     auto mish_func = make_shared<op::v4::Mish>(data);
-    EXPECT_EQ(mish_func->get_element_type(), element::f32);
+    EXPECT_EQ(mish_func->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(mish_func->get_shape(), (Shape{1, 3, 6}));
 }

 TEST(type_prop, mish_partial)
 {
-    auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data =
+        make_shared<op::Parameter>(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto mish_func = make_shared<op::v4::Mish>(data);
-    EXPECT_EQ(mish_func->get_element_type(), element::f32);
+    EXPECT_EQ(mish_func->get_element_type(), element::Type_t::f32);
     ASSERT_TRUE(mish_func->get_output_partial_shape(0).same_scheme(
         (PartialShape{1, Dimension::dynamic(), 6})));

     // rank unknown
     auto mish_partial = make_shared<op::v4::Mish>(
-        make_shared<op::Parameter>(element::f32, PartialShape::dynamic()));
+        make_shared<op::Parameter>(element::Type_t::f32, PartialShape::dynamic()));
     ASSERT_TRUE(mish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic()));
 }

 TEST(type_prop, mish_partial_static_rank)
 {
-    auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data =
+        make_shared<op::Parameter>(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto mish_func = make_shared<op::v4::Mish>(data);
-    EXPECT_EQ(mish_func->get_element_type(), element::f32);
+    EXPECT_EQ(mish_func->get_element_type(), element::Type_t::f32);
     ASSERT_TRUE(mish_func->get_output_partial_shape(0).same_scheme(
         (PartialShape{1, Dimension::dynamic(), 6})));
     ASSERT_TRUE(mish_func->get_output_partial_shape(0).rank().is_static());

diff --git a/ngraph/test/type_prop/mvn.cpp b/ngraph/test/type_prop/mvn.cpp
index 7b37b95a268..87247422d40 100644
--- a/ngraph/test/type_prop/mvn.cpp
+++ b/ngraph/test/type_prop/mvn.cpp
@@ -23,17 +23,18 @@ using namespace ngraph;

 TEST(type_prop, mvn)
 {
-    auto data = make_shared<op::Parameter>(element::f32, Shape{1, 3, 6});
+    auto data = make_shared<op::Parameter>(element::Type_t::f32, Shape{1, 3, 6});
     auto mvn_func = make_shared<op::MVN>(data);
-    EXPECT_EQ(mvn_func->get_element_type(), element::f32);
+    EXPECT_EQ(mvn_func->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(mvn_func->get_shape(), (Shape{1, 3, 6}));
 }

 TEST(type_prop, mvn_partial)
 {
-    auto data = make_shared<op::Parameter>(element::f32, PartialShape{1, Dimension::dynamic(), 6});
+    auto data =
+        make_shared<op::Parameter>(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6});
     auto mvn_func = make_shared<op::MVN>(data);
-    EXPECT_EQ(mvn_func->get_element_type(), element::f32);
+    EXPECT_EQ(mvn_func->get_element_type(), element::Type_t::f32);
     EXPECT_EQ(mvn_func->get_reduction_axes(), (AxisSet{1, 2}));
     ASSERT_TRUE(mvn_func->get_output_partial_shape(0).same_scheme(
         (PartialShape{1, Dimension::dynamic(), 6})));
@@ -42,8
+43,8 @@ TEST(type_prop, mvn_partial) EXPECT_EQ(make_shared(data, false)->get_reduction_axes(), (AxisSet{2})); // rank unknown - auto mvn_partial = - make_shared(make_shared(element::f32, PartialShape::dynamic())); + auto mvn_partial = make_shared( + make_shared(element::Type_t::f32, PartialShape::dynamic())); EXPECT_EQ(mvn_partial->get_reduction_axes(), AxisSet{}); ASSERT_TRUE(mvn_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } diff --git a/ngraph/test/type_prop/non_max_suppression.cpp b/ngraph/test/type_prop/non_max_suppression.cpp index 8202486b25d..1c2d7572b07 100644 --- a/ngraph/test/type_prop/non_max_suppression.cpp +++ b/ngraph/test/type_prop/non_max_suppression.cpp @@ -27,8 +27,8 @@ TEST(type_prop, nms_incorrect_boxes_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -42,8 +42,8 @@ TEST(type_prop, nms_incorrect_scores_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2}); make_shared(boxes, scores); } @@ -57,8 +57,8 @@ TEST(type_prop, nms_incorrect_scheme_num_batches) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{2, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 2, 3}); make_shared(boxes, scores); } @@ -73,8 +73,8 @@ TEST(type_prop, nms_incorrect_scheme_num_boxes) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -88,11 +88,11 @@ TEST(type_prop, nms_incorrect_scheme_num_boxes) TEST(type_prop, nms_scalar_inputs_check) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); - const auto scalar = make_shared(element::f32, Shape{}); - const auto non_scalar = make_shared(element::f32, Shape{1}); + const auto scalar = make_shared(element::Type_t::f32, Shape{}); + const auto non_scalar = make_shared(element::Type_t::f32, Shape{1}); try { @@ -125,8 +125,8 @@ TEST(type_prop, nms_scalar_inputs_check) TEST(type_prop, nms_output_shape) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); const auto nms = make_shared(boxes, scores); const auto nms_out_ps = nms->get_output_partial_shape(0); @@ -138,46 +138,49 @@ TEST(type_prop, nms_output_shape) TEST(type_prop, nms_output_shape_2) { - const auto boxes = make_shared(element::f32, Shape{1, 6, 4}); - const auto scores = make_shared(element::f32, Shape{1, 
1, 6}); - const auto max_output_boxes_per_class = op::Constant::create(element::i32, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 6, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 6}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i32, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{3, 3})); } TEST(type_prop, nms_output_shape_3) { - const auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 1}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{1, 3})); } TEST(type_prop, nms_dynamic_boxes_and_scores) { - const auto boxes = make_shared(element::f32, PartialShape::dynamic()); - const auto scores = make_shared(element::f32, PartialShape::dynamic()); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto scores = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_TRUE( nms->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 3})); } @@ -188,8 +191,8 @@ TEST(type_prop, nms_v3_incorrect_boxes_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -203,8 +206,8 @@ TEST(type_prop, nms_v3_incorrect_scores_rank) { try { - 
const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2}); make_shared(boxes, scores); } @@ -218,8 +221,8 @@ TEST(type_prop, nms_v3_incorrect_scheme_num_batches) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{2, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 2, 3}); make_shared(boxes, scores); } @@ -234,8 +237,8 @@ TEST(type_prop, nms_v3_incorrect_scheme_num_boxes) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -249,11 +252,11 @@ TEST(type_prop, nms_v3_incorrect_scheme_num_boxes) TEST(type_prop, nms_v3_scalar_inputs_check) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); - const auto scalar = make_shared(element::f32, Shape{}); - const auto non_scalar = make_shared(element::f32, Shape{1}); + const auto scalar = make_shared(element::Type_t::f32, Shape{}); + const auto non_scalar = make_shared(element::Type_t::f32, Shape{1}); try { @@ -286,8 +289,8 @@ TEST(type_prop, nms_v3_scalar_inputs_check) TEST(type_prop, nms_v3_output_shape) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); const auto nms = make_shared(boxes, scores); const auto nms_out_ps = nms->get_output_partial_shape(0); @@ -299,41 +302,44 @@ TEST(type_prop, nms_v3_output_shape) TEST(type_prop, nms_v3_output_shape_2) { - const auto boxes = make_shared(element::f32, Shape{1, 6, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 6}); - const auto max_output_boxes_per_class = op::Constant::create(element::i32, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 6, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 6}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i32, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{3, 3})); } TEST(type_prop, nms_v3_output_shape_3) { - const auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 1}); - const auto max_output_boxes_per_class = 
op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{1, 3})); } TEST(type_prop, nms_v3_output_shape_i32) { - const auto boxes = make_shared(element::f32, Shape{1, 1, 4}); - const auto scores = make_shared(element::f32, Shape{1, 1, 1}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 1, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 1, 1}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared(boxes, @@ -343,24 +349,25 @@ TEST(type_prop, nms_v3_output_shape_i32) score_threshold, op::v3::NonMaxSuppression::BoxEncodingType::CORNER, true, - element::i32); + element::Type_t::i32); - ASSERT_EQ(nms->get_element_type(), element::i32); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i32); ASSERT_EQ(nms->get_shape(), (Shape{1, 3})); } TEST(type_prop, nms_v3_dynamic_boxes_and_scores) { - const auto boxes = make_shared(element::f32, PartialShape::dynamic()); - const auto scores = make_shared(element::f32, PartialShape::dynamic()); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto scores = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_TRUE( nms->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 3})); } @@ -371,8 +378,8 @@ TEST(type_prop, nms_v4_incorrect_boxes_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -386,8 
+393,8 @@ TEST(type_prop, nms_v4_incorrect_scores_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2}); make_shared(boxes, scores); } @@ -401,8 +408,8 @@ TEST(type_prop, nms_v4_incorrect_scheme_num_batches) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{2, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 2, 3}); make_shared(boxes, scores); } @@ -417,8 +424,8 @@ TEST(type_prop, nms_v4_incorrect_scheme_num_boxes) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -432,11 +439,11 @@ TEST(type_prop, nms_v4_incorrect_scheme_num_boxes) TEST(type_prop, nms_v4_scalar_inputs_check) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); - const auto scalar = make_shared(element::f32, Shape{}); - const auto non_scalar = make_shared(element::f32, Shape{1}); + const auto scalar = make_shared(element::Type_t::f32, Shape{}); + const auto non_scalar = make_shared(element::Type_t::f32, Shape{1}); try { @@ -469,8 +476,8 @@ TEST(type_prop, nms_v4_scalar_inputs_check) TEST(type_prop, nms_v4_output_shape) { - const auto boxes = make_shared(element::f32, Shape{5, 2, 4}); - const auto scores = make_shared(element::f32, Shape{5, 3, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{5, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{5, 3, 2}); const auto nms = make_shared(boxes, scores); const auto nms_out_ps = nms->get_output_partial_shape(0); @@ -482,41 +489,44 @@ TEST(type_prop, nms_v4_output_shape) TEST(type_prop, nms_v4_output_shape_2) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i32, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i32, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{2 * 5 * 3, 3})); } TEST(type_prop, nms_v4_output_shape_3) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 
7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {1000}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {1000}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_EQ(nms->get_shape(), (Shape{2 * 5 * 7, 3})); } TEST(type_prop, nms_v4_output_shape_i32) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared(boxes, @@ -526,24 +536,25 @@ TEST(type_prop, nms_v4_output_shape_i32) score_threshold, op::v3::NonMaxSuppression::BoxEncodingType::CORNER, true, - element::i32); + element::Type_t::i32); - ASSERT_EQ(nms->get_element_type(), element::i32); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i32); ASSERT_EQ(nms->get_shape(), (Shape{30, 3})); } TEST(type_prop, nms_v4_dynamic_boxes_and_scores) { - const auto boxes = make_shared(element::f32, PartialShape::dynamic()); - const auto scores = make_shared(element::f32, PartialShape::dynamic()); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto scores = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_element_type(), element::i64); + ASSERT_EQ(nms->get_element_type(), element::Type_t::i64); ASSERT_TRUE( nms->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 3})); } @@ -554,8 +565,8 @@ TEST(type_prop, nms_v5_incorrect_boxes_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + const auto scores = 
make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -569,8 +580,8 @@ TEST(type_prop, nms_v5_incorrect_scores_rank) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2}); make_shared(boxes, scores); } @@ -584,8 +595,8 @@ TEST(type_prop, nms_v5_incorrect_scheme_num_batches) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{2, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 2, 3}); make_shared(boxes, scores); } @@ -600,8 +611,8 @@ TEST(type_prop, nms_v5_incorrect_scheme_num_boxes) { try { - const auto boxes = make_shared(element::f32, Shape{1, 2, 3}); - const auto scores = make_shared(element::f32, Shape{1, 2, 3}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 3}); make_shared(boxes, scores); } @@ -615,11 +626,11 @@ TEST(type_prop, nms_v5_incorrect_scheme_num_boxes) TEST(type_prop, nms_v5_scalar_inputs_check) { - const auto boxes = make_shared(element::f32, Shape{1, 2, 4}); - const auto scores = make_shared(element::f32, Shape{1, 2, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{1, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{1, 2, 2}); - const auto scalar = make_shared(element::f32, Shape{}); - const auto non_0d_or_1d = make_shared(element::f32, Shape{2}); + const auto scalar = make_shared(element::Type_t::f32, Shape{}); + const auto non_0d_or_1d = make_shared(element::Type_t::f32, Shape{2}); try { @@ -664,8 +675,8 @@ TEST(type_prop, nms_v5_scalar_inputs_check) TEST(type_prop, nms_v5_output_shape) { - const auto boxes = make_shared(element::f32, Shape{5, 2, 4}); - const auto scores = make_shared(element::f32, Shape{5, 3, 2}); + const auto boxes = make_shared(element::Type_t::f32, Shape{5, 2, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{5, 3, 2}); const auto nms = make_shared(boxes, scores); @@ -679,18 +690,19 @@ TEST(type_prop, nms_v5_output_shape) TEST(type_prop, nms_v5_output_shape_2) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i32, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i32, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_output_element_type(0), element::i64); - ASSERT_EQ(nms->get_output_element_type(1), element::f32); - ASSERT_EQ(nms->get_output_element_type(2), element::i64); + ASSERT_EQ(nms->get_output_element_type(0), element::Type_t::i64); + ASSERT_EQ(nms->get_output_element_type(1), 
element::Type_t::f32); + ASSERT_EQ(nms->get_output_element_type(2), element::Type_t::i64); EXPECT_EQ(nms->get_output_partial_shape(0), PartialShape({Dimension(0, 30), Dimension(3)})); EXPECT_EQ(nms->get_output_partial_shape(1), PartialShape({Dimension(0, 30), Dimension(3)})); @@ -699,18 +711,19 @@ TEST(type_prop, nms_v5_output_shape_2) TEST(type_prop, nms_v5_output_shape_3) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {1000}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {1000}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_output_element_type(0), element::i64); - ASSERT_EQ(nms->get_output_element_type(1), element::f32); - ASSERT_EQ(nms->get_output_element_type(2), element::i64); + ASSERT_EQ(nms->get_output_element_type(0), element::Type_t::i64); + ASSERT_EQ(nms->get_output_element_type(1), element::Type_t::f32); + ASSERT_EQ(nms->get_output_element_type(2), element::Type_t::i64); EXPECT_EQ(nms->get_output_partial_shape(0), PartialShape({Dimension(0, 70), Dimension(3)})); EXPECT_EQ(nms->get_output_partial_shape(1), PartialShape({Dimension(0, 70), Dimension(3)})); EXPECT_EQ(nms->get_output_shape(2), (Shape{1})); @@ -718,11 +731,12 @@ TEST(type_prop, nms_v5_output_shape_3) TEST(type_prop, nms_v5_output_shape_i32) { - const auto boxes = make_shared(element::f32, Shape{2, 7, 4}); - const auto scores = make_shared(element::f32, Shape{2, 5, 7}); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, Shape{2, 7, 4}); + const auto scores = make_shared(element::Type_t::f32, Shape{2, 5, 7}); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared(boxes, @@ -732,11 +746,11 @@ TEST(type_prop, nms_v5_output_shape_i32) score_threshold, op::v5::NonMaxSuppression::BoxEncodingType::CORNER, true, - element::i32); + element::Type_t::i32); - ASSERT_EQ(nms->get_output_element_type(0), element::i32); - ASSERT_EQ(nms->get_output_element_type(1), element::f32); - ASSERT_EQ(nms->get_output_element_type(2), element::i32); + ASSERT_EQ(nms->get_output_element_type(0), element::Type_t::i32); + ASSERT_EQ(nms->get_output_element_type(1), element::Type_t::f32); + ASSERT_EQ(nms->get_output_element_type(2), element::Type_t::i32); EXPECT_EQ(nms->get_output_partial_shape(0), PartialShape({Dimension(0, 30), Dimension(3)})); EXPECT_EQ(nms->get_output_partial_shape(1), PartialShape({Dimension(0, 30), Dimension(3)})); @@ -745,18 +759,19 @@ TEST(type_prop, 
nms_v5_output_shape_i32) TEST(type_prop, nms_v5_dynamic_boxes_and_scores) { - const auto boxes = make_shared(element::f32, PartialShape::dynamic()); - const auto scores = make_shared(element::f32, PartialShape::dynamic()); - const auto max_output_boxes_per_class = op::Constant::create(element::i16, Shape{}, {3}); - const auto iou_threshold = make_shared(element::f32, Shape{}); - const auto score_threshold = make_shared(element::f32, Shape{}); + const auto boxes = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto scores = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto max_output_boxes_per_class = + op::Constant::create(element::Type_t::i16, Shape{}, {3}); + const auto iou_threshold = make_shared(element::Type_t::f32, Shape{}); + const auto score_threshold = make_shared(element::Type_t::f32, Shape{}); const auto nms = make_shared( boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold); - ASSERT_EQ(nms->get_output_element_type(0), element::i64); - ASSERT_EQ(nms->get_output_element_type(1), element::f32); - ASSERT_EQ(nms->get_output_element_type(2), element::i64); + ASSERT_EQ(nms->get_output_element_type(0), element::Type_t::i64); + ASSERT_EQ(nms->get_output_element_type(1), element::Type_t::f32); + ASSERT_EQ(nms->get_output_element_type(2), element::Type_t::i64); EXPECT_EQ(nms->get_output_partial_shape(0), PartialShape({Dimension::dynamic(), 3})); EXPECT_EQ(nms->get_output_partial_shape(1), PartialShape({Dimension::dynamic(), 3})); EXPECT_EQ(nms->get_output_shape(2), (Shape{1})); diff --git a/ngraph/test/type_prop/non_zero.cpp b/ngraph/test/type_prop/non_zero.cpp index 03ad7397c82..1f22ec9fb19 100644 --- a/ngraph/test/type_prop/non_zero.cpp +++ b/ngraph/test/type_prop/non_zero.cpp @@ -23,38 +23,38 @@ using namespace ngraph; TEST(type_prop, non_zero) { - auto data = make_shared(element::f32, Shape{3, 3, 224, 224}); + auto data = make_shared(element::Type_t::f32, Shape{3, 3, 224, 224}); auto non_zero = make_shared(data); - EXPECT_EQ(non_zero->get_element_type(), element::i64); + EXPECT_EQ(non_zero->get_element_type(), element::Type_t::i64); EXPECT_TRUE( non_zero->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } TEST(type_prop, non_zero_dynamic) { - auto data = make_shared(element::f32, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto non_zero = make_shared(data); - EXPECT_EQ(non_zero->get_element_type(), element::i64); + EXPECT_EQ(non_zero->get_element_type(), element::Type_t::i64); EXPECT_TRUE(non_zero->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), Dimension::dynamic()})); } TEST(type_prop, non_zero_output_type) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto non_zero = make_shared(data, element::i32); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto non_zero = make_shared(data, element::Type_t::i32); - ASSERT_EQ(non_zero->get_output_element_type(0), element::i32); + ASSERT_EQ(non_zero->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE( non_zero->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } TEST(type_prop, non_zero_string_output_type) { - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto non_zero = make_shared(data, "i32"); - ASSERT_EQ(non_zero->get_output_element_type(0), element::i32); + 
ASSERT_EQ(non_zero->get_output_element_type(0), element::Type_t::i32); EXPECT_TRUE( non_zero->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension::dynamic()})); } @@ -62,10 +62,10 @@ TEST(type_prop, non_zero_string_output_type) TEST(type_prop, non_zero_fail_index_element_type) { // Deduce type - auto data = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto data = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); try { - auto non_zero = make_shared(data, element::i16); + auto non_zero = make_shared(data, element::Type_t::i16); // Should have thrown, so fail if it didn't FAIL() << "Invalid output type not detected"; diff --git a/ngraph/test/type_prop/normalize.cpp b/ngraph/test/type_prop/normalize.cpp index 03f342e5ba8..9d0b9af0394 100644 --- a/ngraph/test/type_prop/normalize.cpp +++ b/ngraph/test/type_prop/normalize.cpp @@ -24,8 +24,8 @@ using namespace ngraph; TEST(type_prop, normalize_axes_input_not_constant) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - auto axes = make_shared(element::u64, Shape{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto axes = make_shared(element::Type_t::u64, Shape{1}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -48,8 +48,9 @@ TEST(type_prop, normalize_axes_input_not_constant) TEST(type_prop, normalize_invalid_axes_rank) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{1, 2}, vector{1, 2}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{1, 2}, vector{1, 2}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; @@ -73,8 +74,9 @@ TEST(type_prop, normalize_invalid_axes_rank) TEST(type_prop, normalize_axes_out_of_bounds) { Shape data_shape{1, 2, 3, 4}; - auto data = make_shared(element::f32, data_shape); - const auto axes = make_shared(element::i64, Shape{2}, vector{3, 4}); + auto data = make_shared(element::Type_t::f32, data_shape); + const auto axes = + make_shared(element::Type_t::i64, Shape{2}, vector{3, 4}); float eps{1e-6f}; auto eps_mode = op::EpsMode::ADD; diff --git a/ngraph/test/type_prop/one_hot.cpp b/ngraph/test/type_prop/one_hot.cpp index 07a5f3bac16..8bd13bfa0d7 100644 --- a/ngraph/test/type_prop/one_hot.cpp +++ b/ngraph/test/type_prop/one_hot.cpp @@ -23,34 +23,34 @@ using namespace ngraph; TEST(type_prop, one_hot_v1_output_shape) { - auto indices = make_shared(element::i64, Shape{3}); - auto depth = op::Constant::create(element::i64, Shape{}, {2}); - auto on_value = op::Constant::create(element::u32, Shape{}, {5}); - auto off_value = op::Constant::create(element::u32, Shape{}, {10}); + auto indices = make_shared(element::Type_t::i64, Shape{3}); + auto depth = op::Constant::create(element::Type_t::i64, Shape{}, {2}); + auto on_value = op::Constant::create(element::Type_t::u32, Shape{}, {5}); + auto off_value = op::Constant::create(element::Type_t::u32, Shape{}, {10}); int64_t axis = -1; auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); - ASSERT_EQ(ont_hot->get_element_type(), element::u32); + ASSERT_EQ(ont_hot->get_element_type(), element::Type_t::u32); ASSERT_EQ(ont_hot->get_shape(), (Shape{3, 2})); } TEST(type_prop, one_hot_v1_output_shape_2) { - auto indices = make_shared(element::i64, Shape{1, 3, 2, 3}); - auto depth = op::Constant::create(element::i64, Shape{}, {4}); - auto on_value = op::Constant::create(element::f32, Shape{}, {1.0f}); - auto off_value = 
op::Constant::create(element::f32, Shape{}, {0.0f}); + auto indices = make_shared(element::Type_t::i64, Shape{1, 3, 2, 3}); + auto depth = op::Constant::create(element::Type_t::i64, Shape{}, {4}); + auto on_value = op::Constant::create(element::Type_t::f32, Shape{}, {1.0f}); + auto off_value = op::Constant::create(element::Type_t::f32, Shape{}, {0.0f}); int64_t axis = 3; auto ont_hot = make_shared(indices, depth, on_value, off_value, axis); - ASSERT_EQ(ont_hot->get_element_type(), element::f32); + ASSERT_EQ(ont_hot->get_element_type(), element::Type_t::f32); ASSERT_EQ(ont_hot->get_shape(), (Shape{1, 3, 2, 4, 3})); } TEST(type_prop, one_hot_v1_indices_elem_not_integral) { - auto indices = make_shared(element::f16, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::u32, Shape{}); - auto off_value = make_shared(element::u32, Shape{}); + auto indices = make_shared(element::Type_t::f16, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{}); + auto on_value = make_shared(element::Type_t::u32, Shape{}); + auto off_value = make_shared(element::Type_t::u32, Shape{}); int64_t axis = -1; try { @@ -70,10 +70,10 @@ TEST(type_prop, one_hot_v1_indices_elem_not_integral) TEST(type_prop, one_hot_v1_depth_elem_not_integral) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::f16, Shape{}); - auto on_value = make_shared(element::u32, Shape{}); - auto off_value = make_shared(element::u32, Shape{}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::f16, Shape{}); + auto on_value = make_shared(element::Type_t::u32, Shape{}); + auto off_value = make_shared(element::Type_t::u32, Shape{}); int64_t axis = -1; try { @@ -93,10 +93,10 @@ TEST(type_prop, one_hot_v1_depth_elem_not_integral) TEST(type_prop, one_hot_v1_on_off_values_not_compatible) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::f16, Shape{}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{}); + auto on_value = make_shared(element::Type_t::bf16, Shape{}); + auto off_value = make_shared(element::Type_t::f16, Shape{}); int64_t axis = -1; try { @@ -118,10 +118,10 @@ TEST(type_prop, one_hot_v1_on_off_values_not_compatible) TEST(type_prop, one_hot_v1_depth_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{1}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::bf16, Shape{}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{1}); + auto on_value = make_shared(element::Type_t::bf16, Shape{}); + auto off_value = make_shared(element::Type_t::bf16, Shape{}); int64_t axis = -1; try { @@ -141,10 +141,10 @@ TEST(type_prop, one_hot_v1_depth_not_scalar) TEST(type_prop, one_hot_v1_on_value_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{2}); - auto off_value = make_shared(element::bf16, Shape{}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{}); + auto on_value = make_shared(element::Type_t::bf16, 
Shape{2}); + auto off_value = make_shared(element::Type_t::bf16, Shape{}); int64_t axis = -1; try { @@ -164,10 +164,10 @@ TEST(type_prop, one_hot_v1_on_value_not_scalar) TEST(type_prop, one_hot_v1_off_value_not_scalar) { - auto indices = make_shared(element::i64, Shape{2, 2}); - auto depth = make_shared(element::i64, Shape{}); - auto on_value = make_shared(element::bf16, Shape{}); - auto off_value = make_shared(element::bf16, Shape{3}); + auto indices = make_shared(element::Type_t::i64, Shape{2, 2}); + auto depth = make_shared(element::Type_t::i64, Shape{}); + auto on_value = make_shared(element::Type_t::bf16, Shape{}); + auto off_value = make_shared(element::Type_t::bf16, Shape{3}); int64_t axis = -1; try { diff --git a/ngraph/test/type_prop/pad.cpp b/ngraph/test/type_prop/pad.cpp index c7a43737a17..106b2e43dad 100644 --- a/ngraph/test/type_prop/pad.cpp +++ b/ngraph/test/type_prop/pad.cpp @@ -25,10 +25,10 @@ using namespace ngraph; TEST(type_prop, pad_v1_arg_pad_value_type_mismatch) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); - auto arg_pad_value = make_shared(element::f16, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); + auto arg_pad_value = make_shared(element::Type_t::f16, Shape{1}); try { @@ -52,10 +52,10 @@ TEST(type_prop, pad_v1_arg_pad_value_type_mismatch) TEST(type_prop, pad_v1_arg_pad_value_shape_not_compatible) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); - auto arg_pad_value = make_shared(element::f32, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); + auto arg_pad_value = make_shared(element::Type_t::f32, Shape{1}); try { @@ -78,9 +78,9 @@ TEST(type_prop, pad_v1_arg_pad_value_shape_not_compatible) TEST(type_prop, pad_v1_pads_begin_shape_not_1D) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1, 2}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1, 2}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); try { @@ -102,9 +102,9 @@ TEST(type_prop, pad_v1_pads_begin_shape_not_1D) TEST(type_prop, pad_v1_pads_end_shape_not_1D) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1, 2}); try { @@ -125,9 +125,9 @@ TEST(type_prop, pad_v1_pads_end_shape_not_1D) TEST(type_prop, pad_v1_pads_begin_size_not_correct) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{4}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{4}); + auto pads_end = 
make_shared(element::Type_t::i64, Shape{1}); try { @@ -150,10 +150,10 @@ TEST(type_prop, pad_v1_pads_begin_size_not_correct) TEST(type_prop, pad_v1_pads_end_size_not_correct) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{4}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{4}); + auto arg_pad_value = make_shared(element::Type_t::f32, Shape{}); try { @@ -178,9 +178,9 @@ TEST(type_prop, pad_v1_pads_end_size_not_correct) TEST(type_prop, pad_v1_arg_pads_begin_incompatible_type) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::f32, Shape{1}); - auto pads_end = make_shared(element::i64, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::f32, Shape{1}); + auto pads_end = make_shared(element::Type_t::i64, Shape{1}); try { @@ -202,9 +202,9 @@ TEST(type_prop, pad_v1_arg_pads_begin_incompatible_type) TEST(type_prop, pad_v1_arg_pads_end_incompatible_type) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto pads_begin = make_shared(element::i64, Shape{1}); - auto pads_end = make_shared(element::f32, Shape{1}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto pads_begin = make_shared(element::Type_t::i64, Shape{1}); + auto pads_end = make_shared(element::Type_t::f32, Shape{1}); try { @@ -226,12 +226,12 @@ TEST(type_prop, pad_v1_arg_pads_end_incompatible_type) TEST(type_prop, pad_v1_deduce_too_small_for_edge) { - auto arg = make_shared(element::f32, Shape{1, 5, 0, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 5, 0, 2}); auto pads_begin = - make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); + make_shared(element::Type_t::i64, Shape{4}, std::vector{0, 1, 2, 3}); auto pads_end = - make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + make_shared(element::Type_t::i64, Shape{4}, std::vector{0, 1, 2, 3}); + auto arg_pad_value = make_shared(element::Type_t::f32, Shape{}); try { @@ -255,12 +255,12 @@ TEST(type_prop, pad_v1_deduce_too_small_for_edge) TEST(type_prop, pad_v1_deduce_too_small_for_reflect) { - auto arg = make_shared(element::f32, Shape{1, 5, 1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 5, 1, 2}); auto pads_begin = - make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); + make_shared(element::Type_t::i64, Shape{4}, std::vector{0, 1, 2, 3}); auto pads_end = - make_shared(element::i64, Shape{4}, std::vector{0, 1, 2, 3}); - auto arg_pad_value = make_shared(element::f32, Shape{}); + make_shared(element::Type_t::i64, Shape{4}, std::vector{0, 1, 2, 3}); + auto arg_pad_value = make_shared(element::Type_t::f32, Shape{}); try { diff --git a/ngraph/test/type_prop/parameter.cpp b/ngraph/test/type_prop/parameter.cpp index 6208dbb7f36..a78b2c49035 100644 --- a/ngraph/test/type_prop/parameter.cpp +++ b/ngraph/test/type_prop/parameter.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, param_partial_rank_dynamic) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto& pshape = a->get_output_partial_shape(0); @@ -33,7 +33,8 @@ 
TEST(type_prop, param_partial_rank_dynamic) TEST(type_prop, param_partial_rank_static) { - auto a = make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3, 4}); + auto a = make_shared(element::Type_t::f32, + PartialShape{2, Dimension::dynamic(), 3, 4}); auto& pshape = a->get_output_partial_shape(0); diff --git a/ngraph/test/type_prop/prelu.cpp b/ngraph/test/type_prop/prelu.cpp index d4b95cbb4d6..27fb45b64d8 100644 --- a/ngraph/test/type_prop/prelu.cpp +++ b/ngraph/test/type_prop/prelu.cpp @@ -23,10 +23,10 @@ using namespace ngraph; TEST(type_prop, prelu) { - auto param = make_shared(element::f32, Shape{2, 4}); - auto slope = make_shared(element::f32, Shape{2}); + auto param = make_shared(element::Type_t::f32, Shape{2, 4}); + auto slope = make_shared(element::Type_t::f32, Shape{2}); Shape prelu_shape{2, 4}; auto prelu = make_shared(param, slope); - ASSERT_EQ(prelu->get_element_type(), element::f32); + ASSERT_EQ(prelu->get_element_type(), element::Type_t::f32); ASSERT_EQ(prelu->get_shape(), prelu_shape); } diff --git a/ngraph/test/type_prop/proposal.cpp b/ngraph/test/type_prop/proposal.cpp index 9b92b790bf6..10bc01b4bf1 100644 --- a/ngraph/test/type_prop/proposal.cpp +++ b/ngraph/test/type_prop/proposal.cpp @@ -27,9 +27,9 @@ using namespace ngraph; TEST(type_prop, proposal_v0_invalid_class_probs_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); try { @@ -52,9 +52,9 @@ TEST(type_prop, proposal_v0_invalid_class_probs_rank) TEST(type_prop, proposal_v0_invalid_class_bbox_deltas_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); try { @@ -78,9 +78,9 @@ TEST(type_prop, proposal_v0_invalid_class_bbox_deltas_rank) TEST(type_prop, proposal_v0_invalid_image_shape_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{2, 1}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{2, 1}); try { @@ -103,9 +103,9 @@ TEST(type_prop, proposal_v0_invalid_image_shape_rank) TEST(type_prop, proposal_v0_invalid_image_shape_size) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{5}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{5}); try { @@ -135,10 +135,11 @@ 
TEST(type_prop, proposal_v0_shape_infer) attrs.post_nms_topn = 200; const size_t batch_size = 7; - auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); + auto class_probs = + make_shared(element::Type_t::f32, Shape{batch_size, 12, 34, 62}); auto class_bbox_deltas = - make_shared(element::f32, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::f32, Shape{3}); + make_shared(element::Type_t::f32, Shape{batch_size, 24, 34, 62}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); ASSERT_EQ(op->get_output_shape(0), (Shape{batch_size * attrs.post_nms_topn, 5})); } @@ -148,9 +149,9 @@ TEST(type_prop, proposal_v0_shape_infer) TEST(type_prop, proposal_v4_invalid_class_probs_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); try { @@ -173,9 +174,9 @@ TEST(type_prop, proposal_v4_invalid_class_probs_rank) TEST(type_prop, proposal_v4_invalid_class_bbox_deltas_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3}); - auto image_shape = make_shared(element::f32, Shape{3}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto image_shape = make_shared(element::Type_t::f32, Shape{3}); try { @@ -199,9 +200,9 @@ TEST(type_prop, proposal_v4_invalid_class_bbox_deltas_rank) TEST(type_prop, proposal_v4_invalid_image_shape_rank) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{2, 1}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{2, 1}); try { @@ -224,9 +225,9 @@ TEST(type_prop, proposal_v4_invalid_image_shape_rank) TEST(type_prop, proposal_v4_invalid_image_shape_size) { op::ProposalAttrs attrs; - auto class_probs = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto class_bbox_deltas = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto image_shape = make_shared(element::f32, Shape{5}); + auto class_probs = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto class_bbox_deltas = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto image_shape = make_shared(element::Type_t::f32, Shape{5}); try { @@ -256,10 +257,11 @@ TEST(type_prop, proposal_v4_shape_infer) attrs.post_nms_topn = 200; const size_t batch_size = 7; - auto class_probs = make_shared(element::f32, Shape{batch_size, 12, 34, 62}); + auto class_probs = + make_shared(element::Type_t::f32, Shape{batch_size, 12, 34, 62}); auto class_bbox_deltas = - make_shared(element::f32, Shape{batch_size, 24, 34, 62}); - auto image_shape = make_shared(element::f32, Shape{3}); + make_shared(element::Type_t::f32, Shape{batch_size, 24, 34, 62}); + auto image_shape = 
make_shared(element::Type_t::f32, Shape{3}); auto op = make_shared(class_probs, class_bbox_deltas, image_shape, attrs); ASSERT_EQ(op->get_output_shape(0), (Shape{batch_size * attrs.post_nms_topn, 5})); ASSERT_EQ(op->get_output_shape(1), (Shape{batch_size * attrs.post_nms_topn})); diff --git a/ngraph/test/type_prop/quantize.cpp b/ngraph/test/type_prop/quantize.cpp index ee7cfbebd5d..4b8af66ce3f 100644 --- a/ngraph/test/type_prop/quantize.cpp +++ b/ngraph/test/type_prop/quantize.cpp @@ -28,8 +28,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_per_channel_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{3}; Shape zero_point_shape{3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -51,8 +51,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_per_image_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64}; Shape zero_point_shape{64}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -74,8 +74,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_per_row_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{480}; Shape zero_point_shape{480}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -97,8 +97,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_per_image_channel_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 3}; Shape zero_point_shape{64, 3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -120,8 +120,8 @@ TEST(type_prop, quantize_f32_to_i8_nchw_whole_batch_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -143,8 +143,8 @@ TEST(type_prop, quantize_f64_to_i8_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f64; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f64; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -166,8 +166,8 @@ TEST(type_prop, 
quantize_f64_to_u8_ok) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f64; - element::Type quantized_type = element::u8; + element::Type unquantized_type = element::Type_t::f64; + element::Type quantized_type = element::Type_t::u8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -189,8 +189,8 @@ TEST(type_prop, quantize_f64_to_dyn_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f64; - element::Type quantized_type = element::dynamic; + element::Type unquantized_type = element::Type_t::f64; + element::Type quantized_type = element::Type_t::dynamic; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -222,8 +222,8 @@ TEST(type_prop, quantize_i8_to_u8_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::i8; - element::Type quantized_type = element::u8; + element::Type unquantized_type = element::Type_t::i8; + element::Type quantized_type = element::Type_t::u8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -256,8 +256,8 @@ TEST(type_prop, quantize_f32_to_f32_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::f32; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::f32; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -289,10 +289,10 @@ TEST(type_prop, quantize_batch_scale_type_mismatch_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; - element::Type scale_type = element::f64; + element::Type scale_type = element::Type_t::f64; element::Type zero_point_type = quantized_type; AxisSet axes{}; auto round_mode = op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_INFINITY; @@ -323,11 +323,11 @@ TEST(type_prop, quantize_zero_point_type_mismatch_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{}; Shape zero_point_shape{}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; - element::Type zero_point_type = element::u8; + element::Type zero_point_type = element::Type_t::u8; AxisSet axes{}; auto round_mode = op::Quantize::RoundMode::ROUND_NEAREST_TOWARD_INFINITY; @@ -357,8 +357,8 @@ TEST(type_prop, quantize_oob_axis_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{320}; Shape zero_point_shape{320}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type 
quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -391,8 +391,8 @@ TEST(type_prop, quantize_scale_shape_mismatch_same_rank_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 4}; Shape zero_point_shape{64, 3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -425,8 +425,8 @@ TEST(type_prop, quantize_scale_shape_mismatch_different_rank_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 3, 2}; Shape zero_point_shape{64, 3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -459,8 +459,8 @@ TEST(type_prop, quantize_zero_point_shape_mismatch_same_rank_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 3}; Shape zero_point_shape{64, 4}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -493,8 +493,8 @@ TEST(type_prop, quantize_zero_point_shape_mismatch_different_rank_fails) Shape batch_shape{64, 3, 480, 640}; Shape scale_shape{64, 3}; Shape zero_point_shape{64, 3, 2}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -527,8 +527,8 @@ TEST(type_prop, quantize_partial_all_rank_dynamic_ok) PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{PartialShape::dynamic()}; PartialShape zero_point_shape{PartialShape::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -551,8 +551,8 @@ TEST(type_prop, PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96}; PartialShape zero_point_shape{PartialShape::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -576,8 +576,8 @@ TEST( PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96}; PartialShape 
zero_point_shape{PartialShape::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -613,8 +613,8 @@ TEST( PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()}; PartialShape zero_point_shape{64, 22, Dimension::dynamic(), Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -638,8 +638,8 @@ TEST( PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()}; PartialShape zero_point_shape{64, 22, Dimension::dynamic(), Dimension::dynamic(), 3}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -675,8 +675,8 @@ TEST( PartialShape batch_shape{PartialShape::dynamic()}; PartialShape scale_shape{64, Dimension::dynamic(), 96, Dimension::dynamic()}; PartialShape zero_point_shape{65, 22, Dimension::dynamic(), Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -712,8 +712,8 @@ TEST( PartialShape batch_shape{2, 4, 6, Dimension::dynamic(), 10, Dimension::dynamic()}; PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()}; PartialShape zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -738,8 +738,8 @@ TEST( PartialShape batch_shape{2, 4, 6, Dimension::dynamic(), 10, Dimension::dynamic()}; PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()}; PartialShape zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; @@ -774,8 +774,8 @@ TEST( PartialShape batch_shape{2, 5, 6, Dimension::dynamic(), 10, Dimension::dynamic()}; PartialShape scale_shape{4, Dimension::dynamic(), Dimension::dynamic()}; PartialShape 
zero_point_shape{Dimension::dynamic(), 8, Dimension::dynamic()}; - element::Type unquantized_type = element::f32; - element::Type quantized_type = element::i8; + element::Type unquantized_type = element::Type_t::f32; + element::Type quantized_type = element::Type_t::i8; element::Type batch_type = unquantized_type; element::Type scale_type = unquantized_type; element::Type zero_point_type = quantized_type; diff --git a/ngraph/test/type_prop/range.cpp b/ngraph/test/type_prop/range.cpp index 5fcfc8e3368..ec3f9b08ffd 100644 --- a/ngraph/test/type_prop/range.cpp +++ b/ngraph/test/type_prop/range.cpp @@ -23,57 +23,57 @@ using namespace ngraph; TEST(type_prop, range_nonconst_ok) { - auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::Type_t::i32, Shape{}); + auto stop = make_shared(element::Type_t::i32, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::i32); + EXPECT_EQ(range->get_element_type(), element::Type_t::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_some_dyn_et_ok) { - auto start = make_shared(element::i32, Shape{}); - auto stop = make_shared(element::dynamic, Shape{}); - auto step = make_shared(element::i32, Shape{}); + auto start = make_shared(element::Type_t::i32, Shape{}); + auto stop = make_shared(element::Type_t::dynamic, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::i32); + EXPECT_EQ(range->get_element_type(), element::Type_t::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_all_dyn_et_ok) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::dynamic, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::Type_t::dynamic, Shape{}); + auto stop = make_shared(element::Type_t::dynamic, Shape{}); + auto step = make_shared(element::Type_t::dynamic, Shape{}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::dynamic); + EXPECT_EQ(range->get_element_type(), element::Type_t::dynamic); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_f32_ok) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::Type_t::dynamic, Shape{}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = make_shared(element::Type_t::dynamic, Shape{}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::f32); + EXPECT_EQ(range->get_element_type(), element::Type_t::f32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_nonconst_boolean_fails) { - auto start = make_shared(element::dynamic, Shape{}); - auto stop = make_shared(element::boolean, Shape{}); - auto step = make_shared(element::dynamic, Shape{}); + auto start = make_shared(element::Type_t::dynamic, Shape{}); + auto stop = make_shared(element::Type_t::boolean, Shape{}); + auto step = 
make_shared(element::Type_t::dynamic, Shape{}); try { @@ -93,21 +93,21 @@ TEST(type_prop, range_nonconst_boolean_fails) TEST(type_prop, range_some_const_ok) { - auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}, std::vector{2}); + auto start = make_shared(element::Type_t::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::i32, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}, std::vector{2}); auto range = make_shared(start, stop, step); - EXPECT_EQ(range->get_element_type(), element::i32); + EXPECT_EQ(range->get_element_type(), element::Type_t::i32); EXPECT_TRUE(range->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, range_some_const_zero_stride_fails) { - auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}); - auto step = make_shared(element::i32, Shape{}, std::vector{0}); + auto start = make_shared(element::Type_t::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::i32, Shape{}); + auto step = make_shared(element::Type_t::i32, Shape{}, std::vector{0}); try { @@ -127,9 +127,9 @@ TEST(type_prop, range_some_const_zero_stride_fails) TEST(type_prop, range_some_const_plus_inf_start_fails) { auto start = make_shared( - element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + element::Type_t::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -149,9 +149,9 @@ TEST(type_prop, range_some_const_plus_inf_start_fails) TEST(type_prop, range_some_const_minus_inf_start_fails) { auto start = make_shared( - element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + element::Type_t::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -171,9 +171,9 @@ TEST(type_prop, range_some_const_minus_inf_start_fails) TEST(type_prop, range_some_const_nan_start_fails) { auto start = - make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + make_shared(element::Type_t::f32, Shape{}, std::vector{std::nanf("")}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -192,10 +192,10 @@ TEST(type_prop, range_some_const_nan_start_fails) TEST(type_prop, range_some_const_plus_inf_stop_fails) { - auto start = make_shared(element::f32, Shape{}); + auto start = make_shared(element::Type_t::f32, Shape{}); auto stop = make_shared( - element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + element::Type_t::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -214,10 +214,10 @@ TEST(type_prop, range_some_const_plus_inf_stop_fails) 
TEST(type_prop, range_some_const_minus_inf_stop_fails) { - auto start = make_shared(element::f32, Shape{}); + auto start = make_shared(element::Type_t::f32, Shape{}); auto stop = make_shared( - element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + element::Type_t::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -236,9 +236,10 @@ TEST(type_prop, range_some_const_minus_inf_stop_fails) TEST(type_prop, range_some_const_nan_stio_fails) { - auto start = make_shared(element::f32, Shape{}); - auto stop = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); - auto step = make_shared(element::f32, Shape{}, std::vector{1}); + auto start = make_shared(element::Type_t::f32, Shape{}); + auto stop = + make_shared(element::Type_t::f32, Shape{}, std::vector{std::nanf("")}); + auto step = make_shared(element::Type_t::f32, Shape{}, std::vector{1}); try { @@ -257,10 +258,10 @@ TEST(type_prop, range_some_const_nan_stio_fails) TEST(type_prop, range_some_const_plus_inf_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); + auto start = make_shared(element::Type_t::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::f32, Shape{}); auto step = make_shared( - element::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); + element::Type_t::f32, Shape{}, std::vector{std::numeric_limits::infinity()}); try { @@ -279,10 +280,10 @@ TEST(type_prop, range_some_const_plus_inf_stride_fails) TEST(type_prop, range_some_const_minus_inf_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); + auto start = make_shared(element::Type_t::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::f32, Shape{}); auto step = make_shared( - element::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); + element::Type_t::f32, Shape{}, std::vector{-std::numeric_limits::infinity()}); try { @@ -301,9 +302,10 @@ TEST(type_prop, range_some_const_minus_inf_stride_fails) TEST(type_prop, range_some_const_nan_stride_fails) { - auto start = make_shared(element::f32, Shape{}, std::vector{3}); - auto stop = make_shared(element::f32, Shape{}); - auto step = make_shared(element::f32, Shape{}, std::vector{std::nanf("")}); + auto start = make_shared(element::Type_t::f32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::f32, Shape{}); + auto step = + make_shared(element::Type_t::f32, Shape{}, std::vector{std::nanf("")}); try { @@ -322,9 +324,9 @@ TEST(type_prop, range_some_const_nan_stride_fails) TEST(type_prop, range_all_const_zero_stride_fails) { - auto start = make_shared(element::i32, Shape{}, std::vector{3}); - auto stop = make_shared(element::i32, Shape{}, std::vector{5}); - auto step = make_shared(element::i32, Shape{}, std::vector{0}); + auto start = make_shared(element::Type_t::i32, Shape{}, std::vector{3}); + auto stop = make_shared(element::Type_t::i32, Shape{}, std::vector{5}); + auto step = make_shared(element::Type_t::i32, Shape{}, std::vector{0}); try { @@ -371,62 +373,62 @@ struct RangeTest : ::testing::TestWithParam TEST_P(RangeTest, deduce_shape_i8) { - run_range_test(element::i8, GetParam()); + run_range_test(element::Type_t::i8, GetParam()); } TEST_P(RangeTest, deduce_shape_i16) { - run_range_test(element::i16, 
GetParam()); + run_range_test(element::Type_t::i16, GetParam()); } TEST_P(RangeTest, deduce_shape_i32) { - run_range_test(element::i32, GetParam()); + run_range_test(element::Type_t::i32, GetParam()); } TEST_P(RangeTest, deduce_shape_i64) { - run_range_test(element::i64, GetParam()); + run_range_test(element::Type_t::i64, GetParam()); } TEST_P(RangeTest, deduce_shape_u8) { - run_range_test(element::u8, GetParam()); + run_range_test(element::Type_t::u8, GetParam()); } TEST_P(RangeTest, deduce_shape_u16) { - run_range_test(element::u16, GetParam()); + run_range_test(element::Type_t::u16, GetParam()); } TEST_P(RangeTest, deduce_shape_u32) { - run_range_test(element::u32, GetParam()); + run_range_test(element::Type_t::u32, GetParam()); } TEST_P(RangeTest, deduce_shape_u64) { - run_range_test(element::u64, GetParam()); + run_range_test(element::Type_t::u64, GetParam()); } TEST_P(RangeTest, deduce_shape_bf16) { - run_range_test(element::bf16, GetParam()); + run_range_test(element::Type_t::bf16, GetParam()); } TEST_P(RangeTest, deduce_shape_f16) { - run_range_test(element::f16, GetParam()); + run_range_test(element::Type_t::f16, GetParam()); } TEST_P(RangeTest, deduce_shape_f32) { - run_range_test(element::f32, GetParam()); + run_range_test(element::Type_t::f32, GetParam()); } TEST_P(RangeTest, deduce_shape_f64) { - run_range_test(element::f64, GetParam()); + run_range_test(element::Type_t::f64, GetParam()); } INSTANTIATE_TEST_CASE_P(type_prop, @@ -445,42 +447,42 @@ struct RangeTestWithNegatives : ::testing::TestWithParam TEST_P(RangeTestWithNegatives, deduce_shape_i8) { - run_range_test(element::i8, GetParam()); + run_range_test(element::Type_t::i8, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_i16) { - run_range_test(element::i16, GetParam()); + run_range_test(element::Type_t::i16, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_i32) { - run_range_test(element::i32, GetParam()); + run_range_test(element::Type_t::i32, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_i64) { - run_range_test(element::i64, GetParam()); + run_range_test(element::Type_t::i64, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_bf16) { - run_range_test(element::bf16, GetParam()); + run_range_test(element::Type_t::bf16, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_f16) { - run_range_test(element::f16, GetParam()); + run_range_test(element::Type_t::f16, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_f32) { - run_range_test(element::f32, GetParam()); + run_range_test(element::Type_t::f32, GetParam()); } TEST_P(RangeTestWithNegatives, deduce_shape_f64) { - run_range_test(element::f64, GetParam()); + run_range_test(element::Type_t::f64, GetParam()); } INSTANTIATE_TEST_CASE_P(type_prop, @@ -498,22 +500,22 @@ struct RangeTestFloating : ::testing::TestWithParam TEST_P(RangeTestFloating, deduce_shape_bf16) { - run_range_test(element::bf16, GetParam()); + run_range_test(element::Type_t::bf16, GetParam()); } TEST_P(RangeTestFloating, deduce_shape_f16) { - run_range_test(element::f16, GetParam()); + run_range_test(element::Type_t::f16, GetParam()); } TEST_P(RangeTestFloating, deduce_shape_f32) { - run_range_test(element::f32, GetParam()); + run_range_test(element::Type_t::f32, GetParam()); } TEST_P(RangeTestFloating, deduce_shape_f64) { - run_range_test(element::f64, GetParam()); + run_range_test(element::Type_t::f64, GetParam()); } INSTANTIATE_TEST_CASE_P(type_prop, diff --git a/ngraph/test/type_prop/read_value.cpp b/ngraph/test/type_prop/read_value.cpp index 
793ad539285..b096ddb4e43 100644 --- a/ngraph/test/type_prop/read_value.cpp +++ b/ngraph/test/type_prop/read_value.cpp @@ -23,9 +23,9 @@ using namespace ngraph; TEST(type_prop, read_value_deduce) { - auto input = make_shared(element::f32, Shape{1, 2, 64, 64}); + auto input = make_shared(element::Type_t::f32, Shape{1, 2, 64, 64}); auto read_value = make_shared(input, "variable_id"); - ASSERT_EQ(read_value->get_element_type(), element::f32); + ASSERT_EQ(read_value->get_element_type(), element::Type_t::f32); ASSERT_EQ(read_value->get_shape(), (Shape{1, 2, 64, 64})); } diff --git a/ngraph/test/type_prop/reduce_l1.cpp b/ngraph/test/type_prop/reduce_l1.cpp index 1b165f5cd91..6d2812990e4 100644 --- a/ngraph/test/type_prop/reduce_l1.cpp +++ b/ngraph/test/type_prop/reduce_l1.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, reduce_l1_v4_axis_out_of_range) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3}); try { auto reduce_sum = make_shared(arg, axes); @@ -43,8 +43,8 @@ TEST(type_prop, reduce_l1_v4_axis_out_of_range) TEST(type_prop, reduce_l1_v4_shape_if_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = true; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); @@ -52,8 +52,8 @@ TEST(type_prop, reduce_l1_v4_shape_if_keep_dims) TEST(type_prop, reduce_l1_v4_shape_if_not_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = false; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); diff --git a/ngraph/test/type_prop/reduce_l2.cpp b/ngraph/test/type_prop/reduce_l2.cpp index e8f41281746..546938a2eda 100644 --- a/ngraph/test/type_prop/reduce_l2.cpp +++ b/ngraph/test/type_prop/reduce_l2.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, reduce_l2_v4_axis_out_of_range) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3}); try { auto reduce_sum = make_shared(arg, axes); @@ -43,8 +43,8 @@ TEST(type_prop, reduce_l2_v4_axis_out_of_range) TEST(type_prop, reduce_l2_v4_shape_if_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = true; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); @@ -52,8 +52,8 @@ TEST(type_prop, reduce_l2_v4_shape_if_keep_dims) TEST(type_prop, reduce_l2_v4_shape_if_not_keep_dims) { - auto arg = 
make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = false; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); diff --git a/ngraph/test/type_prop/reduce_prod.cpp b/ngraph/test/type_prop/reduce_prod.cpp index 1242a9fee1c..f8fcb2b36da 100644 --- a/ngraph/test/type_prop/reduce_prod.cpp +++ b/ngraph/test/type_prop/reduce_prod.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, reduce_prod_v1_axis_out_of_range) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3}); try { auto reduce_prod = make_shared(arg, axes); @@ -44,8 +44,8 @@ TEST(type_prop, reduce_prod_v1_axis_out_of_range) TEST(type_prop, reduce_prod_v1_shape_if_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = true; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); @@ -53,8 +53,8 @@ TEST(type_prop, reduce_prod_v1_shape_if_keep_dims) TEST(type_prop, reduce_prod_v1_shape_if_not_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = false; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); diff --git a/ngraph/test/type_prop/reduce_sum.cpp b/ngraph/test/type_prop/reduce_sum.cpp index 4b915a937d7..90e50aeec7e 100644 --- a/ngraph/test/type_prop/reduce_sum.cpp +++ b/ngraph/test/type_prop/reduce_sum.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, reduce_sum_v1_axis_out_of_range) { - auto arg = make_shared(element::f32, Shape{1, 2, 3}); - auto axes = make_shared(element::i64, Shape{2}, vector{2, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{1, 2, 3}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{2, 3}); try { auto reduce_sum = make_shared(arg, axes); @@ -44,8 +44,8 @@ TEST(type_prop, reduce_sum_v1_axis_out_of_range) TEST(type_prop, reduce_sum_v1_shape_if_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = true; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3, 1, 1})); @@ -53,8 +53,8 @@ TEST(type_prop, reduce_sum_v1_shape_if_keep_dims) TEST(type_prop, reduce_sum_v1_shape_if_not_keep_dims) { - auto arg = make_shared(element::f32, Shape{3, 4, 5}); - auto axes = make_shared(element::i64, Shape{2}, vector{1, 2}); + auto arg = 
make_shared(element::Type_t::f32, Shape{3, 4, 5}); + auto axes = make_shared(element::Type_t::i64, Shape{2}, vector{1, 2}); auto keep_dims = false; auto reduce_prod = make_shared(arg, axes, keep_dims); ASSERT_TRUE(reduce_prod->get_output_partial_shape(0).compatible(PartialShape{3})); diff --git a/ngraph/test/type_prop/reorg_yolo.cpp b/ngraph/test/type_prop/reorg_yolo.cpp index c132d1fc9ed..e63f0cb5ffa 100644 --- a/ngraph/test/type_prop/reorg_yolo.cpp +++ b/ngraph/test/type_prop/reorg_yolo.cpp @@ -25,7 +25,7 @@ TEST(type_prop, reorg_yolo_stride_2) { const auto in_shape = Shape{1, 64, 26, 26}; size_t stride = 2; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); auto reorg_yolo = make_shared(data_param, stride); // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] @@ -38,7 +38,7 @@ TEST(type_prop, reorg_yolo_stride_2_batch_2) { const auto in_shape = Shape{2, 64, 26, 26}; size_t stride = 2; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); auto reorg_yolo = make_shared(data_param, stride); // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] @@ -51,7 +51,7 @@ TEST(type_prop, reorg_yolo_stride_2_smaller_H) { const auto in_shape = Shape{1, 24, 34, 62}; size_t stride = 2; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); auto reorg_yolo = make_shared(data_param, stride); // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] @@ -63,7 +63,7 @@ TEST(type_prop, reorg_yolo_stride_3) { const auto in_shape = Shape{1, 9, 3, 3}; size_t stride = 3; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); auto reorg_yolo = make_shared(data_param, stride); // in_shape [N,C,H,W] -> out_shape [N, C*stride*stride, H/stride, W/stride] @@ -77,7 +77,7 @@ TEST(type_prop, reorg_yolo_catch_small_shape_stride) { const auto in_shape = Shape{1, 1, 4, 4}; size_t stride = 2; - auto data_param = make_shared(element::f32, in_shape); + auto data_param = make_shared(element::Type_t::f32, in_shape); try { // Throw error test: For [N, C, H, W] input shape, C >= (stride*stride) is required. 
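
Every hunk above and below applies the same mechanical change: wherever an `element::Type` is expected, the implicitly converting constants such as `element::f32` are replaced by the scoped enum values `element::Type_t::f32`. The sketch below is a minimal reconstruction of that pattern, not a line taken from the diff; the template arguments of `make_shared<...>` (here `op::Parameter` and `op::v1::ReduceSum`) do not survive in this rendering of the patch and are inferred from the surrounding test names.

```C++
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"

using namespace std;
using namespace ngraph;

TEST(type_prop, example_element_type_t_migration)
{
    // Old spelling (deprecated): element::f32 / element::i64.
    // New spelling: the scoped enum element::Type_t, which converts
    // implicitly to element::Type wherever one is expected.
    auto arg = make_shared<op::Parameter>(element::Type_t::f32, Shape{3, 4, 5});
    auto axes =
        op::Constant::create(element::Type_t::i64, Shape{2}, vector<int64_t>{1, 2});
    auto reduce_sum = make_shared<op::v1::ReduceSum>(arg, axes, /*keep_dims=*/false);

    // Equality checks accept Type_t directly as well, which is what lets the
    // ASSERT_EQ/EXPECT_EQ lines in these hunks change spelling without any
    // other rewrite.
    ASSERT_EQ(reduce_sum->get_element_type(), element::Type_t::f32);
    ASSERT_TRUE(reduce_sum->get_output_partial_shape(0).compatible(PartialShape{3}));
}
```

The assertions compile unchanged because `element::Type` compares transparently against `element::Type_t` values, so test expectations only need the new spelling on the right-hand side.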
diff --git a/ngraph/test/type_prop/reshape.cpp b/ngraph/test/type_prop/reshape.cpp index 0d2f73b60bf..171182b1d10 100644 --- a/ngraph/test/type_prop/reshape.cpp +++ b/ngraph/test/type_prop/reshape.cpp @@ -23,83 +23,83 @@ using namespace ngraph; TEST(type_prop, reshape_deduce_s2v) { - auto param = make_shared(element::f32, Shape{}); + auto param = make_shared(element::Type_t::f32, Shape{}); auto r = make_shared( - param, op::Constant::create(element::u64, {1}, Shape{1}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {1}, Shape{1}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_EQ(r->get_shape(), (Shape{1})); } TEST(type_prop, reshape_deduce_s2m) { - auto param = make_shared(element::f32, Shape{}); + auto param = make_shared(element::Type_t::f32, Shape{}); auto r = make_shared( - param, op::Constant::create(element::u64, {2}, Shape{1, 1}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {2}, Shape{1, 1}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_EQ(r->get_shape(), (Shape{1, 1})); } TEST(type_prop, reshape_deduce_s2t) { - auto param = make_shared(element::f32, Shape{}); + auto param = make_shared(element::Type_t::f32, Shape{}); auto r = make_shared( - param, op::Constant::create(element::u64, {3}, Shape{1, 1, 1}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {3}, Shape{1, 1, 1}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_EQ(r->get_shape(), (Shape{1, 1, 1})); } TEST(type_prop, reshape_deduce_m2v_01) { - auto param = make_shared(element::f32, Shape{3, 4}); + auto param = make_shared(element::Type_t::f32, Shape{3, 4}); auto r = make_shared( - param, op::Constant::create(element::u64, {1}, Shape{12}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {1}, Shape{12}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_EQ(r->get_shape(), (Shape{12})); } TEST(type_prop, reshape_deduce_m2v_10) { - auto param = make_shared(element::f32, Shape{3, 4}); + auto param = make_shared(element::Type_t::f32, Shape{3, 4}); auto r = make_shared( - param, op::Constant::create(element::u64, {1}, Shape{12}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {1}, Shape{12}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_EQ(r->get_shape(), (Shape{12})); } TEST(type_prop, reshape_deduce_t2v_012) { - auto param = make_shared(element::f32, Shape{3, 4, 5}); + auto param = make_shared(element::Type_t::f32, Shape{3, 4, 5}); auto r = make_shared( - param, op::Constant::create(element::u64, {1}, Shape{60}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {1}, Shape{60}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_EQ(r->get_shape(), (Shape{60})); } TEST(type_prop, reshape_deduce_t2v_120) { - auto param = make_shared(element::f32, Shape{3, 4, 5}); + auto param = make_shared(element::Type_t::f32, Shape{3, 4, 5}); auto r = make_shared( - param, op::Constant::create(element::u64, {1}, Shape{60}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {1}, Shape{60}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); 
ASSERT_EQ(r->get_shape(), (Shape{60})); } TEST(type_prop, reshape_deduce_zero_special) { - auto param = make_shared(element::f32, Shape{3, 4, 5}); + auto param = make_shared(element::Type_t::f32, Shape{3, 4, 5}); auto r = make_shared( - param, op::Constant::create(element::u64, {3}, Shape{6, 2, 0}), true); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {3}, Shape{6, 2, 0}), true); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_EQ(r->get_shape(), (Shape{6, 2, 5})); } TEST(type_prop, reshape_deduce_wrong_output_shape) { - auto param = make_shared(element::f32, Shape{3, 4, 5}); + auto param = make_shared(element::Type_t::f32, Shape{3, 4, 5}); try { auto r = make_shared( - param, op::Constant::create(element::u64, {3}, Shape{3, 3, 3}), false); + param, op::Constant::create(element::Type_t::u64, {3}, Shape{3, 3, 3}), false); // Should have thrown, so fail if it didn't FAIL() << "No exception was thrown"; } @@ -120,10 +120,10 @@ TEST(type_prop, reshape_deduce_wrong_output_shape) // TEST(type_prop, reshape_partial_rank_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic()); + auto param = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto r = make_shared( - param, op::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {4}, Shape{3, 1, 8, 2}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_TRUE(r->get_output_partial_shape(0).is_static()); ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2})); } @@ -135,10 +135,10 @@ TEST(type_prop, reshape_partial_rank_static) { auto param_shape = PartialShape{Dimension::dynamic(), 6, Dimension::dynamic(), Dimension::dynamic()}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::Type_t::f32, param_shape); auto r = make_shared( - param, op::Constant::create(element::u64, {4}, Shape{3, 1, 8, 2}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {4}, Shape{3, 1, 8, 2}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_TRUE(r->get_output_partial_shape(0).is_static()); ASSERT_EQ(r->get_shape(), (Shape{3, 1, 8, 2})); } @@ -151,10 +151,10 @@ TEST(type_prop, reshape_partial_rank_static_dynamic_but_zero_ok) { auto param_shape = PartialShape{Dimension::dynamic(), 0, Dimension::dynamic(), Dimension::dynamic()}; - auto param = make_shared(element::f32, PartialShape::dynamic()); + auto param = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto r = make_shared( - param, op::Constant::create(element::u64, {4}, Shape{3, 1, 0, 2}), false); - ASSERT_EQ(r->get_element_type(), element::f32); + param, op::Constant::create(element::Type_t::u64, {4}, Shape{3, 1, 0, 2}), false); + ASSERT_EQ(r->get_element_type(), element::Type_t::f32); ASSERT_TRUE(r->get_output_partial_shape(0).is_static()); ASSERT_EQ(r->get_shape(), (Shape{3, 1, 0, 2})); } diff --git a/ngraph/test/type_prop/reverse.cpp b/ngraph/test/type_prop/reverse.cpp index 6a77fe367b8..ce58bc94335 100644 --- a/ngraph/test/type_prop/reverse.cpp +++ b/ngraph/test/type_prop/reverse.cpp @@ -26,133 +26,140 @@ using namespace ngraph; TEST(type_prop, reverse_1d_deduce) { // Deduce type - auto param = make_shared(element::f32, Shape{5}); + auto param = make_shared(element::Type_t::f32, Shape{5}); auto rev = make_shared( - param, op::Constant::create(element::i64, 
{1}, {0}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5})); } TEST(type_prop, reverse_2d_deduce_0) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6})); } TEST(type_prop, reverse_2d_deduce_1) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6})); } TEST(type_prop, reverse_2d_deduce_01) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6}); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {0, 1}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6})); } TEST(type_prop, reverse_3d_deduce_0) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {0}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_1) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {1}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_2) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); auto rev = make_shared( - param, op::Constant::create(element::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX); + param, op::Constant::create(element::Type_t::i64, {1}, {2}), op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 
7})); } TEST(type_prop, reverse_3d_deduce_01) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {0, 1}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {0, 1}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_02) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {0, 2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {0, 2}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_12) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {1, 2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {1, 2}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_012) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); - auto rev = make_shared( - param, op::Constant::create(element::i64, {3}, {0, 1, 2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); + auto rev = + make_shared(param, + op::Constant::create(element::Type_t::i64, {3}, {0, 1, 2}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_EQ(rev->get_shape(), (Shape{5, 6, 7})); } TEST(type_prop, reverse_3d_deduce_oob) { // Deduce type - auto param = make_shared(element::f32, Shape{5, 6, 7}); + auto param = make_shared(element::Type_t::f32, Shape{5, 6, 7}); try { - auto rev = make_shared(param, - op::Constant::create(element::i64, {3}, {0, 3, 2}), - op::v1::Reverse::Mode::INDEX); + auto rev = + make_shared(param, + op::Constant::create(element::Type_t::i64, {3}, {0, 3, 2}), + op::v1::Reverse::Mode::INDEX); // Should have thrown, so fail if it didn't FAIL() << "Axis out of bounds not detected"; @@ -175,13 +182,13 @@ TEST(type_prop, reverse_3d_deduce_oob) // TEST(type_prop, reverse_partial_rank_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic()); - auto rev = - make_shared(param, - op::Constant::create(element::i64, {4}, {0, 2, 1776, 90909}), - op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto rev = make_shared( + param, + op::Constant::create(element::Type_t::i64, {4}, {0, 2, 1776, 90909}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); 
EXPECT_TRUE(rev->get_output_partial_shape(0).rank().is_dynamic()); } @@ -192,23 +199,25 @@ TEST(type_prop, reverse_partial_rank_dynamic) TEST(type_prop, reverse_partial_rank_static_dynamic_axes_ok) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); - auto rev = make_shared( - param, op::Constant::create(element::i64, {2}, {0, 2}), op::v1::Reverse::Mode::INDEX); + auto param = make_shared(element::Type_t::f32, param_shape); + auto rev = make_shared(param, + op::Constant::create(element::Type_t::i64, {2}, {0, 2}), + op::v1::Reverse::Mode::INDEX); - EXPECT_EQ(rev->get_element_type(), element::f32); + EXPECT_EQ(rev->get_element_type(), element::Type_t::f32); EXPECT_TRUE(rev->get_output_partial_shape(0).same_scheme(param_shape)); } TEST(type_prop, reverse_partial_rank_static_dynamic_axes_oob) { PartialShape param_shape{Dimension::dynamic(), Dimension::dynamic(), 2, 3}; - auto param = make_shared(element::f32, param_shape); + auto param = make_shared(element::Type_t::f32, param_shape); try { - auto rev = make_shared(param, - op::Constant::create(element::i64, {3}, {0, 4, 2}), - op::v1::Reverse::Mode::INDEX); + auto rev = + make_shared(param, + op::Constant::create(element::Type_t::i64, {3}, {0, 4, 2}), + op::v1::Reverse::Mode::INDEX); // Should have thrown, so fail if it didn't FAIL() << "Axis out of bounds not detected"; diff --git a/ngraph/test/type_prop/reverse_sequence.cpp b/ngraph/test/type_prop/reverse_sequence.cpp index ade152fd77e..65819ceee51 100644 --- a/ngraph/test/type_prop/reverse_sequence.cpp +++ b/ngraph/test/type_prop/reverse_sequence.cpp @@ -23,8 +23,8 @@ using namespace ngraph; TEST(type_prop, reverse_sequence_1_dim) { - auto data = make_shared(element::f32, Shape{4, 3, 2}); - auto seq_lenghts = make_shared(element::f32, Shape{4, 4}); + auto data = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + auto seq_lenghts = make_shared(element::Type_t::f32, Shape{4, 4}); try { size_t batch_axis = 0; @@ -45,8 +45,8 @@ TEST(type_prop, reverse_sequence_1_dim) TEST(type_prop, reverse_sequence_batch_index_oob) { - auto data = make_shared(element::f32, Shape{4, 3, 2}); - auto seq_lenghts = make_shared(element::f32, Shape{3}); + auto data = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + auto seq_lenghts = make_shared(element::Type_t::f32, Shape{3}); try { size_t batch_axis = 3; @@ -66,8 +66,8 @@ TEST(type_prop, reverse_sequence_batch_index_oob) TEST(type_prop, reverse_sequence_sequence_index_oob) { - auto data = make_shared(element::f32, Shape{4, 3, 2}); - auto seq_lengths = make_shared(element::f32, Shape{3}); + auto data = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + auto seq_lengths = make_shared(element::Type_t::f32, Shape{3}); try { size_t batch_axis = 1; @@ -87,8 +87,8 @@ TEST(type_prop, reverse_sequence_sequence_index_oob) TEST(type_prop, reverse_sequence_seq_len_size_equal_to_batch_dim) { - auto data = make_shared(element::f32, Shape{4, 3, 2}); - auto seq_lenghts = make_shared(element::f32, Shape{3}); + auto data = make_shared(element::Type_t::f32, Shape{4, 3, 2}); + auto seq_lenghts = make_shared(element::Type_t::f32, Shape{3}); try { size_t batch_axis = 0; @@ -111,67 +111,68 @@ TEST(type_prop, reverse_sequence_seq_len_size_equal_to_batch_dim) TEST(type_prop, reverse_sequence_partial_both_rank_dynamic) { - auto data = make_shared(element::f32, PartialShape::dynamic()); - auto seq_lengths = make_shared(element::f32, PartialShape::dynamic()); + auto data = 
make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape::dynamic()); // Unrealistic values, but they don't matter here. size_t batch_axis = 202; size_t seq_axis = 909; auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis); EXPECT_TRUE(rs->get_output_partial_shape(0).is_dynamic()); - EXPECT_EQ(rs->get_output_element_type(0), element::f32); + EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32); } TEST(type_prop, reverse_sequence_partial_left_rank_dynamic) { - auto data = make_shared(element::f32, PartialShape::dynamic()); - auto seq_lengths = make_shared(element::f32, PartialShape{3}); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{3}); // Unrealistic values, but they don't matter here. size_t batch_axis = 202; size_t seq_axis = 909; auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis); EXPECT_TRUE(rs->get_output_partial_shape(0).is_dynamic()); - EXPECT_EQ(rs->get_output_element_type(0), element::f32); + EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32); } TEST(type_prop, reverse_sequence_partial_right_rank_dynamic) { - auto data = make_shared(element::f32, PartialShape{2, 4, 6, 8}); - auto seq_lengths = make_shared(element::f32, PartialShape::dynamic()); + auto data = make_shared(element::Type_t::f32, PartialShape{2, 4, 6, 8}); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape::dynamic()); size_t batch_axis = 0; size_t seq_axis = 1; auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis); EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme(PartialShape{2, 4, 6, 8})); - EXPECT_EQ(rs->get_output_element_type(0), element::f32); + EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32); } TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic) { - auto data = make_shared(element::f32, + auto data = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); - auto seq_lengths = make_shared(element::f32, PartialShape::dynamic()); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape::dynamic()); size_t batch_axis = 0; size_t seq_axis = 1; auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis); EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme(PartialShape{ Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()})); - EXPECT_EQ(rs->get_output_element_type(0), element::f32); + EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32); } TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_batch_axis_oob) { - auto data = make_shared(element::f32, + auto data = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); - auto seq_lengths = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto seq_lengths = + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()}); size_t batch_axis = 4; size_t seq_axis = 1; try @@ -191,12 +192,13 @@ TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_batch_axis_oob TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_sequence_axis_oob) { - auto data = make_shared(element::f32, + auto data = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); - 
auto seq_lengths = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto seq_lengths = + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()}); size_t batch_axis = 1; size_t seq_axis = 4; try @@ -217,50 +219,51 @@ TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_sequence_axis_ TEST(type_prop, reverse_sequence_partial_left_rank_static_dynamic_right_static_left_seq_length_dynamic) { - auto data = make_shared(element::f32, + auto data = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic(), Dimension::dynamic()}); - auto seq_lengths = make_shared(element::f32, PartialShape{3}); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{3}); size_t batch_axis = 2; size_t seq_axis = 1; auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis); EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()})); - EXPECT_EQ(rs->get_output_element_type(0), element::f32); + EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32); } TEST(type_prop, reverse_sequence_partial_both_rank_static_dynamic_right_seq_length_dynamic) { auto data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()}); - auto seq_lengths = make_shared(element::f32, PartialShape{Dimension::dynamic()}); + auto seq_lengths = + make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()}); size_t batch_axis = 2; size_t seq_axis = 1; auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis); EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()})); - EXPECT_EQ(rs->get_output_element_type(0), element::f32); + EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32); } TEST(type_prop, reverse_sequence_partial_left_rank_static_dynamic_right_static_left_seq_length_static) { auto data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()}); - auto seq_lengths = make_shared(element::f32, PartialShape{3}); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{3}); size_t batch_axis = 2; size_t seq_axis = 1; auto rs = make_shared(data, seq_lengths, batch_axis, seq_axis); EXPECT_TRUE(rs->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()})); - EXPECT_EQ(rs->get_output_element_type(0), element::f32); + EXPECT_EQ(rs->get_output_element_type(0), element::Type_t::f32); } TEST( @@ -268,9 +271,9 @@ TEST( reverse_sequence_partial_left_rank_static_dynamic_right_static_left_seq_length_static_inconsistent) { auto data = make_shared( - element::f32, + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3, Dimension::dynamic()}); - auto seq_lengths = make_shared(element::f32, PartialShape{4}); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{4}); size_t batch_axis = 2; size_t seq_axis = 1; try @@ -292,8 +295,8 @@ TEST( TEST(type_prop, reverse_sequence_negative_axis_dynamic_input_rank) { - auto data = make_shared(element::f32, PartialShape::dynamic()); - auto seq_lengths = make_shared(element::f32, PartialShape{1}); + auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{1}); 
int64_t batch_axis = 1; int64_t seq_axis = -2; try @@ -315,8 +318,8 @@ TEST(type_prop, reverse_sequence_negative_axis_dynamic_input_rank) TEST(type_prop, reverse_sequence_negative_axes_support) { - auto data = make_shared(element::f32, PartialShape{1, 2, 3, 4, 5}); - auto seq_lengths = make_shared(element::f32, PartialShape{3}); + auto data = make_shared(element::Type_t::f32, PartialShape{1, 2, 3, 4, 5}); + auto seq_lengths = make_shared(element::Type_t::f32, PartialShape{3}); int64_t batch_axis = -3; int64_t seq_axis = -2; diff --git a/ngraph/test/type_prop/rnn_cell.cpp b/ngraph/test/type_prop/rnn_cell.cpp index 627457edbb9..aedc5c88fe2 100644 --- a/ngraph/test/type_prop/rnn_cell.cpp +++ b/ngraph/test/type_prop/rnn_cell.cpp @@ -28,13 +28,17 @@ TEST(type_prop, rnn_cell) const size_t input_size = 3; const size_t hidden_size = 3; - const auto X = make_shared(element::f32, Shape{batch_size, input_size}); - const auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - const auto W = make_shared(element::f32, Shape{hidden_size, input_size}); - const auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); + const auto X = + make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + const auto H_t = + make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + const auto W = + make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + const auto R = + make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); - EXPECT_EQ(rnn_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(rnn_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(rnn_cell->get_output_shape(0), (Shape{batch_size, hidden_size})); } @@ -44,12 +48,13 @@ TEST(type_prop, rnn_cell_invalid_input) const size_t input_size = 3; const size_t hidden_size = 3; - auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); // Invalid W tensor shape. - auto W = make_shared(element::f32, Shape{2 * hidden_size, input_size}); + auto W = + make_shared(element::Type_t::f32, Shape{2 * hidden_size, input_size}); try { const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); @@ -62,8 +67,8 @@ TEST(type_prop, rnn_cell_invalid_input) } // Invalid R tensor shape. - W = make_shared(element::f32, Shape{hidden_size, input_size}); - R = make_shared(element::f32, Shape{hidden_size, 1}); + W = make_shared(element::Type_t::f32, Shape{hidden_size, input_size}); + R = make_shared(element::Type_t::f32, Shape{hidden_size, 1}); try { const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); @@ -78,8 +83,8 @@ TEST(type_prop, rnn_cell_invalid_input) } // Invalid H_t tensor shape. - R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - H_t = make_shared(element::f32, Shape{4, hidden_size}); + R = make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{4, hidden_size}); try { const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); @@ -93,8 +98,8 @@ TEST(type_prop, rnn_cell_invalid_input) } // Invalid B tensor shape. 
- H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - auto B = make_shared(element::f32, Shape{2 * hidden_size}); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + auto B = make_shared(element::Type_t::f32, Shape{2 * hidden_size}); try { const auto rnn_cell = make_shared(X, H_t, W, R, B, hidden_size); @@ -114,16 +119,16 @@ TEST(type_prop, rnn_cell_dynamic_batch_size) const size_t hidden_size = 3; const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto W = - make_shared(element::f32, PartialShape{hidden_size, input_size}); - const auto R = - make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, hidden_size); - EXPECT_EQ(rnn_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(rnn_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(rnn_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -134,16 +139,16 @@ TEST(type_prop, rnn_cell_dynamic_hidden_size) const auto hidden_size = Dimension::dynamic(); const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto W = - make_shared(element::f32, PartialShape{hidden_size, input_size}); - const auto R = - make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, 3); - EXPECT_EQ(rnn_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(rnn_cell->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(rnn_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); } @@ -154,18 +159,18 @@ TEST(type_prop, rnn_cell_dynamic_inputs) const auto hidden_size = Dimension::dynamic(); const auto X = - make_shared(element::f32, PartialShape{batch_size, input_size}); - const auto R = - make_shared(element::f32, PartialShape{hidden_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, input_size}); + const auto R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); const auto W = - make_shared(element::f32, PartialShape{hidden_size, input_size}); + make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); const auto H_t = - make_shared(element::f32, PartialShape{batch_size, hidden_size}); + make_shared(element::Type_t::f32, PartialShape{batch_size, hidden_size}); const auto rnn_cell = make_shared(X, H_t, W, R, 2); EXPECT_EQ(rnn_cell->get_output_partial_shape(0), (PartialShape{batch_size, hidden_size})); - EXPECT_EQ(rnn_cell->get_output_element_type(0), element::f32); + EXPECT_EQ(rnn_cell->get_output_element_type(0), element::Type_t::f32); } TEST(type_prop, rnn_cell_invalid_input_rank0) @@ -174,40 
+179,41 @@ TEST(type_prop, rnn_cell_invalid_input_rank0) const size_t input_size = 3; const size_t hidden_size = 3; - auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); // Invalid rank0 for W tensor. - auto W = make_shared(element::f32, PartialShape{}); + auto W = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; // Invalid rank0 for X tensor. - W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - X = make_shared(element::f32, PartialShape{}); + W = make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + X = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; // Invalid rank0 for H_t tensor. - X = make_shared(element::f32, Shape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape{}); + X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; // Invalid rank0 for R tensor. - H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape{}); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; // Invalid rank0 for B tensor. - R = make_shared(element::f32, PartialShape{hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape{}); + R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); + auto B = make_shared(element::Type_t::f32, PartialShape{}); ASSERT_THROW(make_shared(X, H_t, W, R, B, hidden_size), ngraph::NodeValidationFailure) << "RNNCell node was created with invalid data."; @@ -219,40 +225,46 @@ TEST(type_prop, rnn_cell_invalid_input_dynamic_rank) const size_t input_size = 3; const size_t hidden_size = 3; - auto X = make_shared(element::f32, Shape{batch_size, input_size}); - auto R = make_shared(element::f32, Shape{hidden_size, hidden_size}); - auto H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); + auto X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + auto R = make_shared(element::Type_t::f32, Shape{hidden_size, hidden_size}); + auto H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); auto check_dynamic_rnn = [](const shared_ptr& rnn) -> bool { return rnn->output(0).get_partial_shape() == PartialShape::dynamic() && rnn->output(0).get_element_type() == rnn->input(0).get_element_type(); }; // Invalid dynamic rank for W tensor. 
- auto W = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + auto W = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_w = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_w), true); // Invalid dynamic rank for X tensor. - W = make_shared(element::f32, PartialShape{hidden_size, input_size}); - X = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + W = make_shared(element::Type_t::f32, PartialShape{hidden_size, input_size}); + X = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_x = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_x), true); // Invalid dynamic rank for H_t tensor. - X = make_shared(element::f32, Shape{batch_size, input_size}); - H_t = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + X = make_shared(element::Type_t::f32, Shape{batch_size, input_size}); + H_t = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_h = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_h), true); // Invalid dynamic rank for R tensor. - H_t = make_shared(element::f32, Shape{batch_size, hidden_size}); - R = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + H_t = make_shared(element::Type_t::f32, Shape{batch_size, hidden_size}); + R = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_r = make_shared(X, H_t, W, R, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_r), true); // Invalid dynamic rank for B tensor. - R = make_shared(element::f32, PartialShape{hidden_size, hidden_size}); - auto B = make_shared(element::f32, PartialShape::dynamic(Rank::dynamic())); + R = make_shared(element::Type_t::f32, + PartialShape{hidden_size, hidden_size}); + auto B = make_shared(element::Type_t::f32, + PartialShape::dynamic(Rank::dynamic())); auto rnn_b = make_shared(X, H_t, W, R, B, hidden_size); EXPECT_EQ(check_dynamic_rnn(rnn_b), true); } diff --git a/ngraph/test/type_prop/rnn_sequence.cpp b/ngraph/test/type_prop/rnn_sequence.cpp index 94b500dbb02..30c24dff42f 100644 --- a/ngraph/test/type_prop/rnn_sequence.cpp +++ b/ngraph/test/type_prop/rnn_sequence.cpp @@ -30,17 +30,19 @@ TEST(type_prop, rnn_sequence_forward) const size_t input_size = 4; const size_t hidden_size = 128; - const auto X = - make_shared(element::f32, Shape{batch_size, seq_length, input_size}); + const auto X = make_shared(element::Type_t::f32, + Shape{batch_size, seq_length, input_size}); const auto initial_hidden_state = make_shared( - element::f32, Shape{batch_size, num_directions, hidden_size}); - const auto sequence_lengths = make_shared(element::i32, Shape{batch_size}); + element::Type_t::f32, Shape{batch_size, num_directions, hidden_size}); + const auto sequence_lengths = + make_shared(element::Type_t::i32, Shape{batch_size}); - const auto W = make_shared(element::f32, + const auto W = make_shared(element::Type_t::f32, Shape{num_directions, hidden_size, input_size}); - const auto R = make_shared(element::f32, + const auto R = make_shared(element::Type_t::f32, Shape{num_directions, hidden_size, hidden_size}); - const auto B = make_shared(element::f32, Shape{num_directions, hidden_size}); + const auto B = + make_shared(element::Type_t::f32, Shape{num_directions, hidden_size}); const auto direction = op::RecurrentSequenceDirection::FORWARD; @@ -53,10 +55,10 @@ TEST(type_prop, rnn_sequence_forward) EXPECT_TRUE(sequence->get_activations_beta().empty()); 
EXPECT_EQ(sequence->get_activations()[0], "tanh"); EXPECT_EQ(sequence->get_clip(), 0.f); - EXPECT_EQ(sequence->get_output_element_type(0), element::f32); + EXPECT_EQ(sequence->get_output_element_type(0), element::Type_t::f32); EXPECT_EQ(sequence->outputs().size(), 2); EXPECT_EQ(sequence->get_output_shape(0), (Shape{batch_size, num_directions, seq_length, hidden_size})); - EXPECT_EQ(sequence->get_output_element_type(1), element::f32); + EXPECT_EQ(sequence->get_output_element_type(1), element::Type_t::f32); EXPECT_EQ(sequence->get_output_shape(1), (Shape{batch_size, num_directions, hidden_size})); } diff --git a/ngraph/test/type_prop/roi_align.cpp b/ngraph/test/type_prop/roi_align.cpp index 67b10360670..0c32f24c5d6 100644 --- a/ngraph/test/type_prop/roi_align.cpp +++ b/ngraph/test/type_prop/roi_align.cpp @@ -22,37 +22,39 @@ using namespace ngraph; TEST(type_prop_layers, roi_align_basic_shape_inference) { - const auto data = make_shared(element::f32, Shape{2, 3, 5, 5}); - const auto rois = make_shared(element::f32, Shape{7, 4}); - const auto batch_indices = make_shared(element::i32, Shape{7}); + const auto data = make_shared(element::Type_t::f32, Shape{2, 3, 5, 5}); + const auto rois = make_shared(element::Type_t::f32, Shape{7, 4}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{7}); const auto op = make_shared(data, rois, batch_indices, 2, 2, 1, 1.0f, "avg"); ASSERT_EQ(op->get_shape(), (Shape{7, 3, 2, 2})); } TEST(type_prop_layers, roi_align_dynamic_channels_dim) { - const auto data = make_shared(element::f32, PartialShape{10, Dimension(), 5, 5}); - const auto rois = make_shared(element::f32, Shape{7, 4}); - const auto batch_indices = make_shared(element::i32, Shape{7}); + const auto data = + make_shared(element::Type_t::f32, PartialShape{10, Dimension(), 5, 5}); + const auto rois = make_shared(element::Type_t::f32, Shape{7, 4}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{7}); const auto op = make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{7, Dimension(), 3, 4})); } TEST(type_prop_layers, roi_align_num_rois_from_batch_indices) { - const auto data = make_shared(element::f32, PartialShape{10, 3, 5, 5}); + const auto data = make_shared(element::Type_t::f32, PartialShape{10, 3, 5, 5}); const auto rois = - make_shared(element::f32, PartialShape{Dimension{}, Dimension{}}); - const auto batch_indices = make_shared(element::i32, Shape{9}); + make_shared(element::Type_t::f32, PartialShape{Dimension{}, Dimension{}}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{9}); const auto op = make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"); ASSERT_EQ(op->get_shape(), (Shape{9, 3, 3, 4})); } TEST(type_prop_layers, roi_align_incompatible_num_rois) { - const auto data = make_shared(element::f32, Shape{10, 3, 5, 5}); - const auto rois = make_shared(element::f32, PartialShape{1, Dimension{}}); - const auto batch_indices = make_shared(element::i32, Shape{2}); + const auto data = make_shared(element::Type_t::f32, Shape{10, 3, 5, 5}); + const auto rois = + make_shared(element::Type_t::f32, PartialShape{1, Dimension{}}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{2}); // the first dimension of rois and batch_indices should be equal ASSERT_THROW(make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"), ngraph::NodeValidationFailure); @@ -60,9 +62,9 @@ TEST(type_prop_layers, roi_align_incompatible_num_rois) 
TEST(type_prop_layers, roi_align_incompatible_input_rank) { - const auto data = make_shared(element::f32, Shape{1, 10, 3, 5, 5}); - const auto rois = make_shared(element::f32, Shape{1, 4}); - const auto batch_indices = make_shared(element::i32, Shape{1}); + const auto data = make_shared(element::Type_t::f32, Shape{1, 10, 3, 5, 5}); + const auto rois = make_shared(element::Type_t::f32, Shape{1, 4}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{1}); // data rank needs to be 4 ASSERT_THROW(make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"), ngraph::NodeValidationFailure); @@ -70,9 +72,9 @@ TEST(type_prop_layers, roi_align_incompatible_input_rank) TEST(type_prop_layers, roi_align_incompatible_rois_second_dim) { - const auto data = make_shared(element::f32, Shape{10, 3, 5, 5}); - const auto rois = make_shared(element::f32, Shape{1, 5}); - const auto batch_indices = make_shared(element::i32, Shape{1}); + const auto data = make_shared(element::Type_t::f32, Shape{10, 3, 5, 5}); + const auto rois = make_shared(element::Type_t::f32, Shape{1, 5}); + const auto batch_indices = make_shared(element::Type_t::i32, Shape{1}); // the second dim of rois needs to be 4 ASSERT_THROW(make_shared(data, rois, batch_indices, 3, 4, 1, 1.0f, "avg"), ngraph::NodeValidationFailure); diff --git a/ngraph/test/type_prop/roi_pooling.cpp b/ngraph/test/type_prop/roi_pooling.cpp index 5ab2b7759fe..f9ce17a2b58 100644 --- a/ngraph/test/type_prop/roi_pooling.cpp +++ b/ngraph/test/type_prop/roi_pooling.cpp @@ -22,8 +22,8 @@ using namespace ngraph; TEST(type_prop, roi_pooling_basic_shape_inference) { - const auto feat_maps = make_shared(element::f32, Shape{1, 3, 6, 6}); - const auto rois = make_shared(element::f32, Shape{4, 5}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{1, 3, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{4, 5}); const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f); ASSERT_EQ(op->get_method(), "max"); ASSERT_EQ(op->get_shape(), (Shape{4, 3, 2, 2})); @@ -32,40 +32,42 @@ TEST(type_prop, roi_pooling_basic_shape_inference) TEST(type_prop, roi_pooling_dynamic_channels_dim) { const auto feat_maps = - make_shared(element::f32, PartialShape{1, Dimension(), 6, 6}); - const auto rois = make_shared(element::f32, Shape{4, 5}); + make_shared(element::Type_t::f32, PartialShape{1, Dimension(), 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{4, 5}); const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "max"); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension(), 2, 2})); } TEST(type_prop, roi_pooling_dynamic_num_rois_dim) { - const auto feat_maps = make_shared(element::f32, Shape{1, 3, 6, 6}); - const auto rois = make_shared(element::f32, PartialShape{Dimension(), 5}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{1, 3, 6, 6}); + const auto rois = + make_shared(element::Type_t::f32, PartialShape{Dimension(), 5}); const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 3, 2, 2})); } TEST(type_prop, roi_pooling_dynamic_rank_feat_maps) { - const auto feat_maps = make_shared(element::f32, PartialShape::dynamic()); - const auto rois = make_shared(element::f32, Shape{4, 5}); + const auto feat_maps = + make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto rois = make_shared(element::Type_t::f32, Shape{4, 5}); const auto op = make_shared(feat_maps, rois, 
Shape{2, 2}, 0.625f); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{4, Dimension(), 2, 2})); } TEST(type_prop, roi_pooling_dynamic_rank_rois) { - const auto feat_maps = make_shared(element::f32, Shape{1, 3, 6, 6}); - const auto rois = make_shared(element::f32, PartialShape::dynamic()); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{1, 3, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, PartialShape::dynamic()); const auto op = make_shared(feat_maps, rois, Shape{2, 2}, 0.625f); ASSERT_TRUE(op->get_output_partial_shape(0).same_scheme(PartialShape{Dimension(), 3, 2, 2})); } TEST(type_prop, roi_pooling_incompatible_input_rank) { - const auto feat_maps = make_shared(element::f32, Shape{1, 3, 2, 6, 6}); - const auto rois = make_shared(element::f32, Shape{3, 5}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{1, 3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{3, 5}); // feat_maps must be of rank 4 ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "max"), ngraph::NodeValidationFailure); @@ -74,8 +76,8 @@ TEST(type_prop, roi_pooling_incompatible_input_rank) TEST(type_prop, roi_pooling_incompatible_pooling_shape) { Shape pool_shape{2, 2, 2}; - const auto feat_maps = make_shared(element::f32, Shape{3, 2, 6, 6}); - const auto rois = make_shared(element::f32, Shape{3, 5}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{3, 5}); // pool_shape must be of rank 2 {pooled_h, pooled_w} ASSERT_THROW(make_shared(feat_maps, rois, pool_shape, 0.625f, "max"), ngraph::NodeValidationFailure); @@ -83,8 +85,8 @@ TEST(type_prop, roi_pooling_incompatible_pooling_shape) TEST(type_prop, roi_pooling_incompatible_rois_second_dim) { - const auto feat_maps = make_shared(element::f32, Shape{3, 2, 6, 6}); - const auto rois = make_shared(element::f32, Shape{3, 4}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{3, 4}); // the second dim of rois must be 5. 
[batch_id, x_1, y_1, x_2, y_2] ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "max"), ngraph::NodeValidationFailure); @@ -92,8 +94,8 @@ TEST(type_prop, roi_pooling_incompatible_rois_second_dim) TEST(type_prop, roi_pooling_incompatible_feature_maps_element_type) { - const auto feat_maps = make_shared(element::i32, Shape{3, 2, 6, 6}); - const auto rois = make_shared(element::f32, Shape{3, 5}); + const auto feat_maps = make_shared(element::Type_t::i32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f32, Shape{3, 5}); // feat_maps element type must be floating point type ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "max"), ngraph::NodeValidationFailure); @@ -101,8 +103,8 @@ TEST(type_prop, roi_pooling_incompatible_feature_maps_element_type) TEST(type_prop, roi_pooling_incompatible_rois_element_type) { - const auto feat_maps = make_shared(element::f32, Shape{3, 2, 6, 6}); - const auto rois = make_shared(element::f16, Shape{3, 5}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f16, Shape{3, 5}); // rois element type must be equal to feat_maps element type (floating point type) ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "bilinear"), ngraph::NodeValidationFailure); @@ -110,8 +112,8 @@ TEST(type_prop, roi_pooling_incompatible_rois_element_type) TEST(type_prop, roi_pooling_invalid_pooling_method) { - const auto feat_maps = make_shared(element::f32, Shape{3, 2, 6, 6}); - const auto rois = make_shared(element::f16, Shape{3, 5}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f16, Shape{3, 5}); // ROIPooling method is invalid: not max nor bilinear ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, 0.625f, "invalid"), ngraph::NodeValidationFailure); @@ -119,8 +121,8 @@ TEST(type_prop, roi_pooling_invalid_pooling_method) TEST(type_prop, roi_pooling_invalid_spatial_scale) { - const auto feat_maps = make_shared(element::f32, Shape{3, 2, 6, 6}); - const auto rois = make_shared(element::f16, Shape{3, 5}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f16, Shape{3, 5}); // ROIPooling spatial scale attribute must be a positive floating point number ASSERT_THROW(make_shared(feat_maps, rois, Shape{2, 2}, -0.625f, "max"), ngraph::NodeValidationFailure); @@ -128,8 +130,8 @@ TEST(type_prop, roi_pooling_invalid_spatial_scale) TEST(type_prop, roi_pooling_invalid_pooled_size) { - const auto feat_maps = make_shared(element::f32, Shape{3, 2, 6, 6}); - const auto rois = make_shared(element::f16, Shape{3, 5}); + const auto feat_maps = make_shared(element::Type_t::f32, Shape{3, 2, 6, 6}); + const auto rois = make_shared(element::Type_t::f16, Shape{3, 5}); // ROIPooling pooled_h and pooled_w must be non-negative integers ASSERT_THROW(make_shared(feat_maps, rois, Shape{1, 0}, 0.625f, "max"), ngraph::NodeValidationFailure); diff --git a/ngraph/test/type_prop/round.cpp b/ngraph/test/type_prop/round.cpp index dde3c7a7f01..dad253981a8 100644 --- a/ngraph/test/type_prop/round.cpp +++ b/ngraph/test/type_prop/round.cpp @@ -23,57 +23,60 @@ using namespace ngraph; TEST(type_prop, rounding_to_even) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN); - 
EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(round_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, rounding_away) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(round_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, rounding_to_even_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto round_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic()), + make_shared(element::Type_t::f32, PartialShape::dynamic()), op::v5::Round::RoundMode::HALF_TO_EVEN); ASSERT_TRUE(round_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, rounding_away_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto round_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic()), + make_shared(element::Type_t::f32, PartialShape::dynamic()), op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); ASSERT_TRUE(round_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, rounding_to_even_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_TO_EVEN); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); ASSERT_TRUE(round_func->get_output_partial_shape(0).rank().is_static()); @@ -81,10 +84,11 @@ TEST(type_prop, rounding_to_even_partial_static_rank) TEST(type_prop, rounding_away_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto round_func = make_shared(data, op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO); - EXPECT_EQ(round_func->get_element_type(), element::f32); + EXPECT_EQ(round_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(round_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); 
ASSERT_TRUE(round_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/scatter_elements_update.cpp b/ngraph/test/type_prop/scatter_elements_update.cpp index d1149b8e377..02b28505ad5 100644 --- a/ngraph/test/type_prop/scatter_elements_update.cpp +++ b/ngraph/test/type_prop/scatter_elements_update.cpp @@ -29,10 +29,10 @@ TEST(type_prop, scatter_elements_update_output_shape) Shape axis_shape{}; Shape expected_output_shape{2, 4, 5, 7}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape); auto scatter = make_shared(data, indices, updates, axis); @@ -46,10 +46,10 @@ TEST(type_prop, scatter_elements_update_output_partial_dyn_shape) PartialShape updates_shape{2, 2, Dimension::dynamic()}; PartialShape axis_shape = PartialShape::dynamic(); - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape); auto scatter = make_shared(data, indices, updates, axis); @@ -63,10 +63,10 @@ TEST(type_prop, scatter_elements_update_output_full_dyn_shape) PartialShape updates_shape = PartialShape::dynamic(); PartialShape axis_shape = PartialShape::dynamic(); - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape); auto scatter = make_shared(data, indices, updates, axis); @@ -80,10 +80,10 @@ TEST(type_prop, scatter_elements_update_axis_validation) Shape updates_shape{2, 2, 2, 2}; Shape axis_shape{}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape, std::vector{8}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape, std::vector{8}); try { @@ -107,10 +107,10 @@ TEST(type_prop, scatter_elements_updates_indices_shape) Shape updates_shape{2, 2, 2, 2}; Shape axis_shape{}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape, std::vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto 
indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape, std::vector{1}); try { @@ -135,10 +135,10 @@ TEST(type_prop, scatter_elements_updates_indices_rank) Shape updates_shape{2, 2, 2, 2}; Shape axis_shape{}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape, std::vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape, std::vector{1}); try { @@ -163,10 +163,10 @@ TEST(type_prop, scatter_elements_data_indices_rank) Shape updates_shape{2, 2}; Shape axis_shape{}; - auto data = make_shared(element::f32, data_shape); - auto indices = make_shared(element::i16, indices_shape); - auto updates = make_shared(element::f32, updates_shape); - auto axis = make_shared(element::i16, axis_shape, std::vector{1}); + auto data = make_shared(element::Type_t::f32, data_shape); + auto indices = make_shared(element::Type_t::i16, indices_shape); + auto updates = make_shared(element::Type_t::f32, updates_shape); + auto axis = make_shared(element::Type_t::i16, axis_shape, std::vector{1}); try { diff --git a/ngraph/test/type_prop/scatter_nd_update.cpp b/ngraph/test/type_prop/scatter_nd_update.cpp index 06010fcb378..a00baaa2610 100644 --- a/ngraph/test/type_prop/scatter_nd_update.cpp +++ b/ngraph/test/type_prop/scatter_nd_update.cpp @@ -26,9 +26,9 @@ TEST(type_prop, scatter_nd_update_v3_fail_indices_element_type) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::f16, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::f16, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); try { auto G = make_shared(R, I, U); @@ -51,9 +51,9 @@ TEST(type_prop, scatter_nd_update_v3_fail_updates_rank) Shape indices_shape{1}; Shape updates_shape{3, 3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); try { auto G = make_shared(R, I, U); @@ -78,9 +78,9 @@ TEST(type_prop, scatter_nd_update_fail_updates_element_type) Shape indices_shape{1}; Shape updates_shape{3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::i32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto U = make_shared(element::Type_t::i32, updates_shape); try { auto G = make_shared(R, I, U); @@ -104,9 +104,9 @@ TEST(type_prop, scatter_nd_update_fail_updates_shape) Shape indices_shape{1}; Shape updates_shape{2, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = 
make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); try { auto G = make_shared(R, I, U); @@ -132,9 +132,9 @@ TEST(type_prop, scatter_nd_update_fail_indices_last_dim) Shape indices_shape{2, 4}; Shape updates_shape{2, 3, 3}; Shape out_shape{3, 3, 3}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i32, indices_shape); - auto U = make_shared(element::f32, updates_shape); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i32, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); try { auto G = make_shared(R, I, U); diff --git a/ngraph/test/type_prop/scatter_update.cpp b/ngraph/test/type_prop/scatter_update.cpp index 4f113b22988..3135ab79b38 100644 --- a/ngraph/test/type_prop/scatter_update.cpp +++ b/ngraph/test/type_prop/scatter_update.cpp @@ -26,10 +26,10 @@ TEST(type_prop, scatter_update_v3_fail_indices_element_type) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::f16, indices_shape); - auto U = make_shared(element::f32, updates_shape); - auto A = op::Constant::create(element::i64, Shape{}, {1}); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::f16, indices_shape); + auto U = make_shared(element::Type_t::f32, updates_shape); + auto A = op::Constant::create(element::Type_t::i64, Shape{}, {1}); try { auto G = make_shared(R, I, U, A); @@ -52,10 +52,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_data_et_not_equal) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::f32, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::u32, updates_shape); - auto A = op::Constant::create(element::u32, Shape{1}, {1}); + auto R = make_shared(element::Type_t::f32, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::u32, updates_shape); + auto A = op::Constant::create(element::Type_t::u32, Shape{1}, {1}); try { auto G = make_shared(R, I, U, A); @@ -78,10 +78,10 @@ TEST(type_prop, scatter_update_v3_fail_axis_element_type) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::i16, ref_shape); - auto I = make_shared(element::u64, indices_shape); - auto U = make_shared(element::i16, updates_shape); - auto A = op::Constant::create(element::f32, Shape{1}, {1.5f}); + auto R = make_shared(element::Type_t::i16, ref_shape); + auto I = make_shared(element::Type_t::u64, indices_shape); + auto U = make_shared(element::Type_t::i16, updates_shape); + auto A = op::Constant::create(element::Type_t::f32, Shape{1}, {1.5f}); try { auto G = make_shared(R, I, U, A); @@ -104,10 +104,10 @@ TEST(type_prop, scatter_update_v3_fail_axis_shape) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::u8, ref_shape); - auto I = make_shared(element::u16, indices_shape); - auto U = make_shared(element::u8, updates_shape); - auto A = op::Constant::create(element::u8, Shape{2}, {1, 5}); + auto R = make_shared(element::Type_t::u8, ref_shape); + auto I = 
make_shared(element::Type_t::u16, indices_shape); + auto U = make_shared(element::Type_t::u8, updates_shape); + auto A = op::Constant::create(element::Type_t::u8, Shape{2}, {1, 5}); try { auto G = make_shared(R, I, U, A); @@ -130,10 +130,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_rank) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 1, 4}; - auto R = make_shared(element::f64, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::f64, updates_shape); - auto A = op::Constant::create(element::u8, Shape{}, {0}); + auto R = make_shared(element::Type_t::f64, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::f64, updates_shape); + auto A = op::Constant::create(element::Type_t::u8, Shape{}, {0}); try { auto G = make_shared(R, I, U, A); @@ -157,10 +157,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_shape_axis) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::u64, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::u64, updates_shape); - auto A = op::Constant::create(element::u16, Shape{}, {0}); + auto R = make_shared(element::Type_t::u64, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::u64, updates_shape); + auto A = op::Constant::create(element::Type_t::u16, Shape{}, {0}); try { auto G = make_shared(R, I, U, A); @@ -185,10 +185,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_shape_indices) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 3, 1, 4}; - auto R = make_shared(element::u32, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::u32, updates_shape); - auto A = op::Constant::create(element::i32, Shape{}, {1}); + auto R = make_shared(element::Type_t::u32, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::u32, updates_shape); + auto A = op::Constant::create(element::Type_t::i32, Shape{}, {1}); try { auto G = make_shared(R, I, U, A); @@ -213,10 +213,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_shape_data_before_axis) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{3, 2, 1, 4}; - auto R = make_shared(element::u16, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::u16, updates_shape); - auto A = op::Constant::create(element::i8, Shape{}, {1}); + auto R = make_shared(element::Type_t::u16, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::u16, updates_shape); + auto A = op::Constant::create(element::Type_t::i8, Shape{}, {1}); try { auto G = make_shared(R, I, U, A); @@ -241,10 +241,10 @@ TEST(type_prop, scatter_update_v3_fail_updates_shape_data_after_axis) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 5}; - auto R = make_shared(element::i8, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::i8, updates_shape); - auto A = op::Constant::create(element::i16, Shape{}, {1}); + auto R = make_shared(element::Type_t::i8, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::i8, updates_shape); + auto A = op::Constant::create(element::Type_t::i16, Shape{}, {1}); try { 
auto G = make_shared(R, I, U, A); @@ -269,13 +269,13 @@ TEST(type_prop, scatter_update_v3) Shape ref_shape{2, 3, 4}; Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::i8, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::i8, updates_shape); - auto A = op::Constant::create(element::i16, Shape{}, {1}); + auto R = make_shared(element::Type_t::i8, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::i8, updates_shape); + auto A = op::Constant::create(element::Type_t::i16, Shape{}, {1}); auto scatter_update = make_shared(R, I, U, A); - EXPECT_EQ(scatter_update->get_output_element_type(0), element::i8); + EXPECT_EQ(scatter_update->get_output_element_type(0), element::Type_t::i8); EXPECT_EQ(scatter_update->get_output_shape(0), ref_shape); } @@ -284,12 +284,12 @@ TEST(type_prop, scatter_update_v3_dynamic_data_shape) PartialShape ref_shape = PartialShape::dynamic(); Shape indices_shape{2, 1}; Shape updates_shape{2, 2, 1, 4}; - auto R = make_shared(element::i8, ref_shape); - auto I = make_shared(element::i16, indices_shape); - auto U = make_shared(element::i8, updates_shape); - auto A = op::Constant::create(element::i16, Shape{}, {1}); + auto R = make_shared(element::Type_t::i8, ref_shape); + auto I = make_shared(element::Type_t::i16, indices_shape); + auto U = make_shared(element::Type_t::i8, updates_shape); + auto A = op::Constant::create(element::Type_t::i16, Shape{}, {1}); auto scatter_update = make_shared(R, I, U, A); - EXPECT_EQ(scatter_update->get_output_element_type(0), element::i8); + EXPECT_EQ(scatter_update->get_output_element_type(0), element::Type_t::i8); EXPECT_TRUE(scatter_update->get_output_partial_shape(0).is_dynamic()); } diff --git a/ngraph/test/type_prop/select.cpp b/ngraph/test/type_prop/select.cpp index e70cff09043..c98f2e6dc71 100644 --- a/ngraph/test/type_prop/select.cpp +++ b/ngraph/test/type_prop/select.cpp @@ -25,19 +25,19 @@ using namespace ngraph; TEST(type_prop, select_deduce) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{2, 4}); auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); - ASSERT_EQ(bc->get_element_type(), element::f32); + ASSERT_EQ(bc->get_element_type(), element::Type_t::f32); ASSERT_EQ(bc->get_shape(), (Shape{2, 4})); } TEST(type_prop, select_shape_mismatch_a) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{3, 5}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{3, 5}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -56,9 +56,9 @@ TEST(type_prop, select_shape_mismatch_a) TEST(type_prop, select_shape_mismatch_b) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{3, 5}); - auto tv0_2_4_param_2 = 
make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{3, 5}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -77,9 +77,9 @@ TEST(type_prop, select_shape_mismatch_b) TEST(type_prop, select_shape_mismatch_c) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{3, 5}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{3, 5}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -98,9 +98,9 @@ TEST(type_prop, select_shape_mismatch_c) TEST(type_prop, select_elem_mismatch_a) { - auto tv0_2_4_param_0 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::f32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -120,9 +120,9 @@ TEST(type_prop, select_elem_mismatch_a) TEST(type_prop, select_elem_mismatch_bc) { - auto tv0_2_4_param_0 = make_shared(element::boolean, Shape{2, 4}); - auto tv0_2_4_param_1 = make_shared(element::f32, Shape{2, 4}); - auto tv0_2_4_param_2 = make_shared(element::i32, Shape{2, 4}); + auto tv0_2_4_param_0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto tv0_2_4_param_1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto tv0_2_4_param_2 = make_shared(element::Type_t::i32, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param_0, tv0_2_4_param_1, tv0_2_4_param_2); @@ -142,21 +142,21 @@ TEST(type_prop, select_elem_mismatch_bc) TEST(type_prop, select_partial_all_rank_dynamic) { - auto param0 = make_shared(element::boolean, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::boolean, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, select_partial_all_rank_dynamic_arg0_et_dynamic_arg1_arg2_et_mismatch) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::i32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::i32, PartialShape::dynamic()); try { @@ -177,78 +177,78 @@ TEST(type_prop, 
select_partial_all_rank_dynamic_arg0_et_dynamic_arg1_arg2_et_mis TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_et_dynamic) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg2_et_dynamic) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, select_partial_all_rank_dynamic_arg0_arg1_arg2_et_dynamic) { - auto param0 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param1 = make_shared(element::dynamic, PartialShape::dynamic()); - auto param2 = make_shared(element::dynamic, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::dynamic, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::dynamic); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::dynamic); ASSERT_TRUE(sel->get_output_partial_shape(0).rank().is_dynamic()); } TEST(type_prop, select_partial_arg0_rank_dynamic_static_arg1_arg2_rank_dynamic_ok) { - auto param0 = - make_shared(element::boolean, PartialShape{2, Dimension::dynamic(), 3}); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); - auto param2 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::boolean, + PartialShape{2, Dimension::dynamic(), 3}); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE( sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3})); } TEST(type_prop, select_partial_arg1_rank_dynamic_static_arg0_arg2_rank_dynamic_ok) { - auto param0 = make_shared(element::boolean, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::boolean, PartialShape::dynamic()); auto param1 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); - auto 
param2 = make_shared(element::f32, PartialShape::dynamic()); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); + auto param2 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE( sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3})); } TEST(type_prop, select_partial_arg2_rank_dynamic_static_arg0_arg1_rank_dynamic_ok) { - auto param0 = make_shared(element::boolean, PartialShape::dynamic()); - auto param1 = make_shared(element::f32, PartialShape::dynamic()); + auto param0 = make_shared(element::Type_t::boolean, PartialShape::dynamic()); + auto param1 = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto param2 = - make_shared(element::f32, PartialShape{2, Dimension::dynamic(), 3}); + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic(), 3}); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE( sel->get_output_partial_shape(0).same_scheme(PartialShape{2, Dimension::dynamic(), 3})); } @@ -256,15 +256,15 @@ TEST(type_prop, select_partial_arg2_rank_dynamic_static_arg0_arg1_rank_dynamic_o TEST(type_prop, select_partial_all_rank_static_dynamic_ok) { auto param0 = make_shared( - element::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + element::Type_t::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); auto param1 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); auto param2 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3}); auto sel = make_shared(param0, param1, param2); - ASSERT_EQ(sel->get_output_element_type(0), element::f32); + ASSERT_EQ(sel->get_output_element_type(0), element::Type_t::f32); ASSERT_TRUE(sel->get_output_partial_shape(0).is_static()); ASSERT_EQ(sel->get_output_shape(0), (Shape{2, 8, 3})); } @@ -272,11 +272,11 @@ TEST(type_prop, select_partial_all_rank_static_dynamic_ok) TEST(type_prop, select_partial_all_rank_static_intransitive_incompatibility) { auto param0 = make_shared( - element::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + element::Type_t::boolean, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); auto param1 = make_shared( - element::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); + element::Type_t::f32, PartialShape{Dimension::dynamic(), 8, Dimension::dynamic()}); auto param2 = - make_shared(element::f32, PartialShape{3, Dimension::dynamic(), 3}); + make_shared(element::Type_t::f32, PartialShape{3, Dimension::dynamic(), 3}); try { @@ -331,43 +331,71 @@ TEST_P(DeduceV1SelectTest, output_shape) INSTANTIATE_TEST_CASE_P( type_prop, DeduceV1SelectTest, - ::testing::Values(SelectParams({{2, 4}, {2, 4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::f32, element::f32}, - op::AutoBroadcastType::NONE), - SelectParams({{2, 4}, {2, 4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::f32, element::f32}, - op::AutoBroadcastType::NUMPY), - SelectParams({{}, {2, 4}, {2, 4}, {2, 4}}, - 
{element::boolean, element::f32, element::f32, element::f32}, - op::AutoBroadcastType::NUMPY), - SelectParams({{}, {4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::dynamic, element::f32}, - op::AutoBroadcastType::NUMPY), - SelectParams({{}, {2, 4}, {4}, {2, 4}}, - {element::boolean, element::f32, element::f32, element::f32}, - op::AutoBroadcastType::NUMPY), - SelectParams({{4}, {2, 4}, {4}, {2, 4}}, - {element::boolean, element::i8, element::dynamic, element::i8}, - op::AutoBroadcastType::NUMPY), - SelectParams({{4}, {4}, {2, 4}, {2, 4}}, - {element::dynamic, element::dynamic, element::i8, element::i8}, - op::AutoBroadcastType::NUMPY), - SelectParams({{2}, {2}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::dynamic, element::f32}, - {op::AutoBroadcastType::PDPD, 0}), - // TODO: Whats the right behavior here? - // SelectParams({{2}, {2, 4}, {2}, {2, 4}}, {element::boolean, element::f32, - // element::dynamic, element::f32}, {op::AutoBroadcastType::PDPD, 0}), - SelectParams({{4}, {4}, {2, 4}, {2, 4}}, - {element::boolean, element::f32, element::dynamic, element::f32}, - {op::AutoBroadcastType::PDPD, 1})), + ::testing::Values( + SelectParams({{2, 4}, {2, 4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::f32, + element::Type_t::f32}, + op::AutoBroadcastType::NONE), + SelectParams({{2, 4}, {2, 4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::f32, + element::Type_t::f32}, + op::AutoBroadcastType::NUMPY), + SelectParams({{}, {2, 4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::f32, + element::Type_t::f32}, + op::AutoBroadcastType::NUMPY), + SelectParams({{}, {4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::dynamic, + element::Type_t::f32}, + op::AutoBroadcastType::NUMPY), + SelectParams({{}, {2, 4}, {4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::f32, + element::Type_t::f32}, + op::AutoBroadcastType::NUMPY), + SelectParams({{4}, {2, 4}, {4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::i8, + element::Type_t::dynamic, + element::Type_t::i8}, + op::AutoBroadcastType::NUMPY), + SelectParams({{4}, {4}, {2, 4}, {2, 4}}, + {element::Type_t::dynamic, + element::Type_t::dynamic, + element::Type_t::i8, + element::Type_t::i8}, + op::AutoBroadcastType::NUMPY), + SelectParams({{2}, {2}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::dynamic, + element::Type_t::f32}, + {op::AutoBroadcastType::PDPD, 0}), + // TODO: Whats the right behavior here? 
+ // SelectParams({{2}, {2, 4}, {2}, {2, 4}}, {element::Type_t::boolean, element::Type_t::f32, + // element::Type_t::dynamic, element::Type_t::f32}, {op::AutoBroadcastType::PDPD, 0}), + SelectParams({{4}, {4}, {2, 4}, {2, 4}}, + {element::Type_t::boolean, + element::Type_t::f32, + element::Type_t::dynamic, + element::Type_t::f32}, + {op::AutoBroadcastType::PDPD, 1})), PrintToDummyParamName()); TEST(type_prop, select_v1_partial_shape) { - auto a = make_shared(element::boolean, PartialShape::dynamic()); - auto b = make_shared(element::f32, Shape{2, 4}); - auto c = make_shared(element::f32, Shape{2, 4}); + auto a = make_shared(element::Type_t::boolean, PartialShape::dynamic()); + auto b = make_shared(element::Type_t::f32, Shape{2, 4}); + auto c = make_shared(element::Type_t::f32, Shape{2, 4}); auto select = make_shared(a, b, c, op::AutoBroadcastType::NONE); ASSERT_EQ(select->get_shape(), (Shape{2, 4})); @@ -375,9 +403,11 @@ TEST(type_prop, select_v1_partial_shape) TEST(type_prop, select_v1_partial_shape_autob) { - auto a = make_shared(element::boolean, PartialShape{Dimension::dynamic()}); - auto b = make_shared(element::f32, PartialShape{Dimension::dynamic()}); - auto c = make_shared(element::f32, PartialShape{2, Dimension::dynamic()}); + auto a = + make_shared(element::Type_t::boolean, PartialShape{Dimension::dynamic()}); + auto b = make_shared(element::Type_t::f32, PartialShape{Dimension::dynamic()}); + auto c = + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic()}); auto select = make_shared(a, b, c); ASSERT_TRUE( @@ -386,9 +416,9 @@ TEST(type_prop, select_v1_partial_shape_autob) TEST(type_prop, select_v1_wrong_et) { - auto param0 = make_shared(element::i8, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 4}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + auto param0 = make_shared(element::Type_t::i8, Shape{2, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { @@ -408,9 +438,9 @@ TEST(type_prop, select_v1_wrong_et) TEST(type_prop, select_v1_et_mismatch) { - auto param0 = make_shared(element::boolean, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 4}); - auto param2 = make_shared(element::i8, Shape{2, 4}); + auto param0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 4}); + auto param2 = make_shared(element::Type_t::i8, Shape{2, 4}); try { @@ -430,9 +460,9 @@ TEST(type_prop, select_v1_et_mismatch) TEST(type_prop, select_v1_shape_mismatch) { - auto param0 = make_shared(element::boolean, Shape{2, 4}); - auto param1 = make_shared(element::f32, Shape{2, 3}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + auto param0 = make_shared(element::Type_t::boolean, Shape{2, 4}); + auto param1 = make_shared(element::Type_t::f32, Shape{2, 3}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 4}); try { @@ -452,9 +482,10 @@ TEST(type_prop, select_v1_shape_mismatch) TEST(type_prop, select_v1_partial_shape_mismatch) { auto param0 = - make_shared(element::boolean, PartialShape{3, Dimension::dynamic()}); - auto param1 = make_shared(element::f32, PartialShape{2, Dimension::dynamic()}); - auto param2 = make_shared(element::f32, Shape{2, 4}); + make_shared(element::Type_t::boolean, PartialShape{3, Dimension::dynamic()}); + auto param1 = + make_shared(element::Type_t::f32, PartialShape{2, Dimension::dynamic()}); + auto param2 = make_shared(element::Type_t::f32, Shape{2, 4}); try 
{ diff --git a/ngraph/test/type_prop/shape_of.cpp b/ngraph/test/type_prop/shape_of.cpp index 812b9771a22..9ea09f6cc28 100644 --- a/ngraph/test/type_prop/shape_of.cpp +++ b/ngraph/test/type_prop/shape_of.cpp @@ -23,85 +23,85 @@ using namespace ngraph; TEST(type_prop, shape_of_v0) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_et_dynamic_v0) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_rank_static_dynamic_v0) { auto a = make_shared( - element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); + element::Type_t::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_rank_dynamic_v0) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_TRUE(so->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, shape_of_v3) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_et_dynamic_v3) { - auto a = make_shared(element::dynamic, Shape{1, 2, 3, 4}); + auto a = make_shared(element::Type_t::dynamic, Shape{1, 2, 3, 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_rank_static_dynamic_v3) { auto a = make_shared( - element::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); + element::Type_t::f32, PartialShape{1, Dimension::dynamic(), Dimension::dynamic(), 4}); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_EQ(so->get_shape(), Shape{4}); } TEST(type_prop, shape_of_partial_rank_dynamic_v3) { - auto a = make_shared(element::f32, PartialShape::dynamic()); + auto a = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto so = make_shared(a); - ASSERT_EQ(so->get_output_element_type(0), element::i64); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i64); ASSERT_TRUE(so->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(1))); } TEST(type_prop, shape_of_output_type_v3) { - auto a = make_shared(element::f32, Shape{1, 2, 3, 4}); - auto so = make_shared(a, 
element::i32); + auto a = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); + auto so = make_shared(a, element::Type_t::i32); try { - auto sx = make_shared(a, element::i8); + auto sx = make_shared(a, element::Type_t::i8); FAIL() << "Invalid output_type not detected"; } catch (NodeValidationFailure) @@ -113,7 +113,7 @@ TEST(type_prop, shape_of_output_type_v3) } try { - auto sx = make_shared(a, element::i16); + auto sx = make_shared(a, element::Type_t::i16); FAIL() << "Invalid output_type not detected"; } catch (NodeValidationFailure) @@ -125,7 +125,7 @@ TEST(type_prop, shape_of_output_type_v3) } try { - auto sx = make_shared(a, element::f32); + auto sx = make_shared(a, element::Type_t::f32); FAIL() << "Invalid output_type not detected"; } catch (NodeValidationFailure) @@ -136,6 +136,6 @@ TEST(type_prop, shape_of_output_type_v3) FAIL() << "Node validation error not thrown"; } - ASSERT_EQ(so->get_output_element_type(0), element::i32); + ASSERT_EQ(so->get_output_element_type(0), element::Type_t::i32); ASSERT_EQ(so->get_shape(), Shape{4}); } diff --git a/ngraph/test/type_prop/shuffle_channels.cpp b/ngraph/test/type_prop/shuffle_channels.cpp index abef93c472b..ced139cea25 100644 --- a/ngraph/test/type_prop/shuffle_channels.cpp +++ b/ngraph/test/type_prop/shuffle_channels.cpp @@ -25,7 +25,7 @@ TEST(type_prop, shuffle_channels_axis_validation) { try { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 4}); + const auto data = make_shared(element::Type_t::f64, Shape{1, 2, 3, 4}); const auto shuffle_channels = make_shared(data, -5, 5); FAIL() << "ShuffleChannels validation did not work. Op node was created with incorrect " "params."; @@ -40,7 +40,7 @@ TEST(type_prop, shuffle_channels_axis_validation) TEST(type_prop, shuffle_channels_negative_axis_calculation) { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 4}); + const auto data = make_shared(element::Type_t::f64, Shape{1, 2, 3, 4}); const auto shuffle_channels = make_shared(data, -3, 2); @@ -51,7 +51,7 @@ TEST(type_prop, shuffle_channels_invalid_input_shape) { try { - const auto data = make_shared(element::f64, Shape{}); + const auto data = make_shared(element::Type_t::f64, Shape{}); const auto shuffle_channels = make_shared(data, 0, 1); FAIL() << "ShuffleChannels validation did not work. Op node was created with incorrect " "params."; @@ -67,7 +67,7 @@ TEST(type_prop, shuffle_channels_invalid_groups_value) { try { - const auto data = make_shared(element::f64, Shape{1, 2, 3, 15}); + const auto data = make_shared(element::Type_t::f64, Shape{1, 2, 3, 15}); const auto shuffle_channels = make_shared(data, -1, 2); FAIL() << "ShuffleChannels validation did not work. 
Op node was created with incorrect " "params."; diff --git a/ngraph/test/type_prop/softmax.cpp b/ngraph/test/type_prop/softmax.cpp index e76761f0618..728cb5a1b45 100644 --- a/ngraph/test/type_prop/softmax.cpp +++ b/ngraph/test/type_prop/softmax.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, softmax_default_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); auto sm = make_shared(arg); ASSERT_EQ(sm->get_axis(), 1); } @@ -31,7 +31,7 @@ TEST(type_prop, softmax_default_axis) TEST(type_prop, softmax_out_of_bound_axis) { const Shape arg_shape{2, 3}; - auto arg = make_shared(element::f32, arg_shape); + auto arg = make_shared(element::Type_t::f32, arg_shape); // axis cannot be a negative number ASSERT_THROW(make_shared(arg, -1), ngraph::NodeValidationFailure); } diff --git a/ngraph/test/type_prop/softplus.cpp b/ngraph/test/type_prop/softplus.cpp index 7e40369209b..918f05d993c 100644 --- a/ngraph/test/type_prop/softplus.cpp +++ b/ngraph/test/type_prop/softplus.cpp @@ -23,31 +23,33 @@ using namespace ngraph; TEST(type_prop, softplus) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto softplus_func = make_shared(data); - EXPECT_EQ(softplus_func->get_element_type(), element::f32); + EXPECT_EQ(softplus_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(softplus_func->get_shape(), (Shape{1, 3, 6})); } TEST(type_prop, softplus_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto softplus_func = make_shared(data); - EXPECT_EQ(softplus_func->get_element_type(), element::f32); + EXPECT_EQ(softplus_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(softplus_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); // rank unknown auto softplus_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE(softplus_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, softplus_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto softplus_func = make_shared(data); - EXPECT_EQ(softplus_func->get_element_type(), element::f32); + EXPECT_EQ(softplus_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(softplus_func->get_output_partial_shape(0).same_scheme( (PartialShape{1, Dimension::dynamic(), 6}))); ASSERT_TRUE(softplus_func->get_output_partial_shape(0).rank().is_static()); diff --git a/ngraph/test/type_prop/space_to_batch.cpp b/ngraph/test/type_prop/space_to_batch.cpp index cd40078a014..2367ff250bb 100644 --- a/ngraph/test/type_prop/space_to_batch.cpp +++ b/ngraph/test/type_prop/space_to_batch.cpp @@ -23,70 +23,75 @@ using namespace ngraph; TEST(type_prop, space_to_batch_output_shape_2D) { - auto data = make_shared(element::f32, Shape{2, 128}); - auto block_shape = make_shared(element::i64, Shape{2}, vector{1, 5}); - auto pads_begin = make_shared(element::i64, Shape{2}, vector{0, 2}); - auto pads_end = make_shared(element::i64, Shape{2}, vector{0, 0}); + auto data = make_shared(element::Type_t::f32, Shape{2, 128}); + auto block_shape = + 
make_shared(element::Type_t::i64, Shape{2}, vector{1, 5}); + auto pads_begin = + make_shared(element::Type_t::i64, Shape{2}, vector{0, 2}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{2}, vector{0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 5, (128 + 2) / 5})); } TEST(type_prop, space_to_batch_output_shape_4D) { - auto data = make_shared(element::f32, Shape{2, 64, 64, 3}); + auto data = make_shared(element::Type_t::f32, Shape{2, 64, 64, 3}); auto block_shape = - make_shared(element::i64, Shape{4}, vector{1, 10, 5, 1}); + make_shared(element::Type_t::i64, Shape{4}, vector{1, 10, 5, 1}); auto pads_begin = - make_shared(element::i64, Shape{4}, vector{0, 3, 1, 0}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 3, 0, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 1, 0}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 10 * 5, (64 + 3 + 3) / 10, (64 + 1) / 5, 3})); } TEST(type_prop, space_to_batch_output_shape_5D) { - auto data = make_shared(element::f32, Shape{2, 32, 64, 128, 256}); + auto data = make_shared(element::Type_t::f32, Shape{2, 32, 64, 128, 256}); auto block_shape = - make_shared(element::i32, Shape{5}, vector{1, 6, 5, 1, 16}); + make_shared(element::Type_t::i32, Shape{5}, vector{1, 6, 5, 1, 16}); auto pads_begin = - make_shared(element::i32, Shape{5}, vector{0, 2, 0, 0, 0}); + make_shared(element::Type_t::i32, Shape{5}, vector{0, 2, 0, 0, 0}); auto pads_end = - make_shared(element::i32, Shape{5}, vector{0, 2, 1, 0, 0}); + make_shared(element::Type_t::i32, Shape{5}, vector{0, 2, 1, 0, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 6 * 5 * 16, (32 + 2 + 2) / 6, (64 + 1) / 5, 128, 256 / 16})); } TEST(type_prop, space_to_batch_and_batch_to_space) { - auto data = make_shared(element::f32, Shape{2, 100, 1024, 3}); + auto data = make_shared(element::Type_t::f32, Shape{2, 100, 1024, 3}); auto block_shape = - make_shared(element::i64, Shape{4}, vector{1, 12, 100, 2}); + make_shared(element::Type_t::i64, Shape{4}, vector{1, 12, 100, 2}); auto pads_begin = - make_shared(element::i64, Shape{4}, vector{0, 3, 38, 1}); - auto pads_end = make_shared(element::i64, Shape{4}, vector{0, 5, 38, 0}); + make_shared(element::Type_t::i64, Shape{4}, vector{0, 3, 38, 1}); + auto pads_end = + make_shared(element::Type_t::i64, Shape{4}, vector{0, 5, 38, 0}); auto space_to_batch = make_shared(data, block_shape, pads_begin, pads_end); - ASSERT_EQ(space_to_batch->get_element_type(), element::f32); + ASSERT_EQ(space_to_batch->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_batch->get_shape(), (Shape{2 * 12 * 100 * 2, (100 + 3 + 5) / 12, (1024 + 38 + 38) / 100, (3 + 1) / 2})); auto batch_to_space = make_shared(space_to_batch, block_shape, pads_begin, pads_end); - ASSERT_EQ(batch_to_space->get_element_type(), element::f32); + 
ASSERT_EQ(batch_to_space->get_element_type(), element::Type_t::f32); ASSERT_EQ(batch_to_space->get_shape(), (Shape{2, 100, 1024, 3})); } diff --git a/ngraph/test/type_prop/space_to_depth.cpp b/ngraph/test/type_prop/space_to_depth.cpp index 9c0ded0a64b..6055fcd16d5 100644 --- a/ngraph/test/type_prop/space_to_depth.cpp +++ b/ngraph/test/type_prop/space_to_depth.cpp @@ -23,47 +23,47 @@ using namespace ngraph; TEST(type_prop, space_to_depth_output_shape_block_first_4D) { - auto A = make_shared(element::f32, Shape{1, 2, 64, 64}); + auto A = make_shared(element::Type_t::f32, Shape{1, 2, 64, 64}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; auto space_to_depth = make_shared(A, mode, 8); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 128, 8, 8})); } TEST(type_prop, space_to_depth_output_shape_block_first_4D_2) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 1080, 1616}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::BLOCKS_FIRST; auto space_to_depth = make_shared(A, mode, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 4, 1080 / 2, 1616 / 2})); } TEST(type_prop, space_to_depth_output_shape_depth_first_4D) { - auto A = make_shared(element::f32, Shape{1, 12, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 1080, 1616}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; auto space_to_depth = make_shared(A, mode, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 4, 1080 / 2, 1616 / 2})); } TEST(type_prop, space_to_depth_output_shape_depth_first_5D) { - auto A = make_shared(element::f32, Shape{1, 12, 4, 1080, 1616}); + auto A = make_shared(element::Type_t::f32, Shape{1, 12, 4, 1080, 1616}); const auto mode = ngraph::op::SpaceToDepth::SpaceToDepthMode::DEPTH_FIRST; auto space_to_depth = make_shared(A, mode, 2); - ASSERT_EQ(space_to_depth->get_element_type(), element::f32); + ASSERT_EQ(space_to_depth->get_element_type(), element::Type_t::f32); ASSERT_EQ(space_to_depth->get_shape(), (Shape{1, 12 * 8, 4 / 2, 1080 / 2, 1616 / 2})); } TEST(type_prop, space_to_depth_input_rank_not_supported) { - auto A = make_shared(element::f32, Shape{1, 8}); + auto A = make_shared(element::Type_t::f32, Shape{1, 8}); try { auto space_to_depth = @@ -84,7 +84,7 @@ TEST(type_prop, space_to_depth_input_rank_not_supported) TEST(type_prop, space_to_depth_blocksize_not_matched) { - auto A = make_shared(element::f32, Shape{1, 3, 8, 7}); + auto A = make_shared(element::Type_t::f32, Shape{1, 3, 8, 7}); try { auto space_to_depth = diff --git a/ngraph/test/type_prop/split.cpp b/ngraph/test/type_prop/split.cpp index 8abbe593dca..0fffd7f9666 100644 --- a/ngraph/test/type_prop/split.cpp +++ b/ngraph/test/type_prop/split.cpp @@ -25,11 +25,11 @@ using namespace ngraph; TEST(type_prop, split) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, 
Shape{}, {1}); const auto split = make_shared(data, axis, 7); FAIL() << "Split node was created with incorrect data."; } @@ -43,7 +43,7 @@ TEST(type_prop, split) try { - const auto axis = op::Constant::create(element::i64, Shape{}, {-5}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {-5}); const auto split = make_shared(data, axis, 4); // invalid axis FAIL() << "Split node was created with incorrect data."; } @@ -52,19 +52,19 @@ TEST(type_prop, split) EXPECT_HAS_SUBSTRING(error.what(), std::string("Parameter axis -5 out of the tensor rank")); } - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); const auto split = make_shared(data, axis, 2); EXPECT_EQ(split->outputs().size(), 2); EXPECT_EQ(split->get_output_shape(0), (Shape{2, 3})); EXPECT_EQ(split->get_output_shape(1), (Shape{2, 3})); - EXPECT_EQ(split->get_output_element_type(0), element::i32); - EXPECT_EQ(split->get_output_element_type(1), element::i32); + EXPECT_EQ(split->get_output_element_type(0), element::Type_t::i32); + EXPECT_EQ(split->get_output_element_type(1), element::Type_t::i32); } TEST(type_prop, split_axis_must_be_scalar) { - const auto data = make_shared(element::i32, Shape{2, 6}); - const auto axis = op::Constant::create(element::i64, Shape{2}, {0, 1}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{2}, {0, 1}); try { @@ -84,15 +84,15 @@ TEST(type_prop, split_axis_must_be_scalar) TEST(type_prop, split_v1) { - const auto data = make_shared(element::f16, Shape{2, 3, 4}); - const auto axis = op::Constant::create(element::i64, {}, {1}); + const auto data = make_shared(element::Type_t::f16, Shape{2, 3, 4}); + const auto axis = op::Constant::create(element::Type_t::i64, {}, {1}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); EXPECT_EQ(split->outputs().size(), num_splits); for (int i = 0; i < num_splits; ++i) { - EXPECT_EQ(split->get_output_element_type(i), element::f16); + EXPECT_EQ(split->get_output_element_type(i), element::Type_t::f16); EXPECT_EQ(split->get_output_shape(i), (Shape{2, 1, 4})); } } @@ -100,8 +100,8 @@ TEST(type_prop, split_v1) TEST(type_prop, split_v1_axis_const_data_axis_dim_known) { const auto data = - make_shared(element::f32, PartialShape{2, 3, Dimension::dynamic()}); - const auto axis = op::Constant::create(element::i32, {}, {1}); + make_shared(element::Type_t::f32, PartialShape{2, 3, Dimension::dynamic()}); + const auto axis = op::Constant::create(element::Type_t::i32, {}, {1}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -115,8 +115,8 @@ TEST(type_prop, split_v1_axis_const_data_axis_dim_known) TEST(type_prop, split_v1_axis_const_only_data_axis_dim_known) { const auto data = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); - const auto axis = op::Constant::create(element::i16, {}, {0}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic()}); + const auto axis = op::Constant::create(element::Type_t::i16, {}, {0}); const size_t num_splits = 2; const auto split = make_shared(data, axis, num_splits); @@ -130,9 +130,9 @@ TEST(type_prop, split_v1_axis_const_only_data_axis_dim_known) TEST(type_prop, split_v1_axis_const_data_axis_dim_unknown) { - const auto data = - make_shared(element::f32, PartialShape{4, Dimension::dynamic(), 3, 5}); - const auto 
axis = op::Constant::create(element::i8, {}, {1}); + const auto data = make_shared(element::Type_t::f32, + PartialShape{4, Dimension::dynamic(), 3, 5}); + const auto axis = op::Constant::create(element::Type_t::i8, {}, {1}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -146,8 +146,8 @@ TEST(type_prop, split_v1_axis_const_data_axis_dim_unknown) TEST(type_prop, split_v1_axis_const_only_data_rank_known) { - const auto data = make_shared(element::f32, PartialShape::dynamic(4)); - const auto axis = op::Constant::create(element::u64, {}, {1}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); + const auto axis = op::Constant::create(element::Type_t::u64, {}, {1}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -160,8 +160,8 @@ TEST(type_prop, split_v1_axis_const_only_data_rank_known) TEST(type_prop, split_v1_axis_not_const_only_data_rank_known) { - const auto data = make_shared(element::f32, PartialShape::dynamic(4)); - const auto axis = make_shared(element::u32, PartialShape{}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic(4)); + const auto axis = make_shared(element::Type_t::u32, PartialShape{}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -174,8 +174,8 @@ TEST(type_prop, split_v1_axis_not_const_only_data_rank_known) TEST(type_prop, split_v1_axis_const_data_rank_unknown) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = op::Constant::create(element::u16, {}, {2}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto axis = op::Constant::create(element::Type_t::u16, {}, {2}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -188,8 +188,8 @@ TEST(type_prop, split_v1_axis_const_data_rank_unknown) TEST(type_prop, split_v1_axis_not_const_data_rank_unknown) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = make_shared(element::u8, PartialShape{}); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto axis = make_shared(element::Type_t::u8, PartialShape{}); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); @@ -202,8 +202,8 @@ TEST(type_prop, split_v1_axis_not_const_data_rank_unknown) TEST(type_prop, split_v1_axis_dynamic_rank) { - const auto data = make_shared(element::f32, PartialShape::dynamic()); - const auto axis = make_shared(element::u8, PartialShape::dynamic()); + const auto data = make_shared(element::Type_t::f32, PartialShape::dynamic()); + const auto axis = make_shared(element::Type_t::u8, PartialShape::dynamic()); const size_t num_splits = 3; const auto split = make_shared(data, axis, num_splits); diff --git a/ngraph/test/type_prop/squared_difference.cpp b/ngraph/test/type_prop/squared_difference.cpp index bdedbeb5ea8..4646f41652a 100644 --- a/ngraph/test/type_prop/squared_difference.cpp +++ b/ngraph/test/type_prop/squared_difference.cpp @@ -23,9 +23,9 @@ using namespace ngraph; TEST(type_prop, squared_difference) { - const auto x1 = make_shared(element::f64, Shape{2, 2}); - const auto x2 = make_shared(element::f64, Shape{3, 2}); - const auto x3 = make_shared(element::f64, Shape{1, 2}); + const auto x1 = make_shared(element::Type_t::f64, Shape{2, 2}); + const auto x2 = make_shared(element::Type_t::f64, Shape{3, 2}); + const auto x3 = make_shared(element::Type_t::f64, 
Shape{1, 2}); try { @@ -38,6 +38,6 @@ TEST(type_prop, squared_difference) } const auto clamp = make_shared(x1, x3); - EXPECT_EQ(clamp->get_element_type(), element::f64); + EXPECT_EQ(clamp->get_element_type(), element::Type_t::f64); EXPECT_EQ(clamp->get_shape(), (Shape{2, 2})); } diff --git a/ngraph/test/type_prop/squeeze.cpp b/ngraph/test/type_prop/squeeze.cpp index 78b813a57d9..7768589f450 100644 --- a/ngraph/test/type_prop/squeeze.cpp +++ b/ngraph/test/type_prop/squeeze.cpp @@ -23,45 +23,47 @@ using namespace ngraph; TEST(type_prop, squeeze) { - auto param = make_shared(element::f32, Shape{1, 4, 1, 4, 1, 8}); + auto param = make_shared(element::Type_t::f32, Shape{1, 4, 1, 4, 1, 8}); auto axes_node = - make_shared(element::u64, Shape{2}, vector{0, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{0, 2}); auto squeeze = make_shared(param, axes_node); - ASSERT_EQ(squeeze->get_element_type(), element::f32); + ASSERT_EQ(squeeze->get_element_type(), element::Type_t::f32); ASSERT_EQ(squeeze->get_shape(), (Shape{4, 4, 1, 8})); - axes_node = make_shared(element::u64, Shape{0}, vector{}); + axes_node = + make_shared(element::Type_t::u64, Shape{0}, vector{}); auto squeeze_default_axes = make_shared(param, axes_node); - ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32); + ASSERT_EQ(squeeze_default_axes->get_element_type(), element::Type_t::f32); ASSERT_EQ(squeeze_default_axes->get_shape(), (Shape{4, 4, 8})); } TEST(type_prop, squeeze_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic(6)); + auto param = make_shared(element::Type_t::f32, PartialShape::dynamic(6)); auto axes_node = - make_shared(element::u64, Shape{2}, vector{0, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{0, 2}); auto squeeze = make_shared(param, axes_node); - ASSERT_EQ(squeeze->get_element_type(), element::f32); + ASSERT_EQ(squeeze->get_element_type(), element::Type_t::f32); EXPECT_TRUE(squeeze->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); - axes_node = make_shared(element::u64, Shape{0}, vector{}); + axes_node = + make_shared(element::Type_t::u64, Shape{0}, vector{}); auto squeeze_default_axes = make_shared(param, axes_node); - ASSERT_EQ(squeeze_default_axes->get_element_type(), element::f32); + ASSERT_EQ(squeeze_default_axes->get_element_type(), element::Type_t::f32); EXPECT_TRUE( squeeze_default_axes->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, squeeze_axes_invalid_value) { - auto param = make_shared(element::f32, Shape{1, 2, 3, 4}); + auto param = make_shared(element::Type_t::f32, Shape{1, 2, 3, 4}); auto axes_node = - make_shared(element::u64, Shape{2}, vector{0, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{0, 2}); try { diff --git a/ngraph/test/type_prop/strided_slice.cpp b/ngraph/test/type_prop/strided_slice.cpp index 77bfa280f38..968deff1e5a 100644 --- a/ngraph/test/type_prop/strided_slice.cpp +++ b/ngraph/test/type_prop/strided_slice.cpp @@ -25,9 +25,9 @@ using namespace ngraph; TEST(type_prop, strided_slice_begin_incorrect_type) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::f16, Shape{4}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::f16, Shape{4}); + auto end = make_shared(element::Type_t::i64, Shape{4}); try { auto strided_slice = make_shared( @@ -47,9 +47,9 @@ TEST(type_prop, strided_slice_begin_incorrect_type) 
TEST(type_prop, strided_slice_end_incorrect_type) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = make_shared(element::boolean, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4}); + auto end = make_shared(element::Type_t::boolean, Shape{4}); try { auto strided_slice = make_shared( @@ -69,9 +69,9 @@ TEST(type_prop, strided_slice_end_incorrect_type) TEST(type_prop, strided_slice_incompatible_size_of_masks_attr) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4}); + auto end = make_shared(element::Type_t::i64, Shape{4}); try { auto strided_slice = make_shared(data, @@ -96,9 +96,9 @@ TEST(type_prop, strided_slice_incompatible_size_of_masks_attr) TEST(type_prop, strided_slice_mask_incorrect_value) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4, 5}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4, 5}); + auto end = make_shared(element::Type_t::i64, Shape{4}); try { auto strided_slice = make_shared( @@ -119,9 +119,9 @@ TEST(type_prop, strided_slice_mask_incorrect_value) TEST(type_prop, strided_slice_begin_incorrect_shape) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4, 5}); - auto end = make_shared(element::i64, Shape{4}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4, 5}); + auto end = make_shared(element::Type_t::i64, Shape{4}); try { auto strided_slice = make_shared( @@ -141,9 +141,9 @@ TEST(type_prop, strided_slice_begin_incorrect_shape) TEST(type_prop, strided_slice_end_incorrect_shape) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, Shape{4}); - auto end = make_shared(element::i64, Shape{4, 5}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, Shape{4}); + auto end = make_shared(element::Type_t::i64, Shape{4, 5}); try { auto strided_slice = make_shared( @@ -163,9 +163,9 @@ TEST(type_prop, strided_slice_end_incorrect_shape) TEST(type_prop, strided_slice_default_stride_dynamic_shape_input) { - auto data = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto begin = make_shared(element::i64, PartialShape::dynamic()); - auto end = make_shared(element::i64, Shape{2}); + auto data = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto begin = make_shared(element::Type_t::i64, PartialShape::dynamic()); + auto end = make_shared(element::Type_t::i64, Shape{2}); auto strided_slice = make_shared( data, begin, end, vector{0, 0}, vector{0, 0}); @@ -173,7 +173,7 @@ TEST(type_prop, strided_slice_default_stride_dynamic_shape_input) try { - end = make_shared(element::i64, PartialShape::dynamic()); + end = make_shared(element::Type_t::i64, PartialShape::dynamic()); strided_slice = make_shared( data, begin, end, vector{0, 0}, vector{0, 0}); // Should have thrown, so fail if it didn't @@ -191,10 +191,11 @@ TEST(type_prop, 
strided_slice_default_stride_dynamic_shape_input) TEST(type_prop, strided_slice_reverse_out_of_bounds) { - auto data = std::make_shared(ngraph::element::f32, ngraph::Shape{3, 4, 5}); - auto begin = op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {100}); - auto end = op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {-100}); - auto stride = op::Constant::create(ngraph::element::i64, ngraph::Shape{3}, {-1}); + auto data = + std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{3, 4, 5}); + auto begin = op::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape{3}, {100}); + auto end = op::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape{3}, {-100}); + auto stride = op::Constant::create(ngraph::element::Type_t::i64, ngraph::Shape{3}, {-1}); std::vector begin_mask = {0, 0, 0, 0}; std::vector end_mask = {0, 0, 0, 0}; diff --git a/ngraph/test/type_prop/swish.cpp b/ngraph/test/type_prop/swish.cpp index 6611009e8d9..b9091a5364a 100644 --- a/ngraph/test/type_prop/swish.cpp +++ b/ngraph/test/type_prop/swish.cpp @@ -23,31 +23,33 @@ using namespace ngraph; TEST(type_prop, swish) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); auto swish_func = make_shared(data); - EXPECT_EQ(swish_func->get_element_type(), element::f32); + EXPECT_EQ(swish_func->get_element_type(), element::Type_t::f32); EXPECT_EQ(swish_func->get_shape(), data->get_output_shape(0)); } TEST(type_prop, swish_partial) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto swish_func = make_shared(data); - EXPECT_EQ(swish_func->get_element_type(), element::f32); + EXPECT_EQ(swish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( swish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); // rank unknown auto swish_partial = make_shared( - make_shared(element::f32, PartialShape::dynamic())); + make_shared(element::Type_t::f32, PartialShape::dynamic())); ASSERT_TRUE(swish_partial->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, swish_partial_static_rank) { - auto data = make_shared(element::f32, PartialShape{1, Dimension::dynamic(), 6}); + auto data = + make_shared(element::Type_t::f32, PartialShape{1, Dimension::dynamic(), 6}); auto swish_func = make_shared(data); - EXPECT_EQ(swish_func->get_element_type(), element::f32); + EXPECT_EQ(swish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE( swish_func->get_output_partial_shape(0).same_scheme(data->get_output_partial_shape(0))); ASSERT_TRUE(swish_func->get_output_partial_shape(0).rank().is_static()); @@ -55,8 +57,8 @@ TEST(type_prop, swish_partial_static_rank) TEST(type_prop, swish_incompatible_types) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f16, Shape{}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::Type_t::f16, Shape{}); try { const auto swish_func = make_shared(data, beta); @@ -70,8 +72,8 @@ TEST(type_prop, swish_incompatible_types) TEST(type_prop, swish_beta_not_scalar) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f32, Shape{1}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::Type_t::f32, Shape{1}); try { const auto swish_func = make_shared(data, 
beta); @@ -85,11 +87,11 @@ TEST(type_prop, swish_beta_not_scalar) TEST(type_prop, swish_2_inputs) { - auto data = make_shared(element::f32, Shape{1, 3, 6}); - auto beta = make_shared(element::f32, Shape{}); + auto data = make_shared(element::Type_t::f32, Shape{1, 3, 6}); + auto beta = make_shared(element::Type_t::f32, Shape{}); const auto swish_func = make_shared(data, beta); - EXPECT_EQ(swish_func->get_element_type(), element::f32); + EXPECT_EQ(swish_func->get_element_type(), element::Type_t::f32); ASSERT_TRUE(swish_func->get_output_partial_shape(0).same_scheme(data->get_output_shape(0))); ASSERT_TRUE(swish_func->get_output_partial_shape(0).rank().is_static()); } diff --git a/ngraph/test/type_prop/ti.cpp b/ngraph/test/type_prop/ti.cpp index 6fdc2241c00..c2c26b51587 100644 --- a/ngraph/test/type_prop/ti.cpp +++ b/ngraph/test/type_prop/ti.cpp @@ -30,20 +30,20 @@ TEST(type_prop, tensor_iterator_lstm) const size_t L = 10; // Sequence length const size_t I = 8; // Input size const size_t H = 32; // Hidden size - auto SENT = make_shared(element::f32, Shape{N, L, I}); + auto SENT = make_shared(element::Type_t::f32, Shape{N, L, I}); - auto H_init = make_shared(element::f32, Shape{N, 1, H}); - auto C_init = make_shared(element::f32, Shape{N, 1, H}); + auto H_init = make_shared(element::Type_t::f32, Shape{N, 1, H}); + auto C_init = make_shared(element::Type_t::f32, Shape{N, 1, H}); - auto W = make_shared(element::f32, Shape{4 * H, I}); - auto R = make_shared(element::f32, Shape{4 * H, H}); - auto H_t = make_shared(element::f32, Shape{N, 1, H}); - auto C_t = make_shared(element::f32, Shape{N, 1, H}); + auto W = make_shared(element::Type_t::f32, Shape{4 * H, I}); + auto R = make_shared(element::Type_t::f32, Shape{4 * H, H}); + auto H_t = make_shared(element::Type_t::f32, Shape{N, 1, H}); + auto C_t = make_shared(element::Type_t::f32, Shape{N, 1, H}); // Body - auto X = make_shared(element::f32, Shape{N, 1, I}); - auto W_body = make_shared(element::f32, Shape{4 * H, I}); - auto R_body = make_shared(element::f32, Shape{4 * H, H}); + auto X = make_shared(element::Type_t::f32, Shape{N, 1, I}); + auto W_body = make_shared(element::Type_t::f32, Shape{4 * H, I}); + auto R_body = make_shared(element::Type_t::f32, Shape{4 * H, H}); auto LSTM_cell = make_shared(builder::opset1::reshape(X, Shape{N, I}), builder::opset1::reshape(H_t, Shape{N, H}), builder::opset1::reshape(C_t, Shape{N, H}), @@ -77,15 +77,15 @@ TEST(type_prop, tensor_iterator_lstm) TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto M = make_shared(element::f32, Shape{32, 2, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 40, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 2, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, Shape{32, 2, 10}); - auto Yi = make_shared(element::f32, Shape{32, 2, 10}); - auto M_body = make_shared(element::f32, Shape{32, 2, 10}); + auto Xi = make_shared(element::Type_t::f32, Shape{32, 2, 10}); + auto Yi = make_shared(element::Type_t::f32, Shape{32, 2, 10}); + auto M_body = make_shared(element::Type_t::f32, Shape{32, 2, 10}); // Body auto Zo = (Xi + Yi) * M_body; @@ -121,15 +121,15 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2) TEST(type_prop, 
tensor_iterator_2_slice_inputs_part_size_2_dynamic) { // That which we iterate over - auto X = make_shared(element::f32, Shape{32, 40, 10}); - auto Y = make_shared(element::f32, Shape{32, 40, 10}); - auto M = make_shared(element::f32, Shape{32, 2, 10}); + auto X = make_shared(element::Type_t::f32, Shape{32, 40, 10}); + auto Y = make_shared(element::Type_t::f32, Shape{32, 40, 10}); + auto M = make_shared(element::Type_t::f32, Shape{32, 2, 10}); // Set up the cell body, a function from (Xi, Yi) -> (Zo) // Body parameters - auto Xi = make_shared(element::f32, PartialShape::dynamic()); - auto Yi = make_shared(element::f32, PartialShape::dynamic()); - auto M_body = make_shared(element::f32, PartialShape::dynamic()); + auto Xi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto Yi = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto M_body = make_shared(element::Type_t::f32, PartialShape::dynamic()); // Body auto Zo = (Xi + Yi) * M_body; diff --git a/ngraph/test/type_prop/tile.cpp b/ngraph/test/type_prop/tile.cpp index e3c9a30b95a..8dfcadcbc45 100644 --- a/ngraph/test/type_prop/tile.cpp +++ b/ngraph/test/type_prop/tile.cpp @@ -23,27 +23,27 @@ using namespace ngraph; TEST(type_prop, tile) { - auto param0 = make_shared(element::f32, Shape{6, 8, 10}); - auto param1 = op::Constant::create(element::i64, Shape{3}, {3, 4, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 8, 10}); + auto param1 = op::Constant::create(element::Type_t::i64, Shape{3}, {3, 4, 1}); auto top = make_shared(param0, param1); - ASSERT_EQ(top->get_element_type(), element::f32); + ASSERT_EQ(top->get_element_type(), element::Type_t::f32); ASSERT_EQ(top->get_shape(), (Shape{18, 32, 10})); } TEST(type_prop, tile_small_data_rank) { - auto param0 = make_shared(element::f32, Shape{8, 10}); - auto param1 = op::Constant::create(element::i64, Shape{3}, {3, 4, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{8, 10}); + auto param1 = op::Constant::create(element::Type_t::i64, Shape{3}, {3, 4, 1}); auto top = make_shared(param0, param1); - ASSERT_EQ(top->get_element_type(), element::f32); + ASSERT_EQ(top->get_element_type(), element::Type_t::f32); ASSERT_EQ(top->get_shape(), (Shape{3, 32, 10})); } TEST(type_prop, tile_few_repeats) { - auto param0 = make_shared(element::f32, Shape{6, 8, 10}); - auto param1 = op::Constant::create(element::i64, Shape{2}, {4, 1}); + auto param0 = make_shared(element::Type_t::f32, Shape{6, 8, 10}); + auto param1 = op::Constant::create(element::Type_t::i64, Shape{2}, {4, 1}); auto top = make_shared(param0, param1); - ASSERT_EQ(top->get_element_type(), element::f32); + ASSERT_EQ(top->get_element_type(), element::Type_t::f32); ASSERT_EQ(top->get_shape(), (Shape{6, 32, 10})); } diff --git a/ngraph/test/type_prop/top_k.cpp b/ngraph/test/type_prop/top_k.cpp index 644b60bac13..bde74878601 100644 --- a/ngraph/test/type_prop/top_k.cpp +++ b/ngraph/test/type_prop/top_k.cpp @@ -31,8 +31,8 @@ TYPED_TEST_CASE_P(topk_type_prop); TYPED_TEST_P(topk_type_prop, topk_negative_axis_support) { const auto data_shape = Shape{1, 2, 3, 4}; - const auto data = make_shared(element::f32, data_shape); - const auto k = op::Constant::create(element::i64, Shape{}, {2}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto k = op::Constant::create(element::Type_t::i64, Shape{}, {2}); const int64_t axis = -2; const auto topk = make_shared(data, k, axis, "max", "value"); @@ -46,8 +46,8 @@ TYPED_TEST_P(topk_type_prop, topk_negative_axis_support) 
TYPED_TEST_P(topk_type_prop, topk_negative_axis_dynamic_rank) { const auto data_shape = PartialShape::dynamic(); - const auto data = make_shared(element::f32, data_shape); - const auto k = op::Constant::create(element::i64, Shape{}, {2}); + const auto data = make_shared(element::Type_t::f32, data_shape); + const auto k = op::Constant::create(element::Type_t::i64, Shape{}, {2}); const int64_t axis = -2; const auto topk = make_shared(data, k, axis, "max", "value"); @@ -68,14 +68,14 @@ TYPED_TEST_P(topk_type_prop, topk_negative_axis_dynamic_rank) TYPED_TEST_P(topk_type_prop, topk_v1_partial_ouptut) { auto data_shape = PartialShape{2, 10}; - auto data = make_shared(element::f32, data_shape); + auto data = make_shared(element::Type_t::f32, data_shape); { - auto k = make_shared(element::i32, PartialShape({})); + auto k = make_shared(element::Type_t::i32, PartialShape({})); auto topk = make_shared(data, k, 1, "max", "value"); EXPECT_EQ(topk->get_output_partial_shape(0), PartialShape({2, -1})); } { - auto k = make_shared(element::i32, Shape{}, 3); + auto k = make_shared(element::Type_t::i32, Shape{}, 3); auto topk = make_shared(data, k, 1, "max", "value"); EXPECT_EQ(topk->get_output_shape(0), Shape({2, 3})); EXPECT_EQ(topk->get_output_partial_shape(0), PartialShape({2, 3})); @@ -86,18 +86,18 @@ TYPED_TEST_P(topk_type_prop, topk_rank_static_k_unknown) { const int64_t axis = 1; const auto data_shape = Shape{1, 10, 100}; - const auto data = make_shared(element::f32, data_shape); + const auto data = make_shared(element::Type_t::f32, data_shape); { - const auto k = make_shared(element::i32, PartialShape({})); + const auto k = make_shared(element::Type_t::i32, PartialShape({})); const auto topk = make_shared(data, k, axis, "max", "value"); const PartialShape fully_dynamic_axis_shape{1, Dimension::dynamic(), 100}; EXPECT_EQ(topk->get_output_partial_shape(0), fully_dynamic_axis_shape); } { - const auto k = make_shared(element::i64, Shape{}, 5); - const auto convert_k = make_shared(k, element::i32); + const auto k = make_shared(element::Type_t::i64, Shape{}, 5); + const auto convert_k = make_shared(k, element::Type_t::i32); const auto topk = make_shared(data, convert_k, axis, "max", "value"); const PartialShape ranged_dynamic_axis_shape{1, Dimension{5, 10}, 100}; diff --git a/ngraph/test/type_prop/transpose.cpp b/ngraph/test/type_prop/transpose.cpp index ae57978fe7f..e4cf0908509 100644 --- a/ngraph/test/type_prop/transpose.cpp +++ b/ngraph/test/type_prop/transpose.cpp @@ -23,30 +23,32 @@ using namespace ngraph; TEST(type_prop, transpose_arg_static_input_order_static_ok) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = make_shared(element::i64, Shape{4}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::i64, Shape{4}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_static_input_order_constant_ok) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = op::Constant::create(element::i64, Shape{4}, vector{2, 1, 0, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = + op::Constant::create(element::Type_t::i64, Shape{4}, vector{2, 1, 0, 3}); auto r = make_shared(arg, input_order); - 
EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape{6, 4, 2, 8})); } TEST(type_prop, transpose_arg_static_input_order_constant_invalid_perm) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = op::Constant::create(element::i64, Shape{4}, vector{2, 9, 0, 3}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = + op::Constant::create(element::Type_t::i64, Shape{4}, vector{2, 9, 0, 3}); try { @@ -68,76 +70,79 @@ TEST(type_prop, transpose_arg_static_input_order_constant_invalid_perm) TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_ok) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); - auto input_order = make_shared(element::i64, Shape{4}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + auto input_order = make_shared(element::Type_t::i64, Shape{4}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_ok) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_static_dynamic_ok) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); - auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + auto input_order = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_ok) { - auto arg = make_shared(element::f32, PartialShape::dynamic()); - auto input_order = make_shared(element::i64, PartialShape{Dimension::dynamic()}); + auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto input_order = + make_shared(element::Type_t::i64, PartialShape{Dimension::dynamic()}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_dynamic_ok) { - auto arg = make_shared(element::f32, PartialShape::dynamic()); - auto input_order = make_shared(element::i64, PartialShape::dynamic()); + auto arg = 
make_shared(element::Type_t::f32, PartialShape::dynamic()); + auto input_order = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic())); } TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_dynamic_ok) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); - auto input_order = make_shared(element::i64, PartialShape::dynamic()); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + auto input_order = make_shared(element::Type_t::i64, PartialShape::dynamic()); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_arg_static_input_order_static_input_order_not_vector) { - auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8}); - auto input_order = make_shared(element::i64, PartialShape{2, 2}); + auto arg = make_shared(element::Type_t::f32, PartialShape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::i64, PartialShape{2, 2}); try { @@ -156,9 +161,9 @@ TEST(type_prop, transpose_arg_static_input_order_static_input_order_not_vector) TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_input_order_not_vector) { - auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8}); + auto arg = make_shared(element::Type_t::f32, PartialShape{2, 4, 6, 8}); auto input_order = - make_shared(element::i64, PartialShape{2, Dimension::dynamic()}); + make_shared(element::Type_t::i64, PartialShape{2, Dimension::dynamic()}); try { @@ -177,8 +182,8 @@ TEST(type_prop, transpose_arg_static_input_order_rank_static_dynamic_input_order TEST(type_prop, transpose_arg_static_input_order_static_input_order_wrong_size) { - auto arg = make_shared(element::f32, PartialShape{2, 4, 6, 8}); - auto input_order = make_shared(element::i64, PartialShape{5}); + auto arg = make_shared(element::Type_t::f32, PartialShape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::i64, PartialShape{5}); try { @@ -200,8 +205,8 @@ TEST(type_prop, transpose_arg_static_input_order_static_input_order_wrong_size) TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_static_input_order_not_vector) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); - auto input_order = make_shared(element::i64, PartialShape{2, 2}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + auto input_order = make_shared(element::Type_t::i64, PartialShape{2, 2}); try { @@ -222,9 +227,9 @@ TEST(type_prop, transpose_arg_rank_static_dynamic_input_order_rank_static_dynamic_input_order_not_vector) { auto arg = make_shared( - element::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); + element::Type_t::f32, PartialShape{2, Dimension::dynamic(), Dimension::dynamic(), 8}); auto input_order = - make_shared(element::i64, PartialShape{2, Dimension::dynamic()}); + make_shared(element::Type_t::i64, PartialShape{2, Dimension::dynamic()}); try { @@ -243,9 +248,9 @@ TEST(type_prop, TEST(type_prop, 
transpose_arg_rank_dynamic_input_order_rank_static_dynamic_input_order_not_vector) { - auto arg = make_shared(element::f32, PartialShape::dynamic()); + auto arg = make_shared(element::Type_t::f32, PartialShape::dynamic()); auto input_order = - make_shared(element::i64, PartialShape{2, Dimension::dynamic()}); + make_shared(element::Type_t::i64, PartialShape{2, Dimension::dynamic()}); try { @@ -264,19 +269,19 @@ TEST(type_prop, transpose_arg_rank_dynamic_input_order_rank_static_dynamic_input TEST(type_prop, transpose_input_order_et_dynamic_ok) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = make_shared(element::dynamic, Shape{4}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::dynamic, Shape{4}); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape::dynamic(4))); } TEST(type_prop, transpose_input_order_et_wrong) { - auto arg = make_shared(element::f32, Shape{2, 4, 6, 8}); - auto input_order = make_shared(element::boolean, Shape{4}); + auto arg = make_shared(element::Type_t::f32, Shape{2, 4, 6, 8}); + auto input_order = make_shared(element::Type_t::boolean, Shape{4}); try { @@ -296,11 +301,12 @@ TEST(type_prop, transpose_input_order_et_wrong) TEST(type_prop, transpose_with_empty_order) { - auto arg = make_shared(element::f32, Shape{1, 300}); - auto input_order = make_shared(element::i64, Shape({0}), std::vector()); + auto arg = make_shared(element::Type_t::f32, Shape{1, 300}); + auto input_order = + make_shared(element::Type_t::i64, Shape({0}), std::vector()); auto r = make_shared(arg, input_order); - EXPECT_EQ(r->get_output_element_type(0), element::f32); + EXPECT_EQ(r->get_output_element_type(0), element::Type_t::f32); EXPECT_TRUE(r->get_output_partial_shape(0).same_scheme(PartialShape({300, 1}))); } diff --git a/ngraph/test/type_prop/unary_elementwise.cpp b/ngraph/test/type_prop/unary_elementwise.cpp index 1cbddb4d3ad..aa60efcbd09 100644 --- a/ngraph/test/type_prop/unary_elementwise.cpp +++ b/ngraph/test/type_prop/unary_elementwise.cpp @@ -23,7 +23,7 @@ using namespace ngraph; TEST(type_prop, unary_arithmetic_bad_argument_element_types) { - auto tv0_2_4_param = make_shared(element::boolean, Shape{2, 4}); + auto tv0_2_4_param = make_shared(element::Type_t::boolean, Shape{2, 4}); try { auto bc = make_shared(tv0_2_4_param); diff --git a/ngraph/test/type_prop/unsqueeze.cpp b/ngraph/test/type_prop/unsqueeze.cpp index 484a60b0ea1..b49e14ae227 100644 --- a/ngraph/test/type_prop/unsqueeze.cpp +++ b/ngraph/test/type_prop/unsqueeze.cpp @@ -23,23 +23,23 @@ using namespace ngraph; TEST(type_prop, unsqueeze) { - auto param = make_shared(element::f32, Shape{4, 1, 4, 1, 8}); + auto param = make_shared(element::Type_t::f32, Shape{4, 1, 4, 1, 8}); auto axes_node = - make_shared(element::u64, Shape{2}, vector{1, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{1, 2}); auto unsqueeze = make_shared(param, axes_node); - ASSERT_EQ(unsqueeze->get_element_type(), element::f32); + ASSERT_EQ(unsqueeze->get_element_type(), element::Type_t::f32); ASSERT_EQ(unsqueeze->get_shape(), (Shape{4, 1, 1, 1, 4, 1, 8})); } TEST(type_prop, unsqueeze_dynamic) { - auto param = make_shared(element::f32, PartialShape::dynamic(5)); + auto param = make_shared(element::Type_t::f32, PartialShape::dynamic(5)); auto axes_node = - 
make_shared(element::u64, Shape{2}, vector{1, 2}); + make_shared(element::Type_t::u64, Shape{2}, vector{1, 2}); auto unsqueeze = make_shared(param, axes_node); - ASSERT_EQ(unsqueeze->get_element_type(), element::f32); + ASSERT_EQ(unsqueeze->get_element_type(), element::Type_t::f32); EXPECT_TRUE( unsqueeze->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 1, diff --git a/ngraph/test/type_prop/variadic_split.cpp b/ngraph/test/type_prop/variadic_split.cpp index 15da2bbcd18..63cf9f4fdaf 100644 --- a/ngraph/test/type_prop/variadic_split.cpp +++ b/ngraph/test/type_prop/variadic_split.cpp @@ -23,44 +23,44 @@ using namespace ngraph; TEST(type_prop, variadic_split) { - const auto data = make_shared(element::i32, Shape{2, 6}); - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = op::Constant::create(element::i64, Shape{2}, {2, 4}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 4}); const auto split = make_shared(data, axis, splits); EXPECT_EQ(split->outputs().size(), 2); EXPECT_EQ(split->get_output_shape(0), (Shape{2, 2})); EXPECT_EQ(split->get_output_shape(1), (Shape{2, 4})); - EXPECT_EQ(split->get_output_element_type(0), element::i32); - EXPECT_EQ(split->get_output_element_type(1), element::i32); + EXPECT_EQ(split->get_output_element_type(0), element::Type_t::i32); + EXPECT_EQ(split->get_output_element_type(1), element::Type_t::i32); EXPECT_EQ(make_shared( - make_shared(element::i32, Shape{12, 6}), - op::Constant::create(element::i64, Shape{}, {-2}), - op::Constant::create(element::i64, Shape{3}, {7, -1, 2})) + make_shared(element::Type_t::i32, Shape{12, 6}), + op::Constant::create(element::Type_t::i64, Shape{}, {-2}), + op::Constant::create(element::Type_t::i64, Shape{3}, {7, -1, 2})) ->output(1) .get_shape(), (Shape{3, 6})); EXPECT_EQ(make_shared( - make_shared(element::i32, Shape{12, 6}), - op::Constant::create(element::i64, Shape{}, {-2}), - op::Constant::create(element::i64, Shape{3}, {-1, 7, 2})) + make_shared(element::Type_t::i32, Shape{12, 6}), + op::Constant::create(element::Type_t::i64, Shape{}, {-2}), + op::Constant::create(element::Type_t::i64, Shape{3}, {-1, 7, 2})) ->output(0) .get_shape(), (Shape{3, 6})); EXPECT_EQ(make_shared( - make_shared(element::i32, Shape{12, 1, 6}), - op::Constant::create(element::i64, Shape{1}, {2}), - op::Constant::create(element::i64, Shape{3}, {3, 1, 2})) + make_shared(element::Type_t::i32, Shape{12, 1, 6}), + op::Constant::create(element::Type_t::i64, Shape{1}, {2}), + op::Constant::create(element::Type_t::i64, Shape{3}, {3, 1, 2})) ->output(2) .get_shape(), (Shape{12, 1, 2})); EXPECT_EQ(make_shared( - make_shared(element::i32, Shape{12, 6}), - op::Constant::create(element::i64, Shape{1}, {1}), - op::Constant::create(element::i64, Shape{2}, {6, 0})) + make_shared(element::Type_t::i32, Shape{12, 6}), + op::Constant::create(element::Type_t::i64, Shape{1}, {1}), + op::Constant::create(element::Type_t::i64, Shape{2}, {6, 0})) ->output(1) .get_shape(), (Shape{12, 0})); @@ -68,12 +68,13 @@ TEST(type_prop, variadic_split) TEST(type_prop, variadic_split_splits_rank) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = 
op::Constant::create(element::i64, Shape{1, 2}, {2, 4}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = + op::Constant::create(element::Type_t::i64, Shape{1, 2}, {2, 4}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -86,12 +87,12 @@ TEST(type_prop, variadic_split_splits_rank) TEST(type_prop, variadic_split_incorrect_sum) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = op::Constant::create(element::i64, Shape{2}, {1, 6}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 6}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -105,12 +106,12 @@ TEST(type_prop, variadic_split_incorrect_sum) TEST(type_prop, variadic_split_incorrect_axis) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {-5}); - const auto splits = op::Constant::create(element::i64, Shape{2}, {2, 4}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {-5}); + const auto splits = op::Constant::create(element::Type_t::i64, Shape{2}, {2, 4}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -123,12 +124,12 @@ TEST(type_prop, variadic_split_incorrect_axis) TEST(type_prop, variadic_split_splits_invalid_negative) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = op::Constant::create(element::i64, Shape{2}, {-2, 4}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = op::Constant::create(element::Type_t::i64, Shape{2}, {-2, 4}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -141,12 +142,13 @@ TEST(type_prop, variadic_split_splits_invalid_negative) TEST(type_prop, variadic_split_splits_multiple_negatives) { - const auto data = make_shared(element::i32, Shape{2, 6}); + const auto data = make_shared(element::Type_t::i32, Shape{2, 6}); try { - const auto axis = op::Constant::create(element::i64, Shape{}, {1}); - const auto splits = op::Constant::create(element::i64, Shape{3}, {-1, -1, 3}); + const auto axis = op::Constant::create(element::Type_t::i64, Shape{}, {1}); + const auto splits = + op::Constant::create(element::Type_t::i64, Shape{3}, {-1, -1, 3}); const auto split = make_shared(data, axis, splits); FAIL() << "Split node was created with incorrect data."; } @@ -161,9 +163,9 @@ TEST(type_prop, variadic_split_shape_partially_dynamic) { // Variadic split shape {12,?} into {7,?}, {3,?} and {2,?} auto var_split1 = make_shared( - make_shared(element::i32, PartialShape{12, Dimension()}), - op::Constant::create(element::i64, Shape{}, {-2}), - op::Constant::create(element::i64, Shape{3}, {7, -1, 2})); + make_shared(element::Type_t::i32, PartialShape{12, Dimension()}), + op::Constant::create(element::Type_t::i64, Shape{}, {-2}), + 
op::Constant::create(element::Type_t::i64, Shape{3}, {7, -1, 2})); EXPECT_TRUE( var_split1->get_output_partial_shape(0).same_scheme(PartialShape{7, Dimension::dynamic()})); @@ -174,9 +176,9 @@ TEST(type_prop, variadic_split_shape_partially_dynamic) // Variadic split shape {?,?,6} into {?,?,3}, {?,?,1} and {?,?,2} auto var_split2 = make_shared( - make_shared(element::i32, PartialShape{Dimension(), Dimension(), 6}), - op::Constant::create(element::i64, Shape{}, {2}), - op::Constant::create(element::i64, Shape{3}, {3, 1, 2})); + make_shared(element::Type_t::i32, PartialShape{Dimension(), Dimension(), 6}), + op::Constant::create(element::Type_t::i64, Shape{}, {2}), + op::Constant::create(element::Type_t::i64, Shape{3}, {3, 1, 2})); EXPECT_TRUE(var_split2->get_output_partial_shape(0).same_scheme( PartialShape{Dimension::dynamic(), Dimension::dynamic(), 3})); @@ -187,9 +189,9 @@ TEST(type_prop, variadic_split_shape_partially_dynamic) // Variadic split shape {?,6} into {?,6}, and {?,0} auto var_split3 = make_shared( - make_shared(element::i32, PartialShape{Dimension(), 6}), - op::Constant::create(element::i64, Shape{}, {1}), - op::Constant::create(element::i64, Shape{2}, {6, 0})); + make_shared(element::Type_t::i32, PartialShape{Dimension(), 6}), + op::Constant::create(element::Type_t::i64, Shape{}, {1}), + op::Constant::create(element::Type_t::i64, Shape{2}, {6, 0})); EXPECT_TRUE( var_split3->get_output_partial_shape(0).same_scheme(PartialShape{Dimension::dynamic(), 6})); diff --git a/ngraph/test/type_prop_layers.cpp b/ngraph/test/type_prop_layers.cpp index 1e2c0f01a79..2841deb129d 100644 --- a/ngraph/test/type_prop_layers.cpp +++ b/ngraph/test/type_prop_layers.cpp @@ -33,19 +33,19 @@ using namespace ngraph; TEST(type_prop_layers, ctc_greedy_decoder) { - auto input = make_shared(element::f32, Shape{88, 2, 48}); - auto seq_len = make_shared(element::f32, Shape{88, 2}); + auto input = make_shared(element::Type_t::f32, Shape{88, 2, 48}); + auto seq_len = make_shared(element::Type_t::f32, Shape{88, 2}); auto op = make_shared(input, seq_len, false); ASSERT_EQ(op->get_shape(), (Shape{2, 88, 1, 1})); } TEST(type_prop_layers, detection_output) { - auto box_logits = make_shared(element::f32, Shape{4, 1, 5, 5}); - auto class_preds = make_shared(element::f32, Shape{2, 1, 4, 5}); - auto proposals = make_shared(element::f32, Shape{2, 1, 4, 5}); - auto aux_class_preds = make_shared(element::f32, Shape{2, 1, 4, 5}); - auto aux_box_preds = make_shared(element::f32, Shape{2, 1, 4, 5}); + auto box_logits = make_shared(element::Type_t::f32, Shape{4, 1, 5, 5}); + auto class_preds = make_shared(element::Type_t::f32, Shape{2, 1, 4, 5}); + auto proposals = make_shared(element::Type_t::f32, Shape{2, 1, 4, 5}); + auto aux_class_preds = make_shared(element::Type_t::f32, Shape{2, 1, 4, 5}); + auto aux_box_preds = make_shared(element::Type_t::f32, Shape{2, 1, 4, 5}); op::DetectionOutputAttrs attrs; attrs.keep_top_k = {200}; auto op = make_shared( @@ -55,9 +55,9 @@ TEST(type_prop_layers, detection_output) TEST(type_prop_layers, interpolate) { - auto image = make_shared(element::f32, Shape{2, 2, 33, 65}); - auto dyn_output_shape = make_shared(element::i64, Shape{2}); - auto output_shape = op::v0::Constant::create(element::i64, Shape{2}, {15, 30}); + auto image = make_shared(element::Type_t::f32, Shape{2, 2, 33, 65}); + auto dyn_output_shape = make_shared(element::Type_t::i64, Shape{2}); + auto output_shape = op::v0::Constant::create(element::Type_t::i64, Shape{2}, {15, 30}); op::v0::InterpolateAttrs attrs; attrs.axes = {2, 
3}; @@ -80,8 +80,8 @@ TEST(type_prop_layers, prior_box1) attrs.min_size = {2.0f, 3.0f}; attrs.aspect_ratio = {1.5f, 2.0f, 2.5f}; - auto layer_shape = op::Constant::create(element::i64, Shape{2}, {32, 32}); - auto image_shape = op::Constant::create(element::i64, Shape{2}, {300, 300}); + auto layer_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {32, 32}); + auto image_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {300, 300}); auto pb = make_shared(layer_shape, image_shape, attrs); ASSERT_EQ(pb->get_shape(), (Shape{2, 20480})); } @@ -93,8 +93,8 @@ TEST(type_prop_layers, prior_box2) attrs.aspect_ratio = {1.5f, 2.0f, 2.5f}; attrs.flip = true; - auto layer_shape = op::Constant::create(element::i64, Shape{2}, {32, 32}); - auto image_shape = op::Constant::create(element::i64, Shape{2}, {300, 300}); + auto layer_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {32, 32}); + auto image_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {300, 300}); auto pb = make_shared(layer_shape, image_shape, attrs); ASSERT_EQ(pb->get_shape(), (Shape{2, 32768})); } @@ -108,8 +108,8 @@ TEST(type_prop_layers, prior_box3) attrs.flip = true; attrs.scale_all_sizes = true; - auto layer_shape = op::Constant::create(element::i64, Shape{2}, {1, 1}); - auto image_shape = op::Constant::create(element::i64, Shape{2}, {300, 300}); + auto layer_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {1, 1}); + auto image_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {300, 300}); auto pb = make_shared(layer_shape, image_shape, attrs); ASSERT_EQ(pb->get_shape(), (Shape{2, 16})); } @@ -120,8 +120,8 @@ TEST(type_prop_layers, prior_box_clustered) attrs.widths = {4.0f, 2.0f, 3.2f}; attrs.heights = {1.0f, 2.0f, 1.1f}; - auto layer_shape = op::Constant::create(element::i64, Shape{2}, {19, 19}); - auto image_shape = op::Constant::create(element::i64, Shape{2}, {300, 300}); + auto layer_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {19, 19}); + auto image_shape = op::Constant::create(element::Type_t::i64, Shape{2}, {300, 300}); auto pbc = make_shared(layer_shape, image_shape, attrs); // Output shape - 4 * 19 * 19 * 3 (attrs.widths.size()) ASSERT_EQ(pbc->get_shape(), (Shape{2, 4332})); @@ -129,21 +129,21 @@ TEST(type_prop_layers, prior_box_clustered) TEST(type_prop_layers, region_yolo1) { - auto inputs = make_shared(element::f32, Shape{1, 125, 13, 13}); + auto inputs = make_shared(element::Type_t::f32, Shape{1, 125, 13, 13}); auto op = make_shared(inputs, 0, 0, 0, true, std::vector{}, 0, 1); ASSERT_EQ(op->get_shape(), (Shape{1 * 125, 13, 13})); } TEST(type_prop_layers, region_yolo2) { - auto inputs = make_shared(element::f32, Shape{1, 125, 13, 13}); + auto inputs = make_shared(element::Type_t::f32, Shape{1, 125, 13, 13}); auto op = make_shared(inputs, 0, 0, 0, true, std::vector{}, 0, 2); ASSERT_EQ(op->get_shape(), (Shape{1 * 125 * 13, 13})); } TEST(type_prop_layers, region_yolo3) { - auto inputs = make_shared(element::f32, Shape{1, 125, 13, 13}); + auto inputs = make_shared(element::Type_t::f32, Shape{1, 125, 13, 13}); auto op = make_shared(inputs, 4, 80, 1, false, std::vector{6, 7, 8}, 0, -1); ASSERT_EQ(op->get_shape(), (Shape{1, (80 + 4 + 1) * 3, 13, 13})); @@ -151,23 +151,23 @@ TEST(type_prop_layers, region_yolo3) TEST(type_prop_layers, reorg_yolo) { - auto inputs = make_shared(element::f32, Shape{2, 24, 34, 62}); + auto inputs = make_shared(element::Type_t::f32, Shape{2, 24, 34, 62}); auto op = make_shared(inputs, Strides{2}); 
ASSERT_EQ(op->get_shape(), (Shape{2, 96, 17, 31})); } TEST(type_prop_layers, psroi_pooling) { - auto inputs = make_shared(element::f32, Shape{1, 3, 4, 5}); - auto coords = make_shared(element::f32, Shape{150, 5}); + auto inputs = make_shared(element::Type_t::f32, Shape{1, 3, 4, 5}); + auto coords = make_shared(element::Type_t::f32, Shape{150, 5}); auto op = make_shared(inputs, coords, 2, 6, 0.0625, 0, 0, "Avg"); ASSERT_EQ(op->get_shape(), (Shape{150, 2, 6, 6})); } TEST(type_prop_layers, roi_pooling) { - auto inputs = make_shared(element::f32, Shape{2, 3, 4, 5}); - auto coords = make_shared(element::f32, Shape{150, 5}); + auto inputs = make_shared(element::Type_t::f32, Shape{2, 3, 4, 5}); + auto coords = make_shared(element::Type_t::f32, Shape{150, 5}); auto op = make_shared(inputs, coords, Shape{6, 6}, 0.0625, "max"); ASSERT_EQ(op->get_shape(), (Shape{150, 3, 6, 6})); } diff --git a/ngraph/test/util.cpp b/ngraph/test/util.cpp index a85ab16921e..d24bafd31df 100644 --- a/ngraph/test/util.cpp +++ b/ngraph/test/util.cpp @@ -147,15 +147,15 @@ TEST(util, all_close) auto backend = runtime::Backend::create("INTERPRETER"); // Create some tensors for input/output - auto a = backend->create_tensor(element::f32, Shape{2, 3}); - auto b = backend->create_tensor(element::f32, Shape{2, 3}); + auto a = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); + auto b = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); copy_data(a, test::NDArray({{1, 2, 3}, {3, 4, 5}}).get_vector()); copy_data(b, test::NDArray({{1, 2, 3}, {3, 4, 5}}).get_vector()); EXPECT_TRUE(ngraph::test::all_close(a, b)); - auto c = backend->create_tensor(element::f32, Shape{2, 3}); + auto c = backend->create_tensor(element::Type_t::f32, Shape{2, 3}); copy_data(c, test::NDArray({{1.1f, 2, 3}, {3, 4, 5}}).get_vector()); EXPECT_FALSE(ngraph::test::all_close(c, a, 0, .05f)); @@ -171,9 +171,9 @@ class CloneTest : public ::testing::Test public: // (A + B) * C Shape shape = Shape{2, 2}; - std::shared_ptr A = make_shared(element::f32, shape); - std::shared_ptr B = make_shared(element::f32, shape); - std::shared_ptr C = make_shared(element::f32, shape); + std::shared_ptr A = make_shared(element::Type_t::f32, shape); + std::shared_ptr B = make_shared(element::Type_t::f32, shape); + std::shared_ptr C = make_shared(element::Type_t::f32, shape); std::shared_ptr AplusB = A + B; std::shared_ptr AplusBtimesC = AplusB * C; @@ -233,7 +233,7 @@ TEST_F(CloneTest, clone_nodes_full) TEST_F(CloneTest, clone_nodes_partial) { // map A -> A' prior to clone - auto Aprime = make_shared(element::f32, shape); + auto Aprime = make_shared(element::Type_t::f32, shape); node_map[A.get()] = Aprime; auto cloned_nodes = clone_nodes(nodes, node_map); @@ -252,9 +252,9 @@ TEST_F(CloneTest, clone_function_full) TEST(graph_util, clone_multiple_results) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto A_add_B = make_shared(A, B); auto A_add_B_mul_C = make_shared(A_add_B, C); @@ -296,7 +296,7 @@ TEST(graph_util, get_subgraph_outputs_trivial_tests) ASSERT_EQ(outputs.size(), 0); Shape shape{}; - auto A = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); auto absn = make_shared(A); auto neg_absn = make_shared(absn); outputs = ngraph::get_subgraph_outputs(NodeVector{A}, 
NodeVector{}); @@ -308,7 +308,7 @@ TEST(graph_util, get_subgraph_outputs_trivial_tests) outputs = ngraph::get_subgraph_outputs(NodeVector{A, absn}, NodeVector{}); ASSERT_EQ(outputs, (NodeVector{absn})); - auto B = make_shared(element::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto abs_b = make_shared(B); auto neg_b = make_shared(B); auto abs_b_neg = make_shared(abs_b); @@ -334,9 +334,9 @@ TEST(graph_util, get_subgraph_outputs_trivial_tests) TEST(graph_util, test_subgraph_topological_sort) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto add = A + B; auto mul = C * add; auto result = make_shared(mul); @@ -348,9 +348,9 @@ TEST(graph_util, test_subgraph_topological_sort) TEST(graph_util, test_subgraph_topological_sort_control_dependencies) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto D = make_shared(A); auto E = make_shared(B); auto add = A + B; @@ -511,7 +511,7 @@ TEST(graph, huge) { std::vector> weak_nodes; { - auto param = make_shared(element::f32, Shape{3, 3}); + auto param = make_shared(element::Type_t::f32, Shape{3, 3}); std::shared_ptr n = param; weak_nodes.push_back(n); for (size_t i = 0; i < 1000000; i++) @@ -602,8 +602,8 @@ TEST(util, apply_permutation_pshape_rank_dynamic_inviable_permutation_fails) TEST(util, clone_function_friendly_name) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); auto f = make_shared(make_shared(A, B), ParameterVector{A, B}); A->set_friendly_name("A"); @@ -625,9 +625,9 @@ TEST(util, clone_function_friendly_name) TEST(util, clone_function_op_annotations) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared(A + B + C, ParameterVector{A, B, C}); auto cacheable_op_annotation = std::make_shared(); @@ -663,9 +663,9 @@ TEST(util, clone_function_op_annotations) TEST(util, topological_sort_replace) { Shape shape{2, 2}; - auto A = make_shared(element::f32, shape); - auto B = make_shared(element::f32, shape); - auto C = make_shared(element::f32, shape); + auto A = make_shared(element::Type_t::f32, shape); + auto B = make_shared(element::Type_t::f32, shape); + auto C = make_shared(element::Type_t::f32, shape); auto f = make_shared(A + B + C, ParameterVector{A, B, C}); bool custom_sorter_used = false; @@ -756,7 +756,7 @@ TEST(util_host_tensor_2_vector, ht_boolean_2_vec_bool) vector input{1, 0, 1, 0}; vector output{true, false, true, false}; host_tensor_2_vector_test( - input, output, element::boolean); + input, output, element::Type_t::boolean); } TEST(util_host_tensor_2_vector, ht_boolean_2_vec_int64) @@ -764,7 +764,7 @@ TEST(util_host_tensor_2_vector, 
ht_boolean_2_vec_int64) vector input{1, 0, 1, 0}; vector output{true, false, true, false}; host_tensor_2_vector_test( - input, output, element::boolean); + input, output, element::Type_t::boolean); } TEST(util_host_tensor_2_vector, ht_i8_2_vec_int64) @@ -774,7 +774,7 @@ TEST(util_host_tensor_2_vector, ht_i8_2_vec_int64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::i8); + input, output, element::Type_t::i8); } TEST(util_host_tensor_2_vector, ht_i16_2_vec_int64) @@ -784,7 +784,7 @@ TEST(util_host_tensor_2_vector, ht_i16_2_vec_int64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::i16); + input, output, element::Type_t::i16); } TEST(util_host_tensor_2_vector, ht_i32_2_vec_int64) @@ -794,7 +794,7 @@ TEST(util_host_tensor_2_vector, ht_i32_2_vec_int64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::i32); + input, output, element::Type_t::i32); } TEST(util_host_tensor_2_vector, ht_i64_2_vec_int64) @@ -803,7 +803,7 @@ TEST(util_host_tensor_2_vector, ht_i64_2_vec_int64) 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; vector output{input}; host_tensor_2_vector_test( - input, output, element::i64); + input, output, element::Type_t::i64); } TEST(util_host_tensor_2_vector, ht_bf16_2_vec_double) @@ -813,7 +813,7 @@ TEST(util_host_tensor_2_vector, ht_bf16_2_vec_double) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::bf16); + input, output, element::Type_t::bf16); } TEST(util_host_tensor_2_vector, ht_f16_2_vec_double) @@ -823,7 +823,7 @@ TEST(util_host_tensor_2_vector, ht_f16_2_vec_double) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::f16); + input, output, element::Type_t::f16); } TEST(util_host_tensor_2_vector, ht_f32_2_vec_double) @@ -832,7 +832,7 @@ TEST(util_host_tensor_2_vector, ht_f32_2_vec_double) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::f32); + input, output, element::Type_t::f32); } TEST(util_host_tensor_2_vector, ht_f64_2_vec_double) @@ -842,7 +842,7 @@ TEST(util_host_tensor_2_vector, ht_f64_2_vec_double) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::f64); + input, output, element::Type_t::f64); } TEST(util_host_tensor_2_vector, ht_u8_2_vec_uint64) @@ -852,7 +852,7 @@ TEST(util_host_tensor_2_vector, ht_u8_2_vec_uint64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::u8); + input, output, element::Type_t::u8); } TEST(util_host_tensor_2_vector, ht_u16_2_vec_uint64) @@ -862,7 +862,7 @@ TEST(util_host_tensor_2_vector, ht_u16_2_vec_uint64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::u16); + input, output, element::Type_t::u16); } TEST(util_host_tensor_2_vector, ht_u32_2_vec_uint64) @@ -872,7 +872,7 @@ TEST(util_host_tensor_2_vector, ht_u32_2_vec_uint64) vector output{ 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; host_tensor_2_vector_test( - input, output, element::u32); + input, output, element::Type_t::u32); } 
TEST(util_host_tensor_2_vector, ht_u64_2_vec_uint64) @@ -881,5 +881,5 @@ TEST(util_host_tensor_2_vector, ht_u64_2_vec_uint64) 0, 1, std::numeric_limits::min(), std::numeric_limits::max()}; vector output{input}; host_tensor_2_vector_test( - input, output, element::u64); + input, output, element::Type_t::u64); } diff --git a/ngraph/test/util/test_tools.cpp b/ngraph/test/util/test_tools.cpp index 85adba4579f..168fa8f975d 100644 --- a/ngraph/test/util/test_tools.cpp +++ b/ngraph/test/util/test_tools.cpp @@ -62,12 +62,12 @@ bool validate_list(const vector>& nodes) shared_ptr make_test_graph() { - auto arg_0 = make_shared(element::f32, Shape{2, 2}); - auto arg_1 = make_shared(element::f32, Shape{2, 2}); - auto arg_2 = make_shared(element::f32, Shape{2, 2}); - auto arg_3 = make_shared(element::f32, Shape{2, 2}); - auto arg_4 = make_shared(element::f32, Shape{2, 2}); - auto arg_5 = make_shared(element::f32, Shape{2, 2}); + auto arg_0 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_1 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_2 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_3 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_4 = make_shared(element::Type_t::f32, Shape{2, 2}); + auto arg_5 = make_shared(element::Type_t::f32, Shape{2, 2}); auto t0 = make_shared(arg_0, arg_1); auto t1 = make_shared(t0, arg_2); @@ -141,47 +141,47 @@ void init_int_tv(ngraph::runtime::Tensor* tv, void random_init(ngraph::runtime::Tensor* tv, std::default_random_engine& engine) { element::Type et = tv->get_element_type(); - if (et == element::boolean) + if (et == element::Type_t::boolean) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::f32) + else if (et == element::Type_t::f32) { init_real_tv(tv, engine, numeric_limits::min(), 1.0f); } - else if (et == element::f64) + else if (et == element::Type_t::f64) { init_real_tv(tv, engine, numeric_limits::min(), 1.0); } - else if (et == element::i8) + else if (et == element::Type_t::i8) { init_int_tv(tv, engine, -1, 1); } - else if (et == element::i16) + else if (et == element::Type_t::i16) { init_int_tv(tv, engine, -1, 1); } - else if (et == element::i32) + else if (et == element::Type_t::i32) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::i64) + else if (et == element::Type_t::i64) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::u8) + else if (et == element::Type_t::u8) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::u16) + else if (et == element::Type_t::u16) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::u32) + else if (et == element::Type_t::u32) { init_int_tv(tv, engine, 0, 1); } - else if (et == element::u64) + else if (et == element::Type_t::u64) { init_int_tv(tv, engine, 0, 1); } diff --git a/ngraph/test/util/visitor.hpp b/ngraph/test/util/visitor.hpp index f9a01cd07c6..c366a84beaf 100644 --- a/ngraph/test/util/visitor.hpp +++ b/ngraph/test/util/visitor.hpp @@ -333,7 +333,7 @@ namespace ngraph void on_adapter(const std::string& name, ValueAccessor& adapter) override { HostTensorPtr data = - std::make_shared(element::u8, Shape{adapter.size()}); + std::make_shared(element::Type_t::u8, Shape{adapter.size()}); data->write(adapter.get_ptr(), adapter.size()); m_values.insert(name, data); }