From 334bf9d227893d0a202fcc3a1cd0e97854b5846c Mon Sep 17 00:00:00 2001 From: Ilya Churaev Date: Fri, 20 Aug 2021 06:42:15 +0300 Subject: [PATCH 1/5] Moved DiscreteTypeInfo to ov namespace (#7127) * Moved DiscreteTypeInfo to new opset * Revert old header * Fixed code style --- .../src/cldnn_engine/cldnn_program.h | 5 +- .../src/legacy_api/src/ngraph_ops/nms_ie.cpp | 2 +- .../include/low_precision/network_helper.hpp | 28 +-- .../low_precision/propagate_shared_value.hpp | 10 +- .../propagate_through_precision_preserved.hpp | 4 +- .../low_precision/propagate_to_input.hpp | 4 +- .../update_shared_precision_preserved.hpp | 10 +- .../low_precision_transformations/src/add.cpp | 28 +-- .../src/clamp.cpp | 12 +- .../src/concat.cpp | 2 +- .../src/convert.cpp | 2 +- .../src/convolution.cpp | 36 ++-- .../src/convolution_backprop_data.cpp | 24 +-- .../src/depth_to_space.cpp | 2 +- .../src/eltwise_base_transformation.cpp | 46 ++-- .../src/fake_quantize.cpp | 32 +-- .../src/fake_quantize_decomposition.cpp | 16 +- .../src/fake_quantize_dequantization.cpp | 14 +- .../src/fold_convert.cpp | 12 +- .../src/fold_fake_quantize.cpp | 4 +- .../src/fuse_convert.cpp | 14 +- .../src/fuse_fake_quantize.cpp | 26 +-- .../src/fuse_multiply_to_fake_quantize.cpp | 14 +- .../src/fuse_subtract_to_fake_quantize.cpp | 20 +- .../src/interpolate.cpp | 8 +- .../src/layer_transformation.cpp | 8 +- .../src/low_precision.cpp | 4 +- .../src/markup_precisions.cpp | 8 +- .../src/mat_mul.cpp | 16 +- .../src/max_pool.cpp | 2 +- .../src/multiply.cpp | 12 +- .../src/multiply_to_group_convolution.cpp | 18 +- .../low_precision_transformations/src/mvn.cpp | 28 +-- .../src/network_helper.cpp | 182 ++++++++-------- .../src/normalize_l2.cpp | 14 +- .../low_precision_transformations/src/pad.cpp | 12 +- .../pull_reshape_through_dequantization.cpp | 22 +- .../pull_transpose_through_dequantization.cpp | 18 +- .../src/quantization_details.cpp | 26 +-- .../src/reduce_base_transformation.cpp | 2 +- .../src/reduce_max.cpp | 4 +- .../src/reduce_mean.cpp | 2 +- .../src/reduce_min.cpp | 4 +- .../src/reduce_sum.cpp | 6 +- .../src/reshape.cpp | 4 +- .../rt_info/intervals_alignment_attribute.cpp | 16 +- .../src/rt_info/precisions_attribute.cpp | 2 +- .../quantization_alignment_attribute.cpp | 8 +- .../src/shuffle_channels.cpp | 6 +- .../src/split.cpp | 6 +- .../src/squeeze.cpp | 2 +- .../src/strided_slice.cpp | 8 +- .../src/subtract.cpp | 6 +- .../src/subtract_multiply_to_multiply_add.cpp | 16 +- .../src/transpose.cpp | 4 +- .../src/unsqueeze.cpp | 2 +- .../src/weightable_layer_transformation.cpp | 44 ++-- .../src/pruning/mask_attribute.cpp | 4 +- .../src/snippets/src/op/subgraph.cpp | 2 +- .../snippets/src/pass/assign_registers.cpp | 6 +- .../snippets/src/pass/collapse_subgraph.cpp | 108 +++++----- .../src/ngraph_ops/nms_ie_internal.cpp | 2 +- .../common_optimizations/nop_elimination.cpp | 28 +-- .../simplify_shape_of_sub_graph.cpp | 8 +- .../transpose_to_reshape.cpp | 2 +- .../weights_dequantize_to_fake_quantize.cpp | 2 +- ...convert_constant_folding_on_const_path.cpp | 4 +- .../op_conversions/convert_batch_to_space.cpp | 6 +- .../op_conversions/convert_space_to_batch.cpp | 6 +- .../op_conversions/convert_subtract.cpp | 17 +- .../rt_info/dequantization_attribute.cpp | 2 +- .../rt_info/fused_names_attribute.cpp | 4 +- .../rt_info/primitives_priority_attribute.cpp | 2 +- .../src/transformations/serialize.cpp | 6 +- ...nvolution_backprop_data_transformation.cpp | 8 +- .../convolution_transformation.cpp | 2 +- .../inference_engine/snippets/registers.cpp | 4 +- 
.../lpt_ngraph_functions/common/builders.hpp | 2 +- .../lpt_ngraph_functions/src/add_function.cpp | 16 +- .../src/common/builders.cpp | 2 +- .../convolution_backprop_data_function.cpp | 2 +- .../src/convolution_function.cpp | 6 +- .../src/get_dequantization_function.cpp | 8 +- .../mock_mo_frontend.hpp | 8 +- .../core/include/ngraph/pattern/matcher.hpp | 2 +- .../include/ngraph/pattern/op/pattern.hpp | 2 +- ngraph/core/include/ngraph/type.hpp | 69 +----- ngraph/core/include/openvino/core/type.hpp | 98 +++++++++ ngraph/core/src/function.cpp | 2 +- ngraph/core/src/graph_util.cpp | 16 +- ngraph/core/src/node.cpp | 8 +- ngraph/core/src/op/assign.cpp | 4 +- ngraph/core/src/op/loop.cpp | 11 +- ngraph/core/src/op/non_max_suppression.cpp | 2 +- ngraph/core/src/op/parameter.cpp | 2 +- ngraph/core/src/op/result.cpp | 2 +- ngraph/core/src/op/tensor_iterator.cpp | 16 +- ngraph/core/src/op/topk.cpp | 4 +- .../core/src/op/util/arithmetic_reduction.cpp | 2 +- ngraph/core/src/op/util/broadcast_base.cpp | 8 +- ngraph/core/src/op/util/fft_base.cpp | 6 +- ngraph/core/src/pass/constant_folding.cpp | 4 +- ngraph/core/src/pass/convert_precision.cpp | 24 +-- ngraph/core/src/pass/visualize_tree.cpp | 4 +- ngraph/core/src/specialize_function.cpp | 2 +- ngraph/core/src/validation_util.cpp | 20 +- .../frontend/onnx/frontend/src/frontend.cpp | 4 +- .../frontend/onnx/frontend/src/op/dropout.cpp | 2 +- ngraph/frontend/onnx/frontend/src/op/loop.cpp | 10 +- ngraph/frontend/paddlepaddle/src/frontend.cpp | 40 ++-- .../mock_py_frontend.hpp | 8 +- ngraph/test/builder_autobroadcast.cpp | 12 +- ngraph/test/constant_folding.cpp | 196 +++++++++--------- ngraph/test/copy.cpp | 18 +- ngraph/test/graph_rewrite.cpp | 2 +- .../onnx/onnx_import_const_folding.in.cpp | 2 +- ngraph/test/op.cpp | 26 +-- ngraph/test/pattern.cpp | 6 +- .../test/runtime/dynamic/dynamic_backend.cpp | 4 +- .../runtime/interpreter/evaluates_map.cpp | 46 ++-- .../runtime/interpreter/int_executable.cpp | 10 +- ngraph/test/specialize_function.cpp | 6 +- ngraph/test/type_prop/loop.cpp | 110 +++++----- ngraph/test/type_prop/ti.cpp | 10 +- ngraph/test/util.cpp | 10 +- ngraph/test/visitors/op/adaptive_max_pool.cpp | 2 +- ngraph/test/visitors/op/batch_norm.cpp | 2 +- ngraph/test/visitors/op/broadcast.cpp | 2 +- ngraph/test/visitors/op/bucketize.cpp | 4 +- ngraph/test/visitors/op/constant.cpp | 6 +- ngraph/test/visitors/op/convert.cpp | 2 +- .../test/visitors/op/convolution_backprop.cpp | 4 +- ngraph/test/visitors/op/cum_sum.cpp | 6 +- .../visitors/op/deformable_convolution.cpp | 4 +- .../visitors/op/deformable_psroi_pooling.cpp | 2 +- ngraph/test/visitors/op/depth_to_space.cpp | 2 +- ngraph/test/visitors/op/detection_output.cpp | 2 +- ngraph/test/visitors/op/einsum.cpp | 2 +- ngraph/test/visitors/op/elu.cpp | 2 +- .../test/visitors/op/extractimagepatches.cpp | 2 +- ngraph/test/visitors/op/fake_quantize.cpp | 2 +- ngraph/test/visitors/op/gather.cpp | 4 +- ngraph/test/visitors/op/gelu.cpp | 2 +- ngraph/test/visitors/op/grn.cpp | 2 +- ngraph/test/visitors/op/group_conv.cpp | 4 +- ngraph/test/visitors/op/interpolate.cpp | 2 +- ngraph/test/visitors/op/lrn.cpp | 2 +- ngraph/test/visitors/op/lstm_cell.cpp | 2 +- ngraph/test/visitors/op/lstm_sequence.cpp | 2 +- ngraph/test/visitors/op/matmul.cpp | 2 +- ngraph/test/visitors/op/matrix_nms.cpp | 4 +- ngraph/test/visitors/op/max_pool.cpp | 4 +- ngraph/test/visitors/op/multiclass_nms.cpp | 4 +- ngraph/test/visitors/op/mvn.cpp | 4 +- .../test/visitors/op/non_max_suppression.cpp | 8 +- ngraph/test/visitors/op/normalize_l2.cpp | 2 +- 
ngraph/test/visitors/op/one_hot.cpp | 2 +- ngraph/test/visitors/op/pad.cpp | 2 +- ngraph/test/visitors/op/parameter.cpp | 2 +- ngraph/test/visitors/op/prior_box.cpp | 2 +- .../test/visitors/op/prior_box_clustered.cpp | 2 +- ngraph/test/visitors/op/proposal.cpp | 2 +- ngraph/test/visitors/op/psroi_pooling.cpp | 2 +- ngraph/test/visitors/op/random_uniform.cpp | 2 +- ngraph/test/visitors/op/reduce_ops.hpp | 2 +- ngraph/test/visitors/op/region_yolo.cpp | 2 +- ngraph/test/visitors/op/reorg_yolo.cpp | 4 +- ngraph/test/visitors/op/reshape.cpp | 2 +- ngraph/test/visitors/op/reverse.cpp | 4 +- ngraph/test/visitors/op/reverse_sequence.cpp | 2 +- ngraph/test/visitors/op/rnn_cell.cpp | 4 +- ngraph/test/visitors/op/roi_pooling.cpp | 2 +- ngraph/test/visitors/op/round.cpp | 2 +- ngraph/test/visitors/op/select.cpp | 2 +- ngraph/test/visitors/op/shuffle_channels.cpp | 2 +- ngraph/test/visitors/op/softmax.cpp | 2 +- ngraph/test/visitors/op/space_to_depth.cpp | 2 +- ngraph/test/visitors/op/split.cpp | 2 +- ngraph/test/visitors/op/strided_slice.cpp | 2 +- ngraph/test/visitors/op/topk.cpp | 2 +- ngraph/test/visitors/user_op.cpp | 2 +- 181 files changed, 1090 insertions(+), 1044 deletions(-) create mode 100644 ngraph/core/include/openvino/core/type.hpp diff --git a/inference-engine/src/cldnn_engine/cldnn_program.h b/inference-engine/src/cldnn_engine/cldnn_program.h index 8f90b4fabb7..cf791870e5e 100644 --- a/inference-engine/src/cldnn_engine/cldnn_program.h +++ b/inference-engine/src/cldnn_engine/cldnn_program.h @@ -29,9 +29,12 @@ enum class eltwise_mode : int32_t; // Forward declarations for ngraph part namespace ngraph { class Node; -class DiscreteTypeInfo; } // namespace ngraph +namespace ov { +class DiscreteTypeInfo; +} // namespace ov + #define REGISTER_FACTORY_IMPL(op_version, op_name) \ void __register ## _ ## op_name ## _ ## op_version() { \ Program::RegisterFactory( \ diff --git a/inference-engine/src/legacy_api/src/ngraph_ops/nms_ie.cpp b/inference-engine/src/legacy_api/src/ngraph_ops/nms_ie.cpp index 82a0315ec74..f6b333e5c51 100644 --- a/inference-engine/src/legacy_api/src/ngraph_ops/nms_ie.cpp +++ b/inference-engine/src/legacy_api/src/ngraph_ops/nms_ie.cpp @@ -164,7 +164,7 @@ int64_t op::NonMaxSuppressionIE3::max_boxes_output_from_input() const { } const auto max_output_boxes_input = - as_type_ptr(input_value(max_output_boxes_per_class_port).get_node_shared_ptr()); + ov::as_type_ptr(input_value(max_output_boxes_per_class_port).get_node_shared_ptr()); max_output_boxes = max_output_boxes_input->cast_vector().at(0); return max_output_boxes; diff --git a/inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp b/inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp index 93c80161deb..ca80b580b0b 100644 --- a/inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp +++ b/inference-engine/src/low_precision_transformations/include/low_precision/network_helper.hpp @@ -226,14 +226,14 @@ public: auto node = nodes.front(); nodes.pop_front(); - if (visited.count(node) || is_type(node)) { + if (visited.count(node) || ov::is_type(node)) { continue; } visited.insert(node); bool handleConnectedNodes = false; - if (NetworkHelper::isPrecisionPreserved(node) || is_type(node)) { + if (NetworkHelper::isPrecisionPreserved(node) || ov::is_type(node)) { auto& rt = node->get_rt_info(); if (node == initialNode) { @@ -255,13 +255,13 @@ public: continue; } - if (!is_type(node)) { + if (!ov::is_type(node)) { for (size_t 
index = 0ul; index < node->get_input_size(); ++index) { auto getInput = [](const std::shared_ptr& node, const size_t index) { const auto dequantization = NetworkHelper::getDequantization(node, index); if (!dequantization.empty() && - (is_type(dequantization.data.get_node())) && - is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { + (ov::is_type(dequantization.data.get_node())) && + ov::is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { const auto input = dequantization.data.get_node()->input(0); return input; } @@ -272,7 +272,7 @@ public: const auto& input_node = input.get_source_output().get_node_shared_ptr(); //const auto& input_node = input.get_source_output().get_node_shared_ptr(); - if (visited.count(input_node) || is_type(input_node)) { + if (visited.count(input_node) || ov::is_type(input_node)) { continue; } @@ -283,7 +283,7 @@ public: for (auto& output : node->outputs()) { for (auto& input_value : output.get_target_inputs()) { const auto& output_node = input_value.get_node()->shared_from_this(); - if (visited.count(output_node) || is_type(output_node)) { + if (visited.count(output_node) || ov::is_type(output_node)) { continue; } @@ -364,7 +364,7 @@ std::shared_ptr NetworkHelper::setOutDataPrecision(std::shared_ptr std::shared_ptr make_op_pattern(const ngraph::NodeVector& args) { - return std::make_shared(element::undefined, PartialShape{}, [](std::shared_ptr n) {return !!as_type_ptr(n); }, args); + return std::make_shared(element::undefined, PartialShape{}, [](std::shared_ptr n) {return !!ov::as_type_ptr(n); }, args); } template @@ -372,7 +372,7 @@ std::shared_ptr make_op_label() { return std::make_shared( element::undefined, PartialShape{}, - [](std::shared_ptr n) {return !!as_type_ptr(n); }); + [](std::shared_ptr n) {return !!ov::as_type_ptr(n); }); } template @@ -394,18 +394,18 @@ std::shared_ptr fold_reshape(Args&&... 
args) { std::shared_ptr node = std::make_shared(std::forward(args)...); if (node->get_output_size() == 1) { // issue #57985: remove fold_reshape & reuse nGraph implementation - const auto values = as_type_ptr(node->input_value(1).get_node_shared_ptr())->template cast_vector(); + const auto values = ov::as_type_ptr(node->input_value(1).get_node_shared_ptr())->template cast_vector(); if (std::any_of(values.begin(), values.end(), [](const int64_t value) { return (value == 0) || (value == -1); })) { return fold(std::forward(args)...); } OutputVector folded; - if (is_type(node->input_value(0).get_node_shared_ptr()) && - is_type(node->input_value(1).get_node_shared_ptr())) { + if (ov::is_type(node->input_value(0).get_node_shared_ptr()) && + ov::is_type(node->input_value(1).get_node_shared_ptr())) { return std::make_shared( node->get_input_element_type(0), - Shape(as_type_ptr(node->input_value(1).get_node_shared_ptr())->template cast_vector()), - as_type_ptr(node->input_value(0).get_node_shared_ptr())->get_data_ptr()); + Shape(ov::as_type_ptr(node->input_value(1).get_node_shared_ptr())->template cast_vector()), + ov::as_type_ptr(node->input_value(0).get_node_shared_ptr())->get_data_ptr()); } } return node; diff --git a/inference-engine/src/low_precision_transformations/include/low_precision/propagate_shared_value.hpp b/inference-engine/src/low_precision_transformations/include/low_precision/propagate_shared_value.hpp index 9866d63197f..6a83a74089a 100644 --- a/inference-engine/src/low_precision_transformations/include/low_precision/propagate_shared_value.hpp +++ b/inference-engine/src/low_precision_transformations/include/low_precision/propagate_shared_value.hpp @@ -36,7 +36,7 @@ public: std::vector> nodes(f->get_ordered_ops()); for (auto it = nodes.begin(); it != nodes.end(); it++) { const std::shared_ptr node = *it; - if (is_type(node)) { + if (ov::is_type(node)) { assert(node->get_output_size() == 1ul); auto& outputRtInfo = node->output(0).get_rt_info(); @@ -56,7 +56,7 @@ public: auto node = nodeInput.get_source_output().get_node_shared_ptr(); std::vector>>> attributes; - if (is_type(node)) { + if (ov::is_type(node)) { // output auto& rt = nodeInput.get_source_output().get_rt_info(); auto it = rt.find(name); @@ -109,8 +109,8 @@ private: const auto dequantization = NetworkHelper::getDequantization(node, index); if (!dequantization.empty() && - (is_type(dequantization.data.get_node())) && - is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { + (ov::is_type(dequantization.data.get_node())) && + ov::is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { inputNode = dequantization.data.get_node()->get_input_node_shared_ptr(0); } @@ -121,7 +121,7 @@ private: const auto attribute = std::dynamic_pointer_cast>>(inputAttributeIt->second); parentAttributes.push_back(attribute); } - } else if (is_type(inputNode)) { + } else if (ov::is_type(inputNode)) { const auto& outputPortRtInfo = inputNode->outputs()[0].get_rt_info(); auto attributeIt = outputPortRtInfo.find(ngraph::VariantWrapper>::type_info.name); if (attributeIt != outputPortRtInfo.end()) { diff --git a/inference-engine/src/low_precision_transformations/include/low_precision/propagate_through_precision_preserved.hpp b/inference-engine/src/low_precision_transformations/include/low_precision/propagate_through_precision_preserved.hpp index 18a8f1e0ab8..bfa0e1fcceb 100644 --- a/inference-engine/src/low_precision_transformations/include/low_precision/propagate_through_precision_preserved.hpp +++ 
b/inference-engine/src/low_precision_transformations/include/low_precision/propagate_through_precision_preserved.hpp @@ -96,9 +96,9 @@ private: auto getInput = [](const std::shared_ptr& node, const size_t index) -> Input { const auto dequantization = NetworkHelper::getDequantization(node, index); if (!dequantization.empty() && - is_type(dequantization.data.get_node()) && + ov::is_type(dequantization.data.get_node()) && (dequantization.data.get_node()->get_input_size() == 1ul) && - is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { + ov::is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { return dequantization.data.get_node()->input(0); } diff --git a/inference-engine/src/low_precision_transformations/include/low_precision/propagate_to_input.hpp b/inference-engine/src/low_precision_transformations/include/low_precision/propagate_to_input.hpp index 1f30ab7b4a0..64268e9270b 100644 --- a/inference-engine/src/low_precision_transformations/include/low_precision/propagate_to_input.hpp +++ b/inference-engine/src/low_precision_transformations/include/low_precision/propagate_to_input.hpp @@ -72,9 +72,9 @@ private: auto getInput = [](const Input& input) { const auto dequantization = NetworkHelper::getDequantization(input.get_node()->shared_from_this(), input.get_index()); if (!dequantization.empty() && - is_type(dequantization.data.get_node()) && + ov::is_type(dequantization.data.get_node()) && (dequantization.data.get_node()->get_input_size() == 1ul) && - is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { + ov::is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { return dequantization.data.get_node()->input(0); } diff --git a/inference-engine/src/low_precision_transformations/include/low_precision/update_shared_precision_preserved.hpp b/inference-engine/src/low_precision_transformations/include/low_precision/update_shared_precision_preserved.hpp index 119ae13c412..0a17f7328ac 100644 --- a/inference-engine/src/low_precision_transformations/include/low_precision/update_shared_precision_preserved.hpp +++ b/inference-engine/src/low_precision_transformations/include/low_precision/update_shared_precision_preserved.hpp @@ -35,14 +35,14 @@ public: const bool needToCheckExpectedAttributeType = !std::is_same::value; if (!needToCheckExpectedAttributeType) { // expected attribute is ignored, set attributes for node inputs except Result & FakeQuantize operations - if (is_type(node) || - is_type(node) || + if (ov::is_type(node) || + ov::is_type(node) || transformation_callback(node)) { return false; } } - if (ngraph::pass::low_precision::NetworkHelper::isPrecisionPreserved(node) || is_type(node)) { + if (ngraph::pass::low_precision::NetworkHelper::isPrecisionPreserved(node) || ov::is_type(node)) { return false; } @@ -87,8 +87,8 @@ private: Input getDequantizationInput(const Input& input) { const auto dequantization = NetworkHelper::getDequantization(input.get_node()->shared_from_this(), input.get_index()); if (!dequantization.empty() && - (is_type(dequantization.data.get_node())) && - is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { + (ov::is_type(dequantization.data.get_node())) && + ov::is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { assert(dequantization.data.get_target_inputs().size() == 1ul); return *dequantization.data.get_target_inputs().begin(); } diff --git a/inference-engine/src/low_precision_transformations/src/add.cpp b/inference-engine/src/low_precision_transformations/src/add.cpp index 
55a101c101f..c09a22783a6 100644 --- a/inference-engine/src/low_precision_transformations/src/add.cpp +++ b/inference-engine/src/low_precision_transformations/src/add.cpp @@ -28,26 +28,26 @@ std::shared_ptr replaceToSubtract(const std::shared_ptr& // motivation: // - single responsibility // - keep AddTransformation and AddToSubtractTransformation transformations independent and optional - const auto add = as_type_ptr(op); + const auto add = ov::as_type_ptr(op); if (add == nullptr) { return nullptr; } // TODO: use general way from getDequantization: is eltwise with Constant - const int constBranchIndex = is_type(add->get_input_node_ptr(0)) ? + const int constBranchIndex = ov::is_type(add->get_input_node_ptr(0)) ? 0 : - (is_type(add->get_input_node_ptr(1)) ? 1 : -1); + (ov::is_type(add->get_input_node_ptr(1)) ? 1 : -1); if (constBranchIndex == -1) { return nullptr; } const size_t dataBranchIndex = constBranchIndex == 0 ? 1ul : 0; const auto parent = add->get_input_node_shared_ptr(dataBranchIndex); - if (is_type(parent) || - is_type(parent) || - is_type(parent) || - (is_type(parent) && - (is_type(parent->get_input_node_ptr(0)) || is_type(parent->get_input_node_ptr(1))))) { + if (ov::is_type(parent) || + ov::is_type(parent) || + ov::is_type(parent) || + (ov::is_type(parent) && + (ov::is_type(parent->get_input_node_ptr(0)) || ov::is_type(parent->get_input_node_ptr(1))))) { return nullptr; } @@ -68,11 +68,11 @@ std::shared_ptr replaceToSubtract(const std::shared_ptr& } std::shared_ptr fuseWithSubtract(const std::shared_ptr& op) { - const auto add = as_type_ptr(op); + const auto add = ov::as_type_ptr(op); if ((add == nullptr) || - !is_type(add->get_input_node_shared_ptr(0)) || + !ov::is_type(add->get_input_node_shared_ptr(0)) || // TODO: use general way from getDequantization: is eltwise with Constant - !is_type(add->get_input_node_shared_ptr(0)->get_input_node_shared_ptr(1))) { + !ov::is_type(add->get_input_node_shared_ptr(0)->get_input_node_shared_ptr(1))) { return nullptr; } @@ -107,7 +107,7 @@ AddTransformation::AddTransformation(const Params& params) : EltwiseBaseTransfor } bool AddTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) { - std::shared_ptr op = as_type_ptr(m.get_match_root()); + std::shared_ptr op = ov::as_type_ptr(m.get_match_root()); if ((op == nullptr) || (!canBeTransformed(context, op))) { return false; } @@ -116,7 +116,7 @@ bool AddTransformation::transform(TransformationContext& context, ngraph::patter NetworkHelper::normalizeDequantization(NetworkHelper::getDequantization(op, 1)); std::shared_ptr addNode = NetworkHelper::separateInStandaloneBranch(op); - std::shared_ptr add = as_type_ptr(addNode); + std::shared_ptr add = ov::as_type_ptr(addNode); const int fullPathIndex = getNotEmpty(add); std::shared_ptr newMultiply; @@ -136,7 +136,7 @@ bool AddTransformation::transform(TransformationContext& context, ngraph::patter newMultiply = NetworkHelper::swapMultiplyAndAdd(add, multiplyBranch.first); ngraph::copy_runtime_info({ add, newMultiply }, newMultiply); - if (is_type(newMultiply->get_input_node_shared_ptr(0))) { + if (ov::is_type(newMultiply->get_input_node_shared_ptr(0))) { newAddOrSubtract = newMultiply->get_input_node_shared_ptr(0); auto subtract = fuseWithSubtract(newAddOrSubtract); diff --git a/inference-engine/src/low_precision_transformations/src/clamp.cpp b/inference-engine/src/low_precision_transformations/src/clamp.cpp index 45c4cd5986c..da1b462697d 100644 --- a/inference-engine/src/low_precision_transformations/src/clamp.cpp 
+++ b/inference-engine/src/low_precision_transformations/src/clamp.cpp @@ -37,13 +37,13 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt return false; } - auto constant = as_type_ptr(sub->get_input_node_shared_ptr(1)); + auto constant = ov::as_type_ptr(sub->get_input_node_shared_ptr(1)); if (constant == nullptr) { const auto convert = sub->get_input_node_shared_ptr(1); - if (!is_type(convert)) { + if (!ov::is_type(convert)) { return false; } - constant = as_type_ptr(convert->get_input_node_shared_ptr(0)); + constant = ov::as_type_ptr(convert->get_input_node_shared_ptr(0)); } if (constant == nullptr) { @@ -66,7 +66,7 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt return false; } - const auto newClamp = as_type_ptr(moveDequantizationAfter(context, clamp, dequantization, false, moveSubtract)); + const auto newClamp = ov::as_type_ptr(moveDequantizationAfter(context, clamp, dequantization, false, moveSubtract)); std::shared_ptr replacement; { @@ -74,7 +74,7 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt double max = newClamp->get_max(); if (dequantization.multiply != nullptr) { - double scale = as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1))->cast_vector()[0]; + double scale = ov::as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1))->cast_vector()[0]; if (scale < 0.0) { std::swap(min, max); } @@ -83,7 +83,7 @@ bool ClampTransformation::transform(TransformationContext& context, ngraph::patt } if (dequantization.subtract != nullptr && moveSubtract) { - double shift = as_type_ptr(dequantization.subtractConstant)->cast_vector()[0]; + double shift = ov::as_type_ptr(dequantization.subtractConstant)->cast_vector()[0]; min += shift; max += shift; } diff --git a/inference-engine/src/low_precision_transformations/src/concat.cpp b/inference-engine/src/low_precision_transformations/src/concat.cpp index 6adeb1f413c..5c0da831b8e 100644 --- a/inference-engine/src/low_precision_transformations/src/concat.cpp +++ b/inference-engine/src/low_precision_transformations/src/concat.cpp @@ -178,7 +178,7 @@ bool ConcatTransformation::isPrecisionPreserved(std::shared_ptr) const noe } bool ConcatTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr layer) const { - std::shared_ptr concat = as_type_ptr(layer); + std::shared_ptr concat = ov::as_type_ptr(layer); if (concat == nullptr) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/convert.cpp b/inference-engine/src/low_precision_transformations/src/convert.cpp index e96fc4820c7..fbc64e0db62 100644 --- a/inference-engine/src/low_precision_transformations/src/convert.cpp +++ b/inference-engine/src/low_precision_transformations/src/convert.cpp @@ -37,7 +37,7 @@ ConvertTransformation::ConvertTransformation(const Params& params) : LayerTransf } bool ConvertTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) { - std::shared_ptr convert = as_type_ptr(m.get_match_root()); + std::shared_ptr convert = ov::as_type_ptr(m.get_match_root()); if (!convert) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/convolution.cpp b/inference-engine/src/low_precision_transformations/src/convolution.cpp index cb05b00d141..344c9ba90ac 100644 --- a/inference-engine/src/low_precision_transformations/src/convolution.cpp +++ b/inference-engine/src/low_precision_transformations/src/convolution.cpp @@ -56,7 +56,7 @@ bool 
ConvolutionTransformation::transform(TransformationContext &context, ngraph if (!canConvolutionBeTransformed(context, convolution)) { const auto weightInput = convolution->get_input_node_shared_ptr(1); - const auto reshapeFromWeights = as_type_ptr(weightInput); + const auto reshapeFromWeights = ov::as_type_ptr(weightInput); FakeQuantizeDequantization dequantization = reshapeFromWeights == nullptr ? NetworkHelper::getDequantization(convolution, 1ul) : NetworkHelper::getDequantization(reshapeFromWeights); @@ -69,7 +69,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph reshapeFromWeights->input_value(1), false); } - if (is_type(resultConstant)) { + if (ov::is_type(resultConstant)) { replace_node(weightInput, resultConstant); } } else { @@ -90,7 +90,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph if (optimizedSubtract == nullptr) { optimizedSubtract = dequantization.subtract; } - subtract = as_type_ptr(optimizedSubtract); + subtract = ov::as_type_ptr(optimizedSubtract); } // workaround normalizes shape of Subtract to match CPU plugin expectations @@ -108,7 +108,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph Shape{ length }, broadcastShape)); - const auto newSubtract = as_type_ptr(subtract->clone_with_new_inputs({ + const auto newSubtract = ov::as_type_ptr(subtract->clone_with_new_inputs({ subtract->input_value(0), newShift })); NetworkHelper::copyInfo(subtract, newSubtract); @@ -159,7 +159,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph } const auto copyNode = convolution->clone_with_new_inputs({ dequantization.multiply->input_value(0), convolution->input_value(1) }); - auto conv = as_type_ptr(copyNode); + auto conv = ov::as_type_ptr(copyNode); std::shared_ptr relaxedNewConvolution; if (conv) { relaxedNewConvolution = std::make_shared>( @@ -168,7 +168,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph std::vector{deqPrecision}); } else { relaxedNewConvolution = std::make_shared>( - *as_type_ptr(copyNode), + *ov::as_type_ptr(copyNode), std::vector{deqPrecision, deqPrecision}, std::vector{deqPrecision}); } @@ -183,7 +183,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph replace_node(convolution, newMultiplyAfter); convolution = newMultiplyAfter->input_value(0).get_node_shared_ptr(); - if (is_type(convolution->get_input_node_ptr(0))) { + if (ov::is_type(convolution->get_input_node_ptr(0))) { auto newConvolution = convolution->clone_with_new_inputs({ convolution->get_input_node_ptr(0)->input_value(0), convolution->input_value(1)}); @@ -201,24 +201,24 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph return false; } - std::shared_ptr reshapeFromWeights = as_type_ptr(convolution->get_input_node_shared_ptr(1)); + std::shared_ptr reshapeFromWeights = ov::as_type_ptr(convolution->get_input_node_shared_ptr(1)); dequantization = reshapeFromWeights == nullptr ? 
NetworkHelper::getDequantization(convolution, 1ul) : NetworkHelper::getDequantization(reshapeFromWeights); assert(!dequantization.empty()); - if (is_type(dequantization.data.get_node())) { - const std::shared_ptr fq = as_type_ptr(dequantization.data.get_node_shared_ptr()); + if (ov::is_type(dequantization.data.get_node())) { + const std::shared_ptr fq = ov::as_type_ptr(dequantization.data.get_node_shared_ptr()); std::shared_ptr newFQ = NetworkHelper::fold_fake_quantize(fq, true); NetworkHelper::copyInfo(fq, newFQ); replace_node(fq, newFQ); } - std::shared_ptr multiplyFromWeights = as_type_ptr( + std::shared_ptr multiplyFromWeights = ov::as_type_ptr( reshapeFromWeights == nullptr ? convolution->get_input_node_shared_ptr(1) : convolution->get_input_node_ptr(1)->get_input_node_shared_ptr(0)); - std::shared_ptr subtractFromWeights = as_type_ptr(multiplyFromWeights->get_input_node_shared_ptr(0)); + std::shared_ptr subtractFromWeights = ov::as_type_ptr(multiplyFromWeights->get_input_node_shared_ptr(0)); { const auto newScalePShape = multiplyFromWeights->get_input_partial_shape(1); @@ -231,7 +231,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph } if (reshapeFromWeights != nullptr) { - reshapeFromWeights = as_type_ptr(reshapeFromWeights->copy_with_new_inputs({ + reshapeFromWeights = ov::as_type_ptr(reshapeFromWeights->copy_with_new_inputs({ multiplyFromWeights->input_value(0), reshapeFromWeights->input_value(1) })); } @@ -264,7 +264,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph if (optimizedSubtract == nullptr) { subtractFromWeights = nullptr; } else { - subtractFromWeights = as_type_ptr(optimizedSubtract); + subtractFromWeights = ov::as_type_ptr(optimizedSubtract); const auto weightsPShape = subtractFromWeights->get_input_partial_shape(0); assert(weightsPShape.is_static()); @@ -281,7 +281,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph } } - std::shared_ptr convertFromWeights = as_type_ptr(subtractFromWeights == nullptr ? + std::shared_ptr convertFromWeights = ov::as_type_ptr(subtractFromWeights == nullptr ? 
multiplyFromWeights->get_input_node_shared_ptr(0) : subtractFromWeights->get_input_node_shared_ptr(0)); if (convertFromWeights != nullptr) { @@ -298,7 +298,7 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph convolution = newConvolution; } - reshapeFromWeights = as_type_ptr(convolution->get_input_node_shared_ptr(1)); + reshapeFromWeights = ov::as_type_ptr(convolution->get_input_node_shared_ptr(1)); if (reshapeFromWeights != nullptr) { // remove Reshape on weights const std::shared_ptr newWeights = fold_reshape( @@ -319,11 +319,11 @@ bool ConvolutionTransformation::transform(TransformationContext &context, ngraph NetworkHelper::normalizeDequantizationShape(finalDequantization); auto onWeights = convolution->get_input_node_shared_ptr(1); - if (is_type(onWeights)) { + if (ov::is_type(onWeights)) { onWeights = onWeights->get_input_node_shared_ptr(0); } - if (is_type(onWeights)) { + if (ov::is_type(onWeights)) { auto& rt = onWeights->get_rt_info(); rt["DISABLED_CONSTANT_FOLDING"] = std::make_shared>(""); } diff --git a/inference-engine/src/low_precision_transformations/src/convolution_backprop_data.cpp b/inference-engine/src/low_precision_transformations/src/convolution_backprop_data.cpp index 680ed16eb17..1054149e836 100644 --- a/inference-engine/src/low_precision_transformations/src/convolution_backprop_data.cpp +++ b/inference-engine/src/low_precision_transformations/src/convolution_backprop_data.cpp @@ -66,7 +66,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con if (!canBeTransformed(context, convolutionBackpropData)) { auto weightsInput = convolutionBackpropData->get_input_node_shared_ptr(1); - std::shared_ptr reshapeFromWeights = as_type_ptr(weightsInput); + std::shared_ptr reshapeFromWeights = ov::as_type_ptr(weightsInput); FakeQuantizeDequantization dequantization = reshapeFromWeights == nullptr ? 
NetworkHelper::getDequantization(convolutionBackpropData, 1ul) : NetworkHelper::getDequantization(reshapeFromWeights); @@ -87,7 +87,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con reshapeFromWeights->input_value(1), false); } - if (is_type(resultConstant)) { + if (ov::is_type(resultConstant)) { replace_node(weightsInput, resultConstant); } } else { @@ -113,7 +113,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con const auto copyNode = convolutionBackpropData->copy_with_new_inputs(inputs); const auto relaxedConvolutionBackpropData = std::make_shared>( - *as_type_ptr(copyNode), + *ov::as_type_ptr(copyNode), std::vector{deqPrecision, deqPrecision}, std::vector{deqPrecision}); @@ -126,7 +126,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con replace_node(convolutionBackpropData, newMultiplyAfter); convolutionBackpropData = newMultiplyAfter->get_input_node_shared_ptr(0); inputs[0] = convolutionBackpropData->get_input_node_ptr(0)->input_value(0); - if (is_type(convolutionBackpropData->get_input_node_ptr(0))) { + if (ov::is_type(convolutionBackpropData->get_input_node_ptr(0))) { auto newConvolution = convolutionBackpropData->copy_with_new_inputs(inputs); replace_node(convolutionBackpropData, newConvolution); convolutionBackpropData = newConvolution; @@ -137,16 +137,16 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con decomposeFakeQuantizeForWeightsPath(convolutionBackpropData, 1ul); dequantization = NetworkHelper::getDequantization(convolutionBackpropData, 1ul); - if (is_type(dequantization.data.get_node())) { - const std::shared_ptr fq = as_type_ptr(dequantization.data.get_node_shared_ptr()); + if (ov::is_type(dequantization.data.get_node())) { + const std::shared_ptr fq = ov::as_type_ptr(dequantization.data.get_node_shared_ptr()); std::shared_ptr newFQ = NetworkHelper::fold_fake_quantize(fq, true); NetworkHelper::copyInfo(fq, newFQ); replace_node(fq, newFQ); } - std::shared_ptr multiplyFromWeights = as_type_ptr( + std::shared_ptr multiplyFromWeights = ov::as_type_ptr( convolutionBackpropData->input_value(1).get_node_shared_ptr()); - std::shared_ptr subtractFromWeights = as_type_ptr(multiplyFromWeights->get_input_node_shared_ptr(0)); + std::shared_ptr subtractFromWeights = ov::as_type_ptr(multiplyFromWeights->get_input_node_shared_ptr(0)); { const auto newScalePShape = multiplyFromWeights->get_input_partial_shape(1); @@ -173,7 +173,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con if (optimizedSubtract == nullptr) { subtractFromWeights = nullptr; } else { - subtractFromWeights = as_type_ptr(optimizedSubtract); + subtractFromWeights = ov::as_type_ptr(optimizedSubtract); const auto weightsPShape = subtractFromWeights->get_input_partial_shape(0); assert(weightsPShape.is_static()); @@ -190,7 +190,7 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con } std::shared_ptr convertFromWeights = - as_type_ptr( + ov::as_type_ptr( subtractFromWeights == nullptr ? 
multiplyFromWeights->get_input_node_shared_ptr(0) : subtractFromWeights->get_input_node_shared_ptr(0)); @@ -209,11 +209,11 @@ bool ConvolutionBackpropDataTransformation::transform(TransformationContext &con updateOutput(context, finalDequantization, convolutionBackpropData); auto onWeights = convolutionBackpropData->get_input_node_shared_ptr(1); - if (is_type(onWeights)) { + if (ov::is_type(onWeights)) { onWeights = onWeights->get_input_node_shared_ptr(0); } - if (is_type(onWeights)) { + if (ov::is_type(onWeights)) { auto& rt = onWeights->get_rt_info(); rt["DISABLED_CONSTANT_FOLDING"] = std::make_shared>(""); } diff --git a/inference-engine/src/low_precision_transformations/src/depth_to_space.cpp b/inference-engine/src/low_precision_transformations/src/depth_to_space.cpp index 09d3b6fac17..3e66bfd0e04 100644 --- a/inference-engine/src/low_precision_transformations/src/depth_to_space.cpp +++ b/inference-engine/src/low_precision_transformations/src/depth_to_space.cpp @@ -51,7 +51,7 @@ bool DepthToSpaceTransformation::canBeTransformed(const TransformationContext& c const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(layer); if (dequantization.multiply != nullptr) { - auto multiplyConst = as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1)); + auto multiplyConst = ov::as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1)); if (!NetworkHelper::isScalarLike(multiplyConst)) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/eltwise_base_transformation.cpp b/inference-engine/src/low_precision_transformations/src/eltwise_base_transformation.cpp index 54e87798a64..b2ae84b19f9 100644 --- a/inference-engine/src/low_precision_transformations/src/eltwise_base_transformation.cpp +++ b/inference-engine/src/low_precision_transformations/src/eltwise_base_transformation.cpp @@ -41,8 +41,8 @@ bool EltwiseBaseTransformation::canBeTransformed(const TransformationContext& co return false; } - if ((as_type_ptr(operation->get_input_node_shared_ptr(0)) || - as_type_ptr(operation->get_input_node_shared_ptr(1))) && + if ((ov::as_type_ptr(operation->get_input_node_shared_ptr(0)) || + ov::as_type_ptr(operation->get_input_node_shared_ptr(1))) && !FakeQuantizeDequantization::checkElementwise(operation)) { NetworkHelper::cleanRunTimeInfo(operation); } @@ -65,18 +65,18 @@ bool EltwiseBaseTransformation::canBeTransformed(const TransformationContext& co } static bool isTargetType(const std::shared_ptr node) { - return is_type(node) || - is_type(node) || - is_type(node); + return ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node); } static std::shared_ptr getDataParent(const std::shared_ptr branchData) { std::shared_ptr parent = branchData; - while (is_type(parent)) { + while (ov::is_type(parent)) { parent = parent->get_input_node_shared_ptr(0); } - if (is_type(parent) && isTargetType(parent->get_input_node_shared_ptr(0))) { + if (ov::is_type(parent) && isTargetType(parent->get_input_node_shared_ptr(0))) { return parent->get_input_node_shared_ptr(0); } return parent; @@ -96,12 +96,12 @@ static bool isBranchHaveMultipleConsumers(const std::shared_ptr branchData // return branch index with FP32 precision after eltwise transformation int EltwiseBaseTransformation::getNotEmpty(const std::shared_ptr& eltwise) const { const FakeQuantizeDequantization dequantization1 = pass::low_precision::NetworkHelper::getDequantization(eltwise, 0ul); - if (as_type(dequantization1.data.get_node())) { + if 
(ov::as_type(dequantization1.data.get_node())) { return -1; } const FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(eltwise, 1ul); - if (as_type(dequantization2.data.get_node())) { + if (ov::as_type(dequantization2.data.get_node())) { return -1; } @@ -130,9 +130,9 @@ int EltwiseBaseTransformation::getNotEmpty(const std::shared_ptr& eltwise) } const std::shared_ptr fakeQuantize1 = - as_type_ptr(dequantization1.data.get_node_shared_ptr()); + ov::as_type_ptr(dequantization1.data.get_node_shared_ptr()); const std::shared_ptr fakeQuantize2 = - as_type_ptr(dequantization2.data.get_node_shared_ptr()); + ov::as_type_ptr(dequantization2.data.get_node_shared_ptr()); if (fakeQuantize1 && !fakeQuantize2) { return 0; @@ -151,11 +151,11 @@ int EltwiseBaseTransformation::getNotEmpty(const std::shared_ptr& eltwise) return 1; } - if (is_type(dequantization1.data.get_node())) { + if (ov::is_type(dequantization1.data.get_node())) { return 0; } - if (is_type(dequantization2.data.get_node())) { + if (ov::is_type(dequantization2.data.get_node())) { return 1; } @@ -199,17 +199,17 @@ std::pair EltwiseBaseTransformation::getMultiplyConstBranch(const std: const auto dequantization2 = NetworkHelper::getDequantization(eltwise, 1); std::shared_ptr constParent = dequantization1.empty() ? - as_type_ptr(parent1) : - as_type_ptr(dequantization1.data.get_node_shared_ptr()); - std::shared_ptr multiplyParent = as_type_ptr(parent2); + ov::as_type_ptr(parent1) : + ov::as_type_ptr(dequantization1.data.get_node_shared_ptr()); + std::shared_ptr multiplyParent = ov::as_type_ptr(parent2); int multiplyBranch = 1; if (constParent == nullptr || multiplyParent == nullptr) { constParent = dequantization2.empty() ? - as_type_ptr(parent2) : - as_type_ptr(dequantization2.data.get_node_shared_ptr()); - multiplyParent = as_type_ptr(parent1); + ov::as_type_ptr(parent2) : + ov::as_type_ptr(dequantization2.data.get_node_shared_ptr()); + multiplyParent = ov::as_type_ptr(parent1); multiplyBranch = 0; } @@ -220,14 +220,14 @@ std::pair EltwiseBaseTransformation::getMultiplyConstBranch(const std: auto multiplyParentParent1 = multiplyParent->get_input_node_shared_ptr(0); auto multiplyParentParent2 = multiplyParent->get_input_node_shared_ptr(1); - auto multiplyParentParent = as_type_ptr(multiplyParentParent1); - auto multiplyParentConst = as_type_ptr(multiplyParentParent2); + auto multiplyParentParent = ov::as_type_ptr(multiplyParentParent1); + auto multiplyParentConst = ov::as_type_ptr(multiplyParentParent2); int multiplyActBranch = 0; if (multiplyParentConst == nullptr) { - multiplyParentParent = as_type_ptr(multiplyParentParent2); - multiplyParentConst = as_type_ptr(multiplyParentParent1); + multiplyParentParent = ov::as_type_ptr(multiplyParentParent2); + multiplyParentConst = ov::as_type_ptr(multiplyParentParent1); multiplyActBranch = 1; } diff --git a/inference-engine/src/low_precision_transformations/src/fake_quantize.cpp b/inference-engine/src/low_precision_transformations/src/fake_quantize.cpp index 405e8fca87a..cab841e3bc5 100644 --- a/inference-engine/src/low_precision_transformations/src/fake_quantize.cpp +++ b/inference-engine/src/low_precision_transformations/src/fake_quantize.cpp @@ -68,11 +68,11 @@ static std::shared_ptr updateShape(std::shared_ptr constantOp, const } static std::shared_ptr getData(const std::shared_ptr& eltwise) { - if (!is_type(eltwise->get_input_node_shared_ptr(0))) { + if (!ov::is_type(eltwise->get_input_node_shared_ptr(0))) { return 
eltwise->get_input_node_shared_ptr(0); } - if (!is_type(eltwise->get_input_node_shared_ptr(1))) { + if (!ov::is_type(eltwise->get_input_node_shared_ptr(1))) { return eltwise->get_input_node_shared_ptr(1); } @@ -84,12 +84,12 @@ static std::shared_ptr getConstant(const std::shared_ptr return nullptr; } - std::shared_ptr constant = as_type_ptr(eltwise->get_input_node_shared_ptr(1)); + std::shared_ptr constant = ov::as_type_ptr(eltwise->get_input_node_shared_ptr(1)); if (constant != nullptr) { return constant; } - return as_type_ptr(eltwise->get_input_node_shared_ptr(0)); + return ov::as_type_ptr(eltwise->get_input_node_shared_ptr(0)); } } // namespace fq @@ -136,12 +136,12 @@ std::shared_ptr FakeQuantizeTransformation::fuseElementwis std::shared_ptr inputHighConst_f32 = foldConvert(fakeQuantize->get_input_node_shared_ptr(2), deqPrecision); std::shared_ptr constant = fq::getConstant(eltwise); - if (is_type(eltwise) && checkElementwise(eltwise)) { + if (ov::is_type(eltwise) && checkElementwise(eltwise)) { const auto value = constant->get_output_element_type(0) == deqPrecision ? constant : foldConvert(constant, deqPrecision); - const auto valueVec = as_type_ptr(value)->cast_vector(); + const auto valueVec = ov::as_type_ptr(value)->cast_vector(); if (std::any_of(valueVec.cbegin(), valueVec.cend(), [](const float value) { return value <= 0.f; })) { return nullptr; @@ -149,8 +149,8 @@ std::shared_ptr FakeQuantizeTransformation::fuseElementwis inputLowConst_f32 = fold(inputLowConst_f32, value); inputHighConst_f32 = fold(inputHighConst_f32, value); - const auto resultLow = as_type_ptr(inputLowConst_f32)->cast_vector(); - const auto resultHigh = as_type_ptr(inputHighConst_f32)->cast_vector(); + const auto resultLow = ov::as_type_ptr(inputLowConst_f32)->cast_vector(); + const auto resultHigh = ov::as_type_ptr(inputHighConst_f32)->cast_vector(); if (std::any_of(resultLow.begin(), resultLow.end(), [](const float value){ return std::isinf(value); }) || std::any_of(resultHigh.begin(), resultHigh.end(), [](const float value){ return std::isinf(value); })) { return nullptr; @@ -158,18 +158,18 @@ std::shared_ptr FakeQuantizeTransformation::fuseElementwis inputLowConst_f32 = fq::updateShape(inputLowConst_f32, fakeQuantize->get_output_partial_shape(0)); inputHighConst_f32 = fq::updateShape(inputHighConst_f32, fakeQuantize->get_output_partial_shape(0)); - } else if (is_type(eltwise) && checkElementwise(eltwise)) { + } else if (ov::is_type(eltwise) && checkElementwise(eltwise)) { const auto value = constant->get_output_element_type(0) == deqPrecision ? 
constant : foldConvert(constant, deqPrecision); inputLowConst_f32 = fq::updateShape(fold(inputLowConst_f32, value), fakeQuantize->get_output_partial_shape(0)); inputHighConst_f32 = fq::updateShape(fold(inputHighConst_f32, value), fakeQuantize->get_output_partial_shape(0)); - } else if (is_type(eltwise) && checkElementwise(eltwise)) { - if (is_type(fq::getData(eltwise)) || - is_type(fq::getData(eltwise)) || - is_type(fq::getData(eltwise)) || - is_type(fq::getData(eltwise))) { + } else if (ov::is_type(eltwise) && checkElementwise(eltwise)) { + if (ov::is_type(fq::getData(eltwise)) || + ov::is_type(fq::getData(eltwise)) || + ov::is_type(fq::getData(eltwise)) || + ov::is_type(fq::getData(eltwise))) { return nullptr; } @@ -179,7 +179,7 @@ std::shared_ptr FakeQuantizeTransformation::fuseElementwis inputLowConst_f32 = fq::updateShape(fold(inputLowConst_f32, value), fakeQuantize->get_output_partial_shape(0)); inputHighConst_f32 = fq::updateShape(fold(inputHighConst_f32, value), fakeQuantize->get_output_partial_shape(0)); - } else if (is_type(eltwise)) { + } else if (ov::is_type(eltwise)) { // issue #40611 if ((eltwise->get_input_element_type(0) == element::i32) && ((eltwise->get_output_element_type(0) == element::f16) || (eltwise->get_output_element_type(0) == element::f32))) { @@ -192,7 +192,7 @@ std::shared_ptr FakeQuantizeTransformation::fuseElementwis const auto data = fq::getData(eltwise); const size_t outputIdx = NetworkHelper::getParentOutputIndex(data, eltwise); - const auto newFakeQuantize = as_type_ptr(fakeQuantize->clone_with_new_inputs({ + const auto newFakeQuantize = ov::as_type_ptr(fakeQuantize->clone_with_new_inputs({ data->output(outputIdx), inputLowConst_f32, inputHighConst_f32, diff --git a/inference-engine/src/low_precision_transformations/src/fake_quantize_decomposition.cpp b/inference-engine/src/low_precision_transformations/src/fake_quantize_decomposition.cpp index b522546c55e..fda2858f6df 100644 --- a/inference-engine/src/low_precision_transformations/src/fake_quantize_decomposition.cpp +++ b/inference-engine/src/low_precision_transformations/src/fake_quantize_decomposition.cpp @@ -95,8 +95,8 @@ DataPrecision getDataPrecisionByOutputPortAndFakeQuantize(std::shared_ptr layer) { const size_t levels = layer->get_levels(); - const std::vector outputLowValues = as_type_ptr(layer->get_input_node_shared_ptr(3))->cast_vector(); - const std::vector outputHighValues = as_type_ptr(layer->get_input_node_shared_ptr(4))->cast_vector(); + const std::vector outputLowValues = ov::as_type_ptr(layer->get_input_node_shared_ptr(3))->cast_vector(); + const std::vector outputHighValues = ov::as_type_ptr(layer->get_input_node_shared_ptr(4))->cast_vector(); auto precisionsAttribute = getAttributeFromOutput>(layer->output(0)); if (precisionsAttribute == nullptr) { @@ -166,8 +166,8 @@ std::shared_ptr decomposeFakeQuantize( std::shared_ptr dequantize; if (intervalsAlignment != nullptr) { OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::LPT_LT, "decomposeFakeQuantize1"); - const std::vector outputLowValues = as_type_ptr(layer->get_input_node_shared_ptr(3))->cast_vector(); - const std::vector outputHighValues = as_type_ptr(layer->get_input_node_shared_ptr(4))->cast_vector(); + const std::vector outputLowValues = ov::as_type_ptr(layer->get_input_node_shared_ptr(3))->cast_vector(); + const std::vector outputHighValues = ov::as_type_ptr(layer->get_input_node_shared_ptr(4))->cast_vector(); float dequantizationMul; float dequantizationSub; @@ -230,7 +230,7 @@ std::shared_ptr decomposeFakeQuantize( 
OV_ITT_SCOPE(FIRST_INFERENCE, itt::domains::LPT_LT, "decomposeFakeQuantize2"); // Split FakeQuantize to two parts: Quantize and Dequantize auto QDQ = NetworkHelper::decomposeFakeQuantize( - as_type_ptr(layer), + ov::as_type_ptr(layer), dataPrecision.precision, dataPrecision.min, dataPrecision.max, @@ -251,7 +251,7 @@ std::shared_ptr decomposeFakeQuantize( } // namespace fq_decomposition bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher& m) { - auto layer = as_type_ptr(m.get_match_root()); + auto layer = ov::as_type_ptr(m.get_match_root()); if (!NetworkHelper::isQuantizeSupported(layer)) { return false; } @@ -343,8 +343,8 @@ bool FakeQuantizeDecompositionTransformation::transform(TransformationContext& c if (dataPrecision.precision == element::undefined) { element::Type precision; const auto levels = layer->get_levels(); - const std::vector outputLowValues = as_type_ptr(layer->get_input_node_shared_ptr(3))->cast_vector(); - const std::vector outputHighValues = as_type_ptr(layer->get_input_node_shared_ptr(4))->cast_vector(); + const std::vector outputLowValues = ov::as_type_ptr(layer->get_input_node_shared_ptr(3))->cast_vector(); + const std::vector outputHighValues = ov::as_type_ptr(layer->get_input_node_shared_ptr(4))->cast_vector(); if (intervalsAlignment == nullptr) { // define precision by FakeQuantize intervals LayerTransformation::PrecisionDetails precisionDetailsAtOutputIntervals = LayerTransformation::getPrecisionDetails( diff --git a/inference-engine/src/low_precision_transformations/src/fake_quantize_dequantization.cpp b/inference-engine/src/low_precision_transformations/src/fake_quantize_dequantization.cpp index da84ed329a7..14a0104a46c 100644 --- a/inference-engine/src/low_precision_transformations/src/fake_quantize_dequantization.cpp +++ b/inference-engine/src/low_precision_transformations/src/fake_quantize_dequantization.cpp @@ -42,9 +42,9 @@ bool FakeQuantizeDequantization::multiplyHasZeroOrDenormal() const { return false; } - std::shared_ptr multiplyConstant = as_type_ptr(multiply->get_input_node_shared_ptr(1)); + std::shared_ptr multiplyConstant = ov::as_type_ptr(multiply->get_input_node_shared_ptr(1)); if (multiplyConstant == nullptr) { - multiplyConstant = as_type_ptr(multiply->get_input_node_shared_ptr(0)); + multiplyConstant = ov::as_type_ptr(multiply->get_input_node_shared_ptr(0)); } if (multiplyConstant == nullptr) { return false; @@ -163,11 +163,11 @@ int FakeQuantizeDequantization::fillDequantizationParams( const size_t branchIndex, std::shared_ptr& convert, std::shared_ptr& constant) { - convert = as_type_ptr(elementwise->get_input_node_shared_ptr(branchIndex)); + convert = ov::as_type_ptr(elementwise->get_input_node_shared_ptr(branchIndex)); if (convert != nullptr) { - constant = as_type_ptr(convert->get_input_node_shared_ptr(0)); + constant = ov::as_type_ptr(convert->get_input_node_shared_ptr(0)); } else { - constant = as_type_ptr(elementwise->get_input_node_shared_ptr(branchIndex)); + constant = ov::as_type_ptr(elementwise->get_input_node_shared_ptr(branchIndex)); } }; @@ -187,12 +187,12 @@ int FakeQuantizeDequantization::fillDequantizationParams( int FakeQuantizeDequantization::fillDequantizationParams( const std::shared_ptr& elementwise, std::shared_ptr& constant) noexcept { - constant = as_type_ptr(elementwise->get_input_node_shared_ptr(1ul)); + constant = ov::as_type_ptr(elementwise->get_input_node_shared_ptr(1ul)); if (constant != nullptr) { return 1; } - constant = 
as_type_ptr(elementwise->get_input_node_shared_ptr(0ul)); + constant = ov::as_type_ptr(elementwise->get_input_node_shared_ptr(0ul)); if (constant != nullptr) { return 0; } diff --git a/inference-engine/src/low_precision_transformations/src/fold_convert.cpp b/inference-engine/src/low_precision_transformations/src/fold_convert.cpp index 5e673a1ef51..f7a3255df49 100644 --- a/inference-engine/src/low_precision_transformations/src/fold_convert.cpp +++ b/inference-engine/src/low_precision_transformations/src/fold_convert.cpp @@ -38,12 +38,12 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ngraph auto foldConvert = [&](const size_t branch) { const auto convert = subtract->get_input_node_shared_ptr(branch); - if (!is_type(convert) || !is_type(convert->get_input_node_shared_ptr(0))) { + if (!ov::is_type(convert) || !ov::is_type(convert->get_input_node_shared_ptr(0))) { return; } const auto resultConstant = ngraph::pass::low_precision::foldConvert(convert->get_input_node_shared_ptr(0), convert->output(0).get_element_type()); - assert(is_type(resultConstant)); + assert(ov::is_type(resultConstant)); replace_node(convert, resultConstant); updateOutput(context, resultConstant, convert); @@ -57,10 +57,10 @@ bool FoldConvertTransformation::transform(TransformationContext& context, ngraph bool FoldConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const { return - (is_type(operation->get_input_node_ptr(1)) && - is_type(operation->get_input_node_ptr(1)->get_input_node_ptr(0))) || - (is_type(operation->get_input_node_ptr(0)) && - is_type(operation->get_input_node_ptr(0)->get_input_node_ptr(0))); + (ov::is_type(operation->get_input_node_ptr(1)) && + ov::is_type(operation->get_input_node_ptr(1)->get_input_node_ptr(0))) || + (ov::is_type(operation->get_input_node_ptr(0)) && + ov::is_type(operation->get_input_node_ptr(0)->get_input_node_ptr(0))); } bool FoldConvertTransformation::isPrecisionPreserved(std::shared_ptr layer) const noexcept { diff --git a/inference-engine/src/low_precision_transformations/src/fold_fake_quantize.cpp b/inference-engine/src/low_precision_transformations/src/fold_fake_quantize.cpp index 7984d946f86..5eb0797123e 100644 --- a/inference-engine/src/low_precision_transformations/src/fold_fake_quantize.cpp +++ b/inference-engine/src/low_precision_transformations/src/fold_fake_quantize.cpp @@ -33,7 +33,7 @@ FoldFakeQuantizeTransformation::FoldFakeQuantizeTransformation(const Params& par } bool FoldFakeQuantizeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) { - const auto fakeQuantize = as_type_ptr(m.get_match_root()); + const auto fakeQuantize = ov::as_type_ptr(m.get_match_root()); if (fakeQuantize == nullptr) { return false; } @@ -51,7 +51,7 @@ bool FoldFakeQuantizeTransformation::transform(TransformationContext& context, n fakeQuantize, false, (constantShape.rank().get_length() < 2) || constantShape[1] != 1ul ? 
1ul : 0ul); - if (is_type(resultConstant)) { + if (ov::is_type(resultConstant)) { replace_node(fakeQuantize, resultConstant); return true; } diff --git a/inference-engine/src/low_precision_transformations/src/fuse_convert.cpp b/inference-engine/src/low_precision_transformations/src/fuse_convert.cpp index 48fbea02119..3533bb66213 100644 --- a/inference-engine/src/low_precision_transformations/src/fuse_convert.cpp +++ b/inference-engine/src/low_precision_transformations/src/fuse_convert.cpp @@ -62,26 +62,26 @@ bool FuseConvertTransformation::transform(TransformationContext& context, ngraph return false; } - const auto convert = as_type_ptr(op->get_input_node_shared_ptr(0)); + const auto convert = ov::as_type_ptr(op->get_input_node_shared_ptr(0)); std::shared_ptr parent = convert->get_input_node_shared_ptr(0); - if (is_type(parent)) { + if (ov::is_type(parent)) { auto convertedConstant = foldConvert(parent, convert->get_convert_element_type()); NetworkHelper::copyInfo(parent, convertedConstant); replace_node(convert, convertedConstant); } else { std::shared_ptr newOp; - if (is_type(op)) { - auto subtract = as_type_ptr(op); + if (ov::is_type(op)) { + auto subtract = ov::as_type_ptr(op); newOp = removeConvertIfPossibleForSubtract(convert, subtract); - } else if (is_type(op)) { + } else if (ov::is_type(op)) { newOp = std::make_shared>( std::vector{ element::f32, element::f32 }, std::vector{}, ngraph::op::TemporaryReplaceOutputType(convert->get_input_source_output(0), element::f32).get(), ngraph::op::TemporaryReplaceOutputType(op->get_input_node_shared_ptr(1), element::f32).get()); NetworkHelper::setOutDataPrecisionForTypeRelaxed(newOp, op->get_output_element_type(0)); replace_node(op, newOp); - } else if (is_type(op)) { + } else if (ov::is_type(op)) { newOp = std::make_shared>( std::vector{ element::f32, element::f32 }, std::vector{}, ngraph::op::TemporaryReplaceOutputType(convert->get_input_source_output(0), element::f32).get(), @@ -103,7 +103,7 @@ bool FuseConvertTransformation::transform(TransformationContext& context, ngraph } bool FuseConvertTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr op) const { - const auto convert = as_type_ptr(op->get_input_node_shared_ptr(0)); + const auto convert = ov::as_type_ptr(op->get_input_node_shared_ptr(0)); // issue #40395 if (convert == nullptr) { return false; diff --git a/inference-engine/src/low_precision_transformations/src/fuse_fake_quantize.cpp b/inference-engine/src/low_precision_transformations/src/fuse_fake_quantize.cpp index 6ce7acfad3a..0c897f468a5 100644 --- a/inference-engine/src/low_precision_transformations/src/fuse_fake_quantize.cpp +++ b/inference-engine/src/low_precision_transformations/src/fuse_fake_quantize.cpp @@ -31,7 +31,7 @@ FuseFakeQuantizeTransformation::FuseFakeQuantizeTransformation(const Params& par } bool FuseFakeQuantizeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) { - std::shared_ptr fakeQuantize = as_type_ptr(m.get_match_root()); + std::shared_ptr fakeQuantize = ov::as_type_ptr(m.get_match_root()); do { fakeQuantize = handle(context, fakeQuantize); } while (fakeQuantize != nullptr); @@ -55,11 +55,11 @@ std::shared_ptr updateShape(std::shared_ptr op, const PartialShape& } std::shared_ptr getData(const std::shared_ptr& eltwise) { - if (!is_type(eltwise->get_input_node_shared_ptr(0))) { + if (!ov::is_type(eltwise->get_input_node_shared_ptr(0))) { return eltwise->get_input_node_shared_ptr(0); } - if (!is_type(eltwise->get_input_node_shared_ptr(1))) 
+    if (!ov::is_type(eltwise->get_input_node_shared_ptr(1))) {
         return eltwise->get_input_node_shared_ptr(1);
     }
@@ -71,12 +71,12 @@ std::shared_ptr getConstant(const std::shared_ptr& eltwi
         return nullptr;
     }
 
-    std::shared_ptr constant = as_type_ptr(eltwise->get_input_node_shared_ptr(1));
+    std::shared_ptr constant = ov::as_type_ptr(eltwise->get_input_node_shared_ptr(1));
     if (constant != nullptr) {
         return constant;
     }
 
-    return as_type_ptr(eltwise->get_input_node_shared_ptr(0));
+    return ov::as_type_ptr(eltwise->get_input_node_shared_ptr(0));
 }
 
 bool eltwiseWithConstant(const std::shared_ptr& eltwise) {
@@ -122,30 +122,30 @@ std::shared_ptr FuseFakeQuantizeTransformation::handle(
     std::shared_ptr inputHightConst = fakeQuantize->get_input_node_shared_ptr(2);
     std::shared_ptr constant = fuse_fq::getConstant(eltwise);
 
-    if (is_type(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
+    if (ov::is_type(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
         const auto value = constant->get_output_element_type(0) == eltwise->get_output_element_type(0) ?
             constant :
             foldConvert(constant, eltwise->get_output_element_type(0));
 
         inputLowConst = fuse_fq::updateShape(fold(inputLowConst, value), fakeQuantize->get_output_partial_shape(0));
         inputHightConst = fuse_fq::updateShape(fold(inputHightConst, value), fakeQuantize->get_output_partial_shape(0));
-    } else if (is_type(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
+    } else if (ov::is_type(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
         const auto value = constant->get_output_element_type(0) == eltwise->get_output_element_type(0) ?
             constant :
             foldConvert(constant, eltwise->get_output_element_type(0));
 
         inputLowConst = fuse_fq::updateShape(fold(inputLowConst, value), fakeQuantize->get_output_partial_shape(0));
         inputHightConst = fuse_fq::updateShape(fold(inputHightConst, value), fakeQuantize->get_output_partial_shape(0));
-    } else if (is_type(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
+    } else if (ov::is_type(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
         const auto value = constant->get_output_element_type(0) == eltwise->get_output_element_type(0) ?
            constant :
            foldConvert(constant, eltwise->get_output_element_type(0));
 
         inputLowConst = fuse_fq::updateShape(fold(inputLowConst, value), fakeQuantize->get_output_partial_shape(0));
         inputHightConst = fuse_fq::updateShape(fold(inputHightConst, value), fakeQuantize->get_output_partial_shape(0));
-    } else if (is_type(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
-        if (is_type(fuse_fq::getData(eltwise)) ||
-            is_type(fuse_fq::getData(eltwise))) {
+    } else if (ov::is_type(eltwise) && fuse_fq::eltwiseWithConstant(eltwise)) {
+        if (ov::is_type(fuse_fq::getData(eltwise)) ||
+            ov::is_type(fuse_fq::getData(eltwise))) {
             return nullptr;
         }
@@ -155,7 +155,7 @@ std::shared_ptr FuseFakeQuantizeTransformation::handle(
         inputLowConst = fuse_fq::updateShape(fold(inputLowConst, value), fakeQuantize->get_output_partial_shape(0));
         inputHightConst = fuse_fq::updateShape(fold(inputHightConst, value), fakeQuantize->get_output_partial_shape(0));
-    } else if (is_type(eltwise)) {
+    } else if (ov::is_type(eltwise)) {
         // issue #40611
         if ((eltwise->input(0).get_element_type() == element::i32) && (eltwise->output(0).get_element_type() == element::f32)) {
             return nullptr;
@@ -164,7 +164,7 @@ std::shared_ptr FuseFakeQuantizeTransformation::handle(
         return nullptr;
     }
 
-    std::shared_ptr newFakeQuantize = as_type_ptr(fakeQuantize->clone_with_new_inputs({
+    std::shared_ptr newFakeQuantize = ov::as_type_ptr(fakeQuantize->clone_with_new_inputs({
         fuse_fq::getData(eltwise),
         inputLowConst,
         inputHightConst,
diff --git a/inference-engine/src/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp b/inference-engine/src/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp
index ccff4188d3a..3cab73ba3e9 100644
--- a/inference-engine/src/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp
+++ b/inference-engine/src/low_precision_transformations/src/fuse_multiply_to_fake_quantize.cpp
@@ -38,11 +38,11 @@ bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext&
     }
 
     const auto parent = multiply->get_input_node_shared_ptr(0);
-    auto fakeQuantize = as_type_ptr(parent);
-    const auto convert = as_type_ptr(parent);
+    auto fakeQuantize = ov::as_type_ptr(parent);
+    const auto convert = ov::as_type_ptr(parent);
 
     if (convert) {
-        fakeQuantize = as_type_ptr(convert->get_input_node_shared_ptr(0));
+        fakeQuantize = ov::as_type_ptr(convert->get_input_node_shared_ptr(0));
     }
 
     const auto multiplyConstant = multiply->get_input_node_shared_ptr(1);
@@ -90,7 +90,7 @@ bool FuseMultiplyToFakeQuantizeTransformation::transform(TransformationContext&
 }
 
 bool FuseMultiplyToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const {
-    if (!is_type(operation->get_input_node_shared_ptr(1))) {
+    if (!ov::is_type(operation->get_input_node_shared_ptr(1))) {
         return false;
     }
@@ -99,11 +99,11 @@ bool FuseMultiplyToFakeQuantizeTransformation::canBeTransformed(const Transforma
     }
 
     const auto parent = operation->get_input_node_shared_ptr(0);
-    auto fq = as_type_ptr(parent);
-    const auto convert = as_type_ptr(parent);
+    auto fq = ov::as_type_ptr(parent);
+    const auto convert = ov::as_type_ptr(parent);
 
     if (convert) {
-        fq = as_type_ptr(convert->get_input_node_shared_ptr(0));
+        fq = ov::as_type_ptr(convert->get_input_node_shared_ptr(0));
     }
 
     if (!fq) {
diff --git a/inference-engine/src/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp b/inference-engine/src/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp
index b8ec9b192fd..edd8ee35cb4 100644
--- a/inference-engine/src/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp
+++ b/inference-engine/src/low_precision_transformations/src/fuse_subtract_to_fake_quantize.cpp
@@ -37,11 +37,11 @@ bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext&
     }
 
     const auto parent = subtract->get_input_node_shared_ptr(0);
-    auto fakeQuantize = as_type_ptr(parent);
-    const auto convert = as_type_ptr(parent);
+    auto fakeQuantize = ov::as_type_ptr(parent);
+    const auto convert = ov::as_type_ptr(parent);
 
     if (convert) {
-        fakeQuantize = as_type_ptr(convert->get_input_node_shared_ptr(0));
+        fakeQuantize = ov::as_type_ptr(convert->get_input_node_shared_ptr(0));
     }
 
     const auto subtractConstant = subtract->get_input_node_shared_ptr(1);
@@ -84,7 +84,7 @@ bool FuseSubtractToFakeQuantizeTransformation::transform(TransformationContext&
 }
 
 bool FuseSubtractToFakeQuantizeTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr operation) const {
-    if (!is_type(operation->get_input_node_shared_ptr(1))) {
+    if (!ov::is_type(operation->get_input_node_shared_ptr(1))) {
         return false;
     }
@@ -95,20 +95,20 @@ bool FuseSubtractToFakeQuantizeTransformation::canBeTransforma
     const auto children = operation->get_output_target_inputs(0);
 
     for (const auto& target : children) {
-        const auto convolution = is_type(target.get_node());
-        const auto groupConvolution = is_type(target.get_node());
-        const auto convolutionBackpropData = is_type(target.get_node());
+        const auto convolution = ov::is_type(target.get_node());
+        const auto groupConvolution = ov::is_type(target.get_node());
+        const auto convolutionBackpropData = ov::is_type(target.get_node());
         if (convolution || groupConvolution || convolutionBackpropData) {
             return false;
         }
     }
 
     const auto parent = operation->get_input_node_shared_ptr(0);
-    auto fq = as_type_ptr(parent);
-    const auto convert = as_type_ptr(parent);
+    auto fq = ov::as_type_ptr(parent);
+    const auto convert = ov::as_type_ptr(parent);
 
     if (convert) {
-        fq = as_type_ptr(convert->get_input_node_shared_ptr(0));
+        fq = ov::as_type_ptr(convert->get_input_node_shared_ptr(0));
     }
 
     if (!fq) {
diff --git a/inference-engine/src/low_precision_transformations/src/interpolate.cpp b/inference-engine/src/low_precision_transformations/src/interpolate.cpp
index c167a6713b6..ffc5319278a 100644
--- a/inference-engine/src/low_precision_transformations/src/interpolate.cpp
+++ b/inference-engine/src/low_precision_transformations/src/interpolate.cpp
@@ -63,13 +63,13 @@ bool InterpolateTransformation::transform(TransformationContext &context, ngraph
 }
 
 bool InterpolateTransformation::isPrecisionPreserved(std::shared_ptr layer) const noexcept {
-    std::shared_ptr interpolate1 = as_type_ptr(layer);
+    std::shared_ptr interpolate1 = ov::as_type_ptr(layer);
     if (interpolate1) {
         const auto attrs = interpolate1->get_attrs();
         return attrs.mode == "nearest";
     }
 
-    std::shared_ptr interpolate4 = as_type_ptr(layer);
+    std::shared_ptr interpolate4 = ov::as_type_ptr(layer);
     if (interpolate4) {
         const auto attrs = interpolate4->get_attrs();
         return attrs.mode == op::v4::Interpolate::InterpolateMode::NEAREST;
@@ -90,7 +90,7 @@ bool InterpolateTransformation::canBeTransformed(const TransformationContext& co
         return false;
     }
 
-    const auto interpolate1 = as_type_ptr(layer);
+    const auto interpolate1 = ov::as_type_ptr(layer);
     if (interpolate1) {
         const auto interpAttrs = interpolate1->get_attrs();
         if (interpAttrs.axes.count(0) || interpAttrs.axes.count(1)) {
@@ -104,7 +104,7 @@ bool InterpolateTransformation::canBeTransformed(const TransformationContext& co
         }
     }
 
-    const auto interpolate4 = as_type_ptr(layer);
+    const auto interpolate4 = ov::as_type_ptr(layer);
     if (interpolate4) {
         const auto interpAttrs = interpolate4->get_attrs();
diff --git a/inference-engine/src/low_precision_transformations/src/layer_transformation.cpp b/inference-engine/src/low_precision_transformations/src/layer_transformation.cpp
index 14d21fa29b6..1b05f965dc1 100644
--- a/inference-engine/src/low_precision_transformations/src/layer_transformation.cpp
+++ b/inference-engine/src/low_precision_transformations/src/layer_transformation.cpp
@@ -147,9 +147,9 @@ bool LayerTransformation::canSubtractBeHandled(const std::shared_ptr& op, const
     auto parent = dequantization.subtract->input_value(1).get_node_shared_ptr();
 
-    if (is_type(parent)) {
+    if (ov::is_type(parent)) {
         return true;
-    } else if (is_type(parent) && is_type(parent->get_input_node_shared_ptr(0))) {
+    } else if (ov::is_type(parent) && ov::is_type(parent->get_input_node_shared_ptr(0))) {
         const auto constant = parent->get_input_node_shared_ptr(0);
         const auto constantType = constant->output(0).get_element_type();
         return operationType == constantType;
@@ -171,7 +171,7 @@ std::stringstream toStream(const std::vector& dequantizationValues) {
 }
 
 void LayerTransformation::printDequantizationInfo(const std::shared_ptr& layer) {
-    const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(as_type_ptr(layer));
+    const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(ov::as_type_ptr(layer));
     std::cout <<
         layer->get_type_name() << (NetworkHelper::isConstantPath(layer) ? " on weights " : " on activations ") <<
         layer->get_friendly_name() << ":" << std::endl <<
@@ -337,7 +337,7 @@ void LayerTransformation::updateOutput(
     // TODO: not tested!!!
     for (auto output : lastNode->outputs()) {
         for (auto input : output.get_target_inputs()) {
-            if (is_type(input.get_node())) {
+            if (ov::is_type(input.get_node())) {
                 const std::string originalName = originalNode->get_friendly_name();
                 originalNode->set_friendly_name(originalName + LayerTransformation::originalLayerPostfix);
                 lastNode->set_friendly_name(originalName);
diff --git a/inference-engine/src/low_precision_transformations/src/low_precision.cpp b/inference-engine/src/low_precision_transformations/src/low_precision.cpp
index f33df2e5c89..be040eb122a 100644
--- a/inference-engine/src/low_precision_transformations/src/low_precision.cpp
+++ b/inference-engine/src/low_precision_transformations/src/low_precision.cpp
@@ -95,7 +95,7 @@ void make_matcher_type_relaxed(ngraph::pass::GraphRewrite* transformation) {
     using namespace ngraph;
 
     auto is_op_type = [](std::shared_ptr n) {
-        return !!as_type_ptr(n);
+        return !!ov::as_type_ptr(n);
     };
 
     auto p_node = std::make_shared(element::f32, Shape{}, is_op_type);
@@ -270,7 +270,7 @@ bool ngraph::pass::low_precision::LowPrecision::isFunctionQuantized(const std::s
                 continue;
             }
 
-            const std::shared_ptr fakeQuantize = as_type_ptr(parent);
+            const std::shared_ptr fakeQuantize = ov::as_type_ptr(parent);
             if ((fakeQuantize != nullptr) &&
                 QuantizationDetails::outputLayoutIsSupported(fakeQuantize) &&
                 QuantizationDetails::isSupportedLevel(fakeQuantize->get_levels())) {
diff --git a/inference-engine/src/low_precision_transformations/src/markup_precisions.cpp b/inference-engine/src/low_precision_transformations/src/markup_precisions.cpp
index 7cf2c5b3236..d226252748e 100644
--- a/inference-engine/src/low_precision_transformations/src/markup_precisions.cpp
+++ b/inference-engine/src/low_precision_transformations/src/markup_precisions.cpp
@@ -83,7 +83,7 @@ bool ngraph::pass::low_precision::MarkupPrecisions::run_on_function(std::shared_
         // TODO: don't need to set restrictions for not supported operations
         // if don't set restrictions for not supported operations then accuracy drop appears, issue #59197
-        const bool supported = is_type(node) || isSupported(node);
+        const bool supported = ov::is_type(node) || isSupported(node);
         if (!supported || !LayerTransformation::canBeTransformedStatic(node)) {
             setRestriction(node, std::vector>> { {0ul, {}}});
             continue;
@@ -157,14 +157,14 @@ bool ngraph::pass::low_precision::MarkupPrecisions::isPrecisionPreserved(const s
         return precisionPreserved;
     }
 
-    if (is_type(node)) {
-        std::shared_ptr interpolate1 = as_type_ptr(node);
+    if (ov::is_type(node)) {
+        std::shared_ptr interpolate1 = ov::as_type_ptr(node);
         if (interpolate1) {
             const auto attrs = interpolate1->get_attrs();
             return attrs.mode == "nearest";
         }
 
-        std::shared_ptr interpolate4 = as_type_ptr(node);
+        std::shared_ptr interpolate4 = ov::as_type_ptr(node);
         if (interpolate4) {
             const auto attrs = interpolate4->get_attrs();
             return attrs.mode == op::v4::Interpolate::InterpolateMode::NEAREST;
diff --git a/inference-engine/src/low_precision_transformations/src/mat_mul.cpp b/inference-engine/src/low_precision_transformations/src/mat_mul.cpp
index 83f08fd4ed3..64816e8c541 100644
--- a/inference-engine/src/low_precision_transformations/src/mat_mul.cpp
+++ b/inference-engine/src/low_precision_transformations/src/mat_mul.cpp
@@ -40,18 +40,18 @@ MatMulTransformation::MatMulTransformation(const Params& params) : LayerTransfor
 }
 
 bool MatMulTransformation::transform(TransformationContext &context, ngraph::pattern::Matcher &m) {
-    std::shared_ptr matMul = as_type_ptr(m.get_match_root());
+    std::shared_ptr matMul = ov::as_type_ptr(m.get_match_root());
     if ((matMul == nullptr) || !canBeTransformed(context, matMul)) {
         return false;
     }
 
-    matMul = as_type_ptr(NetworkHelper::separateInStandaloneBranch(matMul));
+    matMul = ov::as_type_ptr(NetworkHelper::separateInStandaloneBranch(matMul));
     const auto dequantization1 = NetworkHelper::getDequantization(matMul, 0);
     auto dequantization2 = NetworkHelper::getDequantization(matMul, 1);
 
     if (dequantization2.empty()) {
         const std::shared_ptr fakeQuantize =
-            as_type_ptr(dequantization2.data.get_node_shared_ptr());
+            ov::as_type_ptr(dequantization2.data.get_node_shared_ptr());
         if (fakeQuantize != nullptr) {
             const QuantizationDetails quantizationDetails = QuantizationDetails::getDetails(fakeQuantize);
@@ -90,7 +90,7 @@ bool MatMulTransformation::transform(TransformationContext &context, ngraph::pat
     // dequantization with subtract on activations & constant weights
     if (dequantization1.subtract) {
-        auto broadcastShape = NetworkHelper::isScalarLike(as_type_ptr(dequantization1.subtractConstant)) ?
+        auto broadcastShape = NetworkHelper::isScalarLike(ov::as_type_ptr(dequantization1.subtractConstant)) ?
             Shape(dequantization1.subtract->get_output_partial_shape(0).rank().get_length(), 1) :
             dequantization1.subtractConstant->get_shape();
@@ -139,8 +139,8 @@ bool MatMulTransformation::transform(TransformationContext &context, ngraph::pat
     const auto mulConst1 = matMul->get_transpose_a() ? transpose(dequantization1.multiplyConstant) : dequantization1.multiplyConstant;
     auto mulConst2 = matMul->get_transpose_b() ? transpose(dequantization2.multiplyConstant) : dequantization2.multiplyConstant;
 
-    if (NetworkHelper::isScalarLike(as_type_ptr(mulConst2))) {
-        mulConst2 = NetworkHelper::toScalar(as_type_ptr(mulConst2));
+    if (NetworkHelper::isScalarLike(ov::as_type_ptr(mulConst2))) {
+        mulConst2 = NetworkHelper::toScalar(ov::as_type_ptr(mulConst2));
     } else {
         const auto constShape = mulConst2->get_shape();
         const size_t inputRank = matMul->get_input_partial_shape(0).rank().get_length();
@@ -194,7 +194,7 @@ bool MatMulTransformation::canBeTransformed(const TransformationContext& context
         return false;
     }
 
-    std::shared_ptr matMul = as_type_ptr(layer);
+    std::shared_ptr matMul = ov::as_type_ptr(layer);
     if (matMul == nullptr) {
         return false;
     }
@@ -252,7 +252,7 @@ bool MatMulTransformation::canBeTransformed(const TransformationContext& context
         }
     }
 
-    const auto fakeQuantize = as_type_ptr(layer->get_input_node_shared_ptr(1));
+    const auto fakeQuantize = ov::as_type_ptr(layer->get_input_node_shared_ptr(1));
     if (fakeQuantize) {
         if (!QuantizationDetails::outputLayoutIsSupported(fakeQuantize)) {
             return false;
diff --git a/inference-engine/src/low_precision_transformations/src/max_pool.cpp b/inference-engine/src/low_precision_transformations/src/max_pool.cpp
index 68a73cac59e..8cdfbfc7c5a 100644
--- a/inference-engine/src/low_precision_transformations/src/max_pool.cpp
+++ b/inference-engine/src/low_precision_transformations/src/max_pool.cpp
@@ -43,7 +43,7 @@ bool MaxPoolTransformation::canBeTransformed(const TransformationContext& contex
         return false;
     }
 
-    const std::vector scales = as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1))->cast_vector();
+    const std::vector scales = ov::as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1))->cast_vector();
     if (std::any_of(scales.begin(), scales.end(), [](const float value) { return value < 0.0; })) {
         return false;
     }
diff --git a/inference-engine/src/low_precision_transformations/src/multiply.cpp b/inference-engine/src/low_precision_transformations/src/multiply.cpp
index 923f77a7d20..c5a468cf21c 100644
--- a/inference-engine/src/low_precision_transformations/src/multiply.cpp
+++ b/inference-engine/src/low_precision_transformations/src/multiply.cpp
@@ -52,10 +52,10 @@ bool MultiplyTransformation::transform(TransformationContext& context, ngraph::p
     auto newMultiply = multiply;
 
     auto fold_fake_quantizes = [](std::shared_ptr& multiply, const size_t index) {
-        auto fakeQuantizeOnWeights = as_type_ptr(multiply->get_input_node_shared_ptr(index));
+        auto fakeQuantizeOnWeights = ov::as_type_ptr(multiply->get_input_node_shared_ptr(index));
         if (fakeQuantizeOnWeights != nullptr) {
             auto result = NetworkHelper::fold_fake_quantize(fakeQuantizeOnWeights);
-            if (is_type(result)) {
+            if (ov::is_type(result)) {
                 replace_node(fakeQuantizeOnWeights, result);
             }
         }
@@ -165,14 +165,14 @@ bool MultiplyTransformation::canBeTransformed(const TransformationContext& conte
     FakeQuantizeDequantization dequantization2 = pass::low_precision::NetworkHelper::getDequantization(layer, 1ul);
 
     if ((dequantization1.data.get_node() == nullptr) ||
-        (dequantization1.empty() && !is_type(dequantization1.data.get_node_shared_ptr()) &&
-        !is_type(dequantization2.data.get_node_shared_ptr()))) {
+        (dequantization1.empty() && !ov::is_type(dequantization1.data.get_node_shared_ptr()) &&
+        !ov::is_type(dequantization2.data.get_node_shared_ptr()))) {
         return false;
     }
 
     if ((dequantization2.data.get_node() == nullptr) ||
-        (dequantization2.empty() && !is_type(dequantization2.data.get_node_shared_ptr()) &&
-        !is_type(dequantization1.data.get_node_shared_ptr()))) {
+        (dequantization2.empty() && !ov::is_type(dequantization2.data.get_node_shared_ptr()) &&
+        !ov::is_type(dequantization1.data.get_node_shared_ptr()))) {
         return false;
     }
     return EltwiseBaseTransformation::canBeTransformed(context, layer);
diff --git a/inference-engine/src/low_precision_transformations/src/multiply_to_group_convolution.cpp b/inference-engine/src/low_precision_transformations/src/multiply_to_group_convolution.cpp
index 6851a159fee..162499bd4a4 100644
--- a/inference-engine/src/low_precision_transformations/src/multiply_to_group_convolution.cpp
+++ b/inference-engine/src/low_precision_transformations/src/multiply_to_group_convolution.cpp
@@ -40,7 +40,7 @@ bool MultiplyToGroupConvolutionTransformation::transform(TransformationContext&
     auto input = multiply->get_input_node_shared_ptr(0);
     auto constant = multiply->get_input_node_shared_ptr(1);
     auto inputIndex = 0;
-    if (!is_type(constant)) {
+    if (!ov::is_type(constant)) {
         input = multiply->get_input_node_shared_ptr(1);
         constant = multiply->get_input_node_shared_ptr(0);
         inputIndex = 1;
@@ -164,15 +164,15 @@ bool MultiplyToGroupConvolutionTransformation::canBeTransformed(const Transforma
     Shape constShape;
     int inputIndex;
-    if (const auto constant = as_type_ptr(operation->get_input_node_shared_ptr(1))) {
+    if (const auto constant = ov::as_type_ptr(operation->get_input_node_shared_ptr(1))) {
         inputIndex = 0;
         constShape = constant->get_shape();
-        if (is_type(operation->get_input_node_shared_ptr(0)) ||
-            (is_type(operation->get_input_node_shared_ptr(0)) &&
-            is_type(operation->get_input_node_shared_ptr(0)->get_input_node_shared_ptr(0)))) {
+        if (ov::is_type(operation->get_input_node_shared_ptr(0)) ||
+            (ov::is_type(operation->get_input_node_shared_ptr(0)) &&
+            ov::is_type(operation->get_input_node_shared_ptr(0)->get_input_node_shared_ptr(0)))) {
             return false;
         }
-    } else if (const auto constant = as_type_ptr(operation->get_input_node_shared_ptr(0))) {
+    } else if (const auto constant = ov::as_type_ptr(operation->get_input_node_shared_ptr(0))) {
         inputIndex = 1;
         constShape = constant->get_shape();
     } else {
@@ -209,7 +209,7 @@ bool MultiplyToGroupConvolutionTransformation::canBeTransformedToGroupConvolutio
     const auto parent0 = layer->get_input_node_shared_ptr(0);
     const auto parent1 = layer->get_input_node_shared_ptr(1);
 
-    if (!is_type(parent0) && !is_type(parent1)) {
+    if (!ov::is_type(parent0) && !ov::is_type(parent1)) {
         return false;
     }
@@ -224,10 +224,10 @@ bool MultiplyToGroupConvolutionTransformation::canBeTransformedToGroupConvolutio
 
 bool MultiplyToGroupConvolutionTransformation::isDynamicOrScalar(const std::shared_ptr& node) {
     auto getConstantIndex = [](const std::shared_ptr& node) -> int {
-        if (is_type(node->get_input_node_shared_ptr(1))) {
+        if (ov::is_type(node->get_input_node_shared_ptr(1))) {
            return 1;
        }
-        if (is_type(node->get_input_node_shared_ptr(0))) {
+        if (ov::is_type(node->get_input_node_shared_ptr(0))) {
            return 0;
        }
        return -1;
diff --git a/inference-engine/src/low_precision_transformations/src/mvn.cpp b/inference-engine/src/low_precision_transformations/src/mvn.cpp
index 7883235e42d..383688c2f2f 100644
--- a/inference-engine/src/low_precision_transformations/src/mvn.cpp
+++ b/inference-engine/src/low_precision_transformations/src/mvn.cpp
@@ -71,22 +71,22 @@ bool MVNTransformation::canBeTransformed(const TransformationContext& context, s
         return false;
     }
 
-    std::shared_ptr mvn = as_type_ptr(operation);
+    std::shared_ptr mvn = ov::as_type_ptr(operation);
     if (!mvn) {
-        mvn = as_type_ptr(operation);
+        mvn = ov::as_type_ptr(operation);
         if (!mvn) {
             return false;
         }
     }
 
-    const auto scalesConst = as_type_ptr(NetworkHelper::getConstantInput(mvn->get_input_node_shared_ptr(0)));
+    const auto scalesConst = ov::as_type_ptr(NetworkHelper::getConstantInput(mvn->get_input_node_shared_ptr(0)));
     bool isScalarScales = NetworkHelper::isScalarLike(scalesConst);
 
     AxisSet reduction_axes;
-    if (is_type(mvn)) {
-        reduction_axes = as_type_ptr(mvn)->get_reduction_axes();
+    if (ov::is_type(mvn)) {
+        reduction_axes = ov::as_type_ptr(mvn)->get_reduction_axes();
     } else {
-        reduction_axes = as_type_ptr(mvn->get_input_node_shared_ptr(1))->get_axis_set_val();
+        reduction_axes = ov::as_type_ptr(mvn->get_input_node_shared_ptr(1))->get_axis_set_val();
     }
 
     if (reduction_axes.count(1) == 0) {
@@ -115,22 +115,22 @@ bool MVNTransformation::transform(TransformationContext &context, ngraph::patter
         return false;
     }
 
-    std::shared_ptr mvn = as_type_ptr(operation);
+    std::shared_ptr mvn = ov::as_type_ptr(operation);
     if (!mvn) {
-        mvn = as_type_ptr(operation);
+        mvn = ov::as_type_ptr(operation);
     }
 
     bool normalizeVariance;
-    if (is_type(mvn)) {
-        normalizeVariance = as_type_ptr(mvn)->get_normalize_variance();
+    if (ov::is_type(mvn)) {
+        normalizeVariance = ov::as_type_ptr(mvn)->get_normalize_variance();
     } else {
-        normalizeVariance = as_type_ptr(mvn)->get_normalize_variance();
+        normalizeVariance = ov::as_type_ptr(mvn)->get_normalize_variance();
     }
 
     FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(mvn);
-    auto scalesConst = as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1));
+    auto scalesConst = ov::as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1));
     if (scalesConst == nullptr) {
-        scalesConst = as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(0));
+        scalesConst = ov::as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(0));
     }
 
     auto newScalesConst = scalesConst;
@@ -151,7 +151,7 @@ bool MVNTransformation::transform(TransformationContext &context, ngraph::patter
         }
     }
     std::shared_ptr newMVN;
-    if (is_type(mvn)) {
+    if (ov::is_type(mvn)) {
         newMVN = mvn->copy_with_new_inputs({dequantization.data});
     } else {
         newMVN = mvn->copy_with_new_inputs({dequantization.data, mvn->get_input_node_shared_ptr(1)});
diff --git a/inference-engine/src/low_precision_transformations/src/network_helper.cpp b/inference-engine/src/low_precision_transformations/src/network_helper.cpp
index 32c6a1ce523..6b4bda9da54 100644
--- a/inference-engine/src/low_precision_transformations/src/network_helper.cpp
+++ b/inference-engine/src/low_precision_transformations/src/network_helper.cpp
@@ -42,7 +42,7 @@ bool NetworkHelper::notAllChildrensAreFQ(const NodeVector& childrens) {
     // NOTE: This check was added for models that don't have FQ after AvgPool
     // They will have transparent precision as it was in old LPT.
     for (const auto& child : childrens) {
-        if (!is_type(child)) {
+        if (!ov::is_type(child)) {
             return true;
         }
     }
@@ -69,11 +69,11 @@ std::vector> NetworkHelper::consumers(std::shared_ptr& op) {
     const auto isNotConstantPathOperation = [](const std::shared_ptr& node) -> bool {
-        return is_type(node) ||
-            is_type(node) ||
-            is_type(node) ||
-            is_type(node) ||
-            is_type(node);
+        return ov::is_type(node) ||
+            ov::is_type(node) ||
+            ov::is_type(node) ||
+            ov::is_type(node) ||
+            ov::is_type(node);
     };
 
     if (isNotConstantPathOperation(op)) {
@@ -123,7 +123,7 @@ std::shared_ptr NetworkHelper::foldDequantizationConstant(
     // constant folding of constant
     op->constant_fold(outputs, inputs);
 
-    const auto result = as_type_ptr(outputs[outIdx].get_node_shared_ptr());
+    const auto result = ov::as_type_ptr(outputs[outIdx].get_node_shared_ptr());
     if (result == nullptr) {
         THROW_IE_LPT_EXCEPTION(*result) << "result of constant folding is not constant";
     }
@@ -191,9 +191,9 @@ size_t NetworkHelper::getInputChannelsCount(std::shared_ptr layer) {
 }
 
 size_t NetworkHelper::getGroupsCount(std::shared_ptr layer) {
-    if (is_type(layer)) {
+    if (ov::is_type(layer)) {
         return 1;
-    } else if (is_type(layer)) {
+    } else if (ov::is_type(layer)) {
         return layer->get_input_partial_shape(1)[0].get_length();    // input weights for opset1::GC is in format GOI..., see the specification
     } else {
         THROW_TRANSFORMATION_EXCEPTION << "Invalid layer type of " << layer->get_friendly_name() << "; expected Convolution or GroupConvolution";
@@ -221,13 +221,13 @@ std::shared_ptr NetworkHelper::swapMultiplyAndAdd(std::shared_ptr
     const auto multiplyParent1 = multiply->get_input_node_shared_ptr(0);
     const auto multiplyParent2 = multiply->get_input_node_shared_ptr(1);
-    auto multiplyInput = as_type_ptr(multiplyParent1);
-    auto multiplyConst = as_type_ptr(multiplyParent2);
+    auto multiplyInput = ov::as_type_ptr(multiplyParent1);
+    auto multiplyConst = ov::as_type_ptr(multiplyParent2);
     int multiplyInputBranch = 0;
 
     if (multiplyConst == nullptr) {
-        multiplyInput = as_type_ptr(multiplyParent2);
-        multiplyConst = as_type_ptr(multiplyParent1);
+        multiplyInput = ov::as_type_ptr(multiplyParent2);
+        multiplyConst = ov::as_type_ptr(multiplyParent1);
         multiplyInputBranch = 1;
     }
@@ -249,8 +249,8 @@ std::shared_ptr NetworkHelper::swapMultiplyAndAdd(std::shared_ptr
-    const std::vector bValues = as_type_ptr(b)->cast_vector();
-    const std::vector aValues = as_type_ptr(a)->cast_vector();
+    const std::vector bValues = ov::as_type_ptr(b)->cast_vector();
+    const std::vector aValues = ov::as_type_ptr(a)->cast_vector();
     const bool aBroadcasted = bValues.size() > aValues.size();
     const bool bBroadcasted = bValues.size() < aValues.size();
     std::vector bDivAValues(aBroadcasted ? bValues.size() : aValues.size());
@@ -399,19 +399,19 @@ std::shared_ptr NetworkHelper::toScalar(std::shared_ptr
 
 std::shared_ptr NetworkHelper::getConstantInput(std::shared_ptr node) {
-    std::shared_ptr constant1 = as_type_ptr(node->input_value(0).get_node_shared_ptr());
+    std::shared_ptr constant1 = ov::as_type_ptr(node->input_value(0).get_node_shared_ptr());
     if (!constant1) {
-        constant1 = as_type_ptr(node->input_value(1).get_node_shared_ptr());
+        constant1 = ov::as_type_ptr(node->input_value(1).get_node_shared_ptr());
     }
 
     return constant1;
 }
 
 int NetworkHelper::getConstantInputIndex(std::shared_ptr node) {
-    if (as_type_ptr(node->get_input_node_shared_ptr(1)) != nullptr) {
+    if (ov::as_type_ptr(node->get_input_node_shared_ptr(1)) != nullptr) {
         return 1;
     }
 
-    if (as_type_ptr(node->get_input_node_shared_ptr(0)) != nullptr) {
+    if (ov::as_type_ptr(node->get_input_node_shared_ptr(0)) != nullptr) {
         return 0;
     }
@@ -449,7 +449,7 @@ std::vector NetworkHelper::updateReshapeValues(
 }
 
 std::shared_ptr NetworkHelper::optimizeMultipliesAfter(std::shared_ptr node) {
-    std::shared_ptr multiply = as_type_ptr(std::move(node));
+    std::shared_ptr multiply = ov::as_type_ptr(std::move(node));
     if (!multiply) {
         THROW_IE_LPT_EXCEPTION(*multiply) << "Unexpected operation type";
     }
@@ -461,7 +461,7 @@ std::shared_ptr NetworkHelper::optimizeMultipliesAfter
     }
 
     auto nextMultiplyInput = *multiply->output(0).get_target_inputs().begin();
-    auto nextMultiply = as_type_ptr>(nextMultiplyInput.get_node()->shared_from_this());
+    auto nextMultiply = ov::as_type_ptr>(nextMultiplyInput.get_node()->shared_from_this());
    if (nextMultiply) {
         auto constant2 = getConstantInput(nextMultiply);
         if (!constant2 || constant2->output(0).get_target_inputs().size() != 1) {
@@ -472,7 +472,7 @@ std::shared_ptr NetworkHelper::optimizeMultipliesAfter
         auto multiplyResult = fold(constant1, constant2);
         {
             // optimize constant shape: used in rfcn-resnet101-coco
-            const auto multiplyResultConstant = as_type_ptr(multiplyResult);
+            const auto multiplyResultConstant = ov::as_type_ptr(multiplyResult);
             if ((multiplyResultConstant != nullptr) && NetworkHelper::isScalarLike(multiplyResultConstant)) {
                 multiplyResult = NetworkHelper::toScalar(multiplyResultConstant);
             }
@@ -496,10 +496,10 @@ std::shared_ptr NetworkHelper::optimizeMultipliesAfter
 }
 
 std::shared_ptr NetworkHelper::round(std::shared_ptr node, element::Type target_type) {
-    const auto constant = as_type_ptr(node);
+    const auto constant = ov::as_type_ptr(node);
     assert(constant);
 
-    const auto castedConstant = as_type_ptr(fold(
+    const auto castedConstant = ov::as_type_ptr(fold(
         fold(constant->output(0), ngraph::op::v5::Round::RoundMode::HALF_AWAY_FROM_ZERO),
         target_type));
@@ -525,7 +525,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
     if (dequantization.convert != nullptr) {
         const std::shared_ptr result = foldConvert(dequantization.data, dequantization.convert->get_element_type());
-        if (is_type(result)) {
+        if (ov::is_type(result)) {
             if (inPlace) {
                 copyInfo(dequantization.convert, result);
             }
@@ -543,7 +543,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
             const auto convertionResult = foldConvert(
                 dequantization.subtractConstant,
                 dequantization.subtractConvert->get_element_type());
-            if (is_type(convertionResult)) {
+            if (ov::is_type(convertionResult)) {
                 replace_node(dequantization.subtractConvert, convertionResult);
                 dequantization = NetworkHelper::getDequantization(node, branchIndex, inPlace);
            }
@@ -552,7 +552,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
         const std::shared_ptr result = fold(
             dequantization.subtract->get_input_node_shared_ptr(0),
             dequantization.subtract->get_input_node_shared_ptr(1));
-        if (is_type(result)) {
+        if (ov::is_type(result)) {
             if (inPlace) {
                 copyInfo(dequantization.subtract, result);
             }
@@ -571,7 +571,7 @@ FakeQuantizeDequantization NetworkHelper::foldDequantization(const std::shared_p
         std::shared_ptr result = fold(
             dequantization.multiply->get_input_node_shared_ptr(0),
             dequantization.multiply->get_input_node_shared_ptr(1));
-        if (!is_type(result)) {
+        if (!ov::is_type(result)) {
             return dequantization;
         }
         if (dequantization.multiply->get_output_element_type(0) != result->get_element_type()) {
@@ -649,7 +649,7 @@ std::shared_ptr NetworkHelper::fuseConvert(const std::shar
     }
 
     Node* node = targetInputs.begin()->get_node();
-    if (!is_type(node) ||
+    if (!ov::is_type(node) ||
         // TODO: LPT: avoid precision hardcode: to separate method: isSupportedPrecision
         ((node->get_output_element_type(0) != element::u8) && (node->get_output_element_type(0) != element::i8))) {
         return fakeQuantize;
@@ -715,15 +715,15 @@ std::shared_ptr NetworkHelper::foldFakeQuantize(
     const bool roundValuesArg,
     const bool roundValuesWasSet,
     const int outChannelsShapeIndex) {
-    if (is_type(fq->get_input_node_shared_ptr(0)) &&
-        is_type(fq->get_input_node_shared_ptr(1)) &&
-        is_type(fq->get_input_node_shared_ptr(2)) &&
-        is_type(fq->get_input_node_shared_ptr(3)) &&
-        is_type(fq->get_input_node_shared_ptr(4)) &&
-        op::util::constantIsEqualTo(as_type_ptr(fq->get_input_node_shared_ptr(1)), 0.f) &&
-        op::util::constantIsEqualTo(as_type_ptr(fq->get_input_node_shared_ptr(2)), 254.f) &&
-        op::util::constantIsEqualTo(as_type_ptr(fq->get_input_node_shared_ptr(3)), -127.f) &&
-        op::util::constantIsEqualTo(as_type_ptr(fq->get_input_node_shared_ptr(4)), 127.f)) {
+    if (ov::is_type(fq->get_input_node_shared_ptr(0)) &&
+        ov::is_type(fq->get_input_node_shared_ptr(1)) &&
+        ov::is_type(fq->get_input_node_shared_ptr(2)) &&
+        ov::is_type(fq->get_input_node_shared_ptr(3)) &&
+        ov::is_type(fq->get_input_node_shared_ptr(4)) &&
+        op::util::constantIsEqualTo(ov::as_type_ptr(fq->get_input_node_shared_ptr(1)), 0.f) &&
+        op::util::constantIsEqualTo(ov::as_type_ptr(fq->get_input_node_shared_ptr(2)), 254.f) &&
+        op::util::constantIsEqualTo(ov::as_type_ptr(fq->get_input_node_shared_ptr(3)), -127.f) &&
+        op::util::constantIsEqualTo(ov::as_type_ptr(fq->get_input_node_shared_ptr(4)), 127.f)) {
         const auto type1 = fq->input_value(0).get_element_type();
         const auto type2 = fq->input_value(3).get_element_type();
         if (type1.is_real() && type2.is_real()) {
@@ -744,7 +744,7 @@ std::shared_ptr NetworkHelper::foldFakeQuantize(
             foldConvert(fq->input_value(3), element::f32));
     }
 
-    auto constant = as_type_ptr(fq->get_input_node_shared_ptr(0));
+    auto constant = ov::as_type_ptr(fq->get_input_node_shared_ptr(0));
 
     if (constant) {
         const bool roundValues = roundValuesWasSet ? roundValuesArg : fq->get_output_element_type(0).is_integral();
@@ -774,10 +774,10 @@ std::shared_ptr NetworkHelper::foldFakeQuantize(
         const size_t H = constShape.size() > 2lu ? constShape.size() == 3lu ? constShape[2] : constShape[constShape.size() - 2] : 1;
         const size_t W = constShape.size() > 3lu ? constShape[constShape.size() - 1] : 1;
-        const auto inputLowValues = as_type_ptr(fq->get_input_node_shared_ptr(1))->cast_vector();
-        const auto inputHighValues = as_type_ptr(fq->get_input_node_shared_ptr(2))->cast_vector();
-        const auto outputLowValues = as_type_ptr(fq->get_input_node_shared_ptr(3))->cast_vector();
-        const auto outputHighValues = as_type_ptr(fq->get_input_node_shared_ptr(4))->cast_vector();
+        const auto inputLowValues = ov::as_type_ptr(fq->get_input_node_shared_ptr(1))->cast_vector();
+        const auto inputHighValues = ov::as_type_ptr(fq->get_input_node_shared_ptr(2))->cast_vector();
+        const auto outputLowValues = ov::as_type_ptr(fq->get_input_node_shared_ptr(3))->cast_vector();
+        const auto outputHighValues = ov::as_type_ptr(fq->get_input_node_shared_ptr(4))->cast_vector();
 
         const size_t inputLowSize = inputLowValues.size();
         const size_t inputHighSize = inputHighValues.size();
@@ -848,7 +848,7 @@ std::shared_ptr NetworkHelper::composeFakeQuantize(const s
         if (targetInputs.size() != 1ul) {
             return nullptr;
         }
-        if (is_type(targetInputs.begin()->get_node())) {
+        if (ov::is_type(targetInputs.begin()->get_node())) {
             parent = targetInputs.begin()->get_node()->shared_from_this();
         }
@@ -856,7 +856,7 @@ std::shared_ptr NetworkHelper::composeFakeQuantize(const s
         if (targetInputs.size() != 1ul) {
             return nullptr;
         }
-        if (is_type(targetInputs.begin()->get_node())) {
+        if (ov::is_type(targetInputs.begin()->get_node())) {
             parent = targetInputs.begin()->get_node()->shared_from_this();
         }
@@ -864,7 +864,7 @@ std::shared_ptr NetworkHelper::composeFakeQuantize(const s
         if (targetInputs.size() != 1ul) {
             return nullptr;
         }
-        if (is_type(targetInputs.begin()->get_node())) {
+        if (ov::is_type(targetInputs.begin()->get_node())) {
             parent = targetInputs.begin()->get_node()->shared_from_this();
         }
@@ -970,8 +970,8 @@ std::tuple, std::shared_ptr> NetworkHelper::decompos
     const auto outputLow = fq->input_value(3);
     const auto outputHigh = fq->input_value(4);
 
-    std::vector outputLowValues = as_type_ptr(outputLow.get_node_shared_ptr())->cast_vector();
-    std::vector outputHighValues = as_type_ptr(outputHigh.get_node_shared_ptr())->cast_vector();
+    std::vector outputLowValues = ov::as_type_ptr(outputLow.get_node_shared_ptr())->cast_vector();
+    std::vector outputHighValues = ov::as_type_ptr(outputHigh.get_node_shared_ptr())->cast_vector();
     size_t outputSize = outputLowValues.size();
     std::vector minValues(outputSize, min);
     std::vector maxValues(outputSize, max);
@@ -1035,7 +1035,7 @@ std::tuple, std::shared_ptr> NetworkHelper::decompos
         }
     }
 
-    if ((shift != nullptr) && isZero(as_type_ptr(shift))) {
+    if ((shift != nullptr) && isZero(ov::as_type_ptr(shift))) {
         shift = nullptr;
     }
@@ -1057,12 +1057,12 @@ std::tuple, std::shared_ptr> NetworkHelper::decompos
     std::shared_ptr convert2;
     if (updatePrecision) {
         std::shared_ptr convert;
-        std::shared_ptr newFqConstant = as_type_ptr(newFQ);
+        std::shared_ptr newFqConstant = ov::as_type_ptr(newFQ);
 
-        if (is_type(newFQ)) {
+        if (ov::is_type(newFQ)) {
             convert = foldConvert(newFQ, precision);
-        } else if (is_type(newFQ)) {
-            newFQ = setOutDataPrecision(as_type_ptr(newFQ), precision);
+        } else if (ov::is_type(newFQ)) {
+            newFQ = setOutDataPrecision(ov::as_type_ptr(newFQ), precision);
             convert = newFQ;
         } else {
             THROW_IE_LPT_EXCEPTION(*newFQ) << "unexpected operation type";
@@ -1191,20 +1191,20 @@ FakeQuantizeDequantization NetworkHelper::createDequantizationFromFakeQuantize(
 
     // TODO: threshold values have to used here to avoid shifts
 
-    const std::shared_ptr scale = as_type_ptr(foldConvert(fold(
+    const std::shared_ptr scale = ov::as_type_ptr(foldConvert(fold(
         fold(outputHigh, outputLow),
         fold(newMax, newMin)), deqPrecision));
     assert(scale != nullptr);
 
     std::shared_ptr shift = hasZeroPoint ?
-        as_type_ptr(foldConvert(fold(
+        ov::as_type_ptr(foldConvert(fold(
             fold(fold(newMin, outputHigh), fold(newMax, outputLow)),
             fold(outputHigh, outputLow)), deqPrecision)) :
         nullptr;
     assert((!hasZeroPoint) || (hasZeroPoint && shift != nullptr));
 
     if (shift != nullptr) {
-        std::shared_ptr shiftConst = as_type_ptr(shift);
+        std::shared_ptr shiftConst = ov::as_type_ptr(shift);
         if (isScalarLike(shiftConst)) {
             auto scalar = toScalar(shiftConst);
             if (op::util::constantIsEqualTo(scalar, 0)) {
@@ -1241,7 +1241,7 @@ FakeQuantizeDequantization NetworkHelper::createDequantizationFromFakeQuantize(
 }
 
 bool NetworkHelper::areQuantizeAndDequantizeSupportedForSubtract(const std::shared_ptr& node) {
-    if (!is_type(node)) {
+    if (!ov::is_type(node)) {
         return false;
     }
@@ -1255,7 +1255,7 @@ bool NetworkHelper::areQuantizeAndDequantizeSupportedForSubtract(const std::shar
 }
 
 bool NetworkHelper::areQuantizeAndDequantizeSupportedForMultiply(const std::shared_ptr& node) {
-    if (!is_type(node)) {
+    if (!ov::is_type(node)) {
         return false;
     }
@@ -1266,14 +1266,14 @@ bool NetworkHelper::areQuantizeAndDequantizeSupportedForMultiply(const std::shar
     }
 
     const auto dataNode = dequantization.data.get_node();
-    if (is_type(dataNode)) {
-        const auto quantize = as_type_ptr(dataNode->get_input_node_shared_ptr(0));
+    if (ov::is_type(dataNode)) {
+        const auto quantize = ov::as_type_ptr(dataNode->get_input_node_shared_ptr(0));
         if (quantize == nullptr) {
             return false;
         }
 
         return NetworkHelper::isQuantizeSupported(quantize);
-    } else if (is_type(dataNode)) {
+    } else if (ov::is_type(dataNode)) {
         return true;
     }
@@ -1286,15 +1286,15 @@ bool NetworkHelper::isQuantizeSupported(const std::shared_ptr& node, const size_t parentIndex, const bool inPlace) {
     auto getDataIndex = [](const std::shared_ptr& node) {
-        if (is_type(node->get_input_node_ptr(1))) {
+        if (ov::is_type(node->get_input_node_ptr(1))) {
             return 0ul;
         }
 
-        if (is_type(node->get_input_node_ptr(1)) && is_type(node->get_input_node_ptr(1)->get_input_node_ptr(0))) {
+        if (ov::is_type(node->get_input_node_ptr(1)) && ov::is_type(node->get_input_node_ptr(1)->get_input_node_ptr(0))) {
             return 0ul;
         }
 
-        if (is_type(node->get_input_node_ptr(0)) && is_type(node->get_input_node_ptr(0)->get_input_node_ptr(0))) {
+        if (ov::is_type(node->get_input_node_ptr(0)) && ov::is_type(node->get_input_node_ptr(0)->get_input_node_ptr(0))) {
             return 1ul;
         }
@@ -1303,7 +1303,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt
     Output dataNode = inPlace ? std::const_pointer_cast(node)->output(0) : node->input_value(parentIndex);
-    const std::shared_ptr multiply = as_type_ptr(dataNode.get_node_shared_ptr());
+    const std::shared_ptr multiply = ov::as_type_ptr(dataNode.get_node_shared_ptr());
     std::shared_ptr multiplyConstant;
     if (multiply != nullptr) {
         if (!FakeQuantizeDequantization::checkShape(multiply)) {
@@ -1317,7 +1317,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt
         dataNode = multiply->get_input_source_output(getDataIndex(multiply));
     }
 
-    const std::shared_ptr subtract = as_type_ptr(dataNode.get_node_shared_ptr());
+    const std::shared_ptr subtract = ov::as_type_ptr(dataNode.get_node_shared_ptr());
     std::shared_ptr subtractConvert;
     std::shared_ptr subtractConstant;
     if (subtract != nullptr) {
@@ -1332,7 +1332,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantization(const std::shared_pt
         dataNode = subtract->get_input_source_output(getDataIndex(subtract));
     }
 
-    const std::shared_ptr convert = as_type_ptr(dataNode.get_node_shared_ptr());
+    const std::shared_ptr convert = ov::as_type_ptr(dataNode.get_node_shared_ptr());
     if (convert != nullptr) {
         if ((convert->input(0).get_element_type() != element::i8) && (convert->input(0).get_element_type() != element::u8) &&
             (convert->output(0).get_element_type() != element::f32)) {
@@ -1353,7 +1353,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantizationBelow(const std::shar
     std::shared_ptr lastNode = targetInputs.begin()->get_node()->shared_from_this();
 
-    const std::shared_ptr convert = as_type_ptr(lastNode);
+    const std::shared_ptr convert = ov::as_type_ptr(lastNode);
     if (convertIsMandatory && (convert == nullptr)) {
         return FakeQuantizeDequantization();
     }
@@ -1371,7 +1371,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantizationBelow(const std::shar
         lastNode = inputs.begin()->get_node()->shared_from_this();
     }
 
-    const std::shared_ptr subtract = as_type_ptr(lastNode);
+    const std::shared_ptr subtract = ov::as_type_ptr(lastNode);
     std::shared_ptr subtractConvert;
     std::shared_ptr subtractConstant;
     if (subtract != nullptr) {
@@ -1387,7 +1387,7 @@ FakeQuantizeDequantization NetworkHelper::getDequantizationBelow(const std::shar
         lastNode = inputs.begin()->get_node()->shared_from_this();
     }
 
-    const std::shared_ptr multiply = as_type_ptr(lastNode);
+    const std::shared_ptr multiply = ov::as_type_ptr(lastNode);
     std::shared_ptr multiplyConstant;
     if (multiply != nullptr) {
         FakeQuantizeDequantization::fillDequantizationParams(multiply, multiplyConstant);
@@ -1403,18 +1403,18 @@ FakeQuantizeDequantization NetworkHelper::normalizeDequantization(FakeQuantizeDe
     if (dequantization.empty()) {
         return dequantization;
     }
-    if (dequantization.multiply != nullptr && as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(0))) {
+    if (dequantization.multiply != nullptr && ov::as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(0))) {
         std::shared_ptr leftParent = dequantization.multiply->get_input_node_shared_ptr(0);
         std::shared_ptr rightParent = dequantization.multiply->get_input_node_shared_ptr(1);
-        std::shared_ptr normalized_multiply = as_type_ptr(
+        std::shared_ptr normalized_multiply = ov::as_type_ptr(
             dequantization.multiply->clone_with_new_inputs({rightParent, leftParent}));
         replace_node(dequantization.multiply, normalized_multiply);
         dequantization.multiply = normalized_multiply;
     }
-    if (dequantization.subtract != nullptr && as_type_ptr(dequantization.subtract->get_input_node_shared_ptr(0))) {
+    if (dequantization.subtract != nullptr && ov::as_type_ptr(dequantization.subtract->get_input_node_shared_ptr(0))) {
         std::shared_ptr leftParent = dequantization.subtract->get_input_node_shared_ptr(0);
         std::shared_ptr rightParent = dequantization.subtract->get_input_node_shared_ptr(1);
-        std::shared_ptr normalized_subtract = as_type_ptr(
+        std::shared_ptr normalized_subtract = ov::as_type_ptr(
             dequantization.subtract->clone_with_new_inputs({rightParent, leftParent}));
         replace_node(dequantization.subtract, normalized_subtract);
         dequantization.subtract = normalized_subtract;
@@ -1424,7 +1424,7 @@ std::shared_ptr NetworkHelper::normalizeDequantizationShape(const std::shared_ptr& eltwise) {
     const size_t constantIdx = getConstantInputIndex(eltwise);
-    const auto constant = as_type_ptr(eltwise->get_input_node_shared_ptr(constantIdx));
+    const auto constant = ov::as_type_ptr(eltwise->get_input_node_shared_ptr(constantIdx));
 
     const auto getConstWithNormalizeShape = [](
         const std::shared_ptr& eltwise,
@@ -1443,7 +1443,7 @@ std::shared_ptr NetworkHelper::normalizeDequantizationShape(co
                 constant,
                 op::Constant::create(element::i32, Shape{ unsqueezeConstantShape.size() }, unsqueezeConstantShape));
 
-            return as_type_ptr(newConstant);
+            return ov::as_type_ptr(newConstant);
         } else {
             return constant;
         }
@@ -1473,7 +1473,7 @@ FakeQuantizeDequantizationValues NetworkHelper::createEmptyValues(const FakeQuan
 }
 
 bool NetworkHelper::isZeroConst(const std::shared_ptr& node) {
-    std::shared_ptr constant = as_type_ptr(node);
+    std::shared_ptr constant = ov::as_type_ptr(node);
 
     if (constant == nullptr)
         return false;
@@ -1492,13 +1492,13 @@ bool NetworkHelper::isZeroConst(const std::shared_ptr& node) {
 
 std::shared_ptr NetworkHelper::optimizeSubtract(std::shared_ptr subtract) {
     auto convertOnSubtract = subtract->input_value(0).get_node_shared_ptr();
-    if (as_type_ptr(convertOnSubtract) == nullptr) {
+    if (ov::as_type_ptr(convertOnSubtract) == nullptr) {
         return subtract;
     }
 
     // TODO: replace assert to condition and omit conversion part if there is no convert
     // TODO: also check convertInputType to understand if we really want to propagate type
-    assert(as_type_ptr(convertOnSubtract));
+    assert(ov::as_type_ptr(convertOnSubtract));
     const element::Type convertInputType = convertOnSubtract->get_input_element_type(0);
     const element::Type convertOutputType = convertOnSubtract->get_output_element_type(0);
@@ -1508,7 +1508,7 @@ std::shared_ptr NetworkHelper::optimizeSubtract(std::shared_ptrinput_value(0);
 
     const auto subtractParent = subtract->get_input_node_shared_ptr(1);
-    if (is_type(subtractParent)) {
+    if (ov::is_type(subtractParent)) {
         std::shared_ptr replacement;
 
         auto shift = subtract->input_value(1).get_node_shared_ptr();
@@ -1533,7 +1533,7 @@ std::shared_ptr NetworkHelper::optimizeSubtract(std::shared_ptr(subtractParent) && is_type(subtractParent->get_input_node_shared_ptr(0))) {
+    } else if (ov::is_type(subtractParent) && ov::is_type(subtractParent->get_input_node_shared_ptr(0))) {
         auto replacement = std::make_shared>(data, subtractParent->get_input_node_shared_ptr(0));
         NetworkHelper::copyInfo(subtract, replacement);
         NetworkHelper::setOutDataPrecisionForTypeRelaxed(replacement, convertOutputType);
@@ -1652,7 +1652,7 @@ bool NetworkHelper::checkConstantValuePrecision(const element::Type expectedPrec
         return true;
     }
 
-    std::shared_ptr constantOp = as_type_ptr(constant);
+    std::shared_ptr constantOp = ov::as_type_ptr(constant);
     if (constantOp == nullptr) {
         return false;
     }
@@ -1687,7 +1687,7 @@ size_t NetworkHelper::getParentOutputIndex(const std::shared_ptr&
 }
 
 std::shared_ptr NetworkHelper::toScalarIfPossible(std::shared_ptr node) {
-    std::shared_ptr constant = as_type_ptr(node);
+    std::shared_ptr constant = ov::as_type_ptr(node);
     if (constant == nullptr) {
         return node;
     }
@@ -1700,7 +1700,7 @@ std::shared_ptr NetworkHelper::toScalarIfPossible(std::shared_ptr no
 }
 
 std::shared_ptr foldConvert(const Output& node, const element::Type targetPrecision) {
-    if (is_type(node.get_node_shared_ptr()) && (node.get_element_type() == targetPrecision)) {
+    if (ov::is_type(node.get_node_shared_ptr()) && (node.get_element_type() == targetPrecision)) {
         return node.get_node_shared_ptr();
     }
@@ -1713,9 +1713,9 @@ bool NetworkHelper::checkZeroPoint(const std::shared_ptr& node, const Data
     }
 
     float min, max;
-    if (is_type(node)) {
+    if (ov::is_type(node)) {
         const auto parent = node->get_input_node_shared_ptr(0);
-        const auto intNode = is_type(parent) ? parent : node;
+        const auto intNode = ov::is_type(parent) ? parent : node;
         const auto type = intNode->get_input_element_type(0);
         if (type == element::u8 || type == element::i8) {
             min = DataPrecision::getMinValue(type, 256) - 0.5f;
@@ -1724,12 +1724,12 @@ bool NetworkHelper::checkZeroPoint(const std::shared_ptr& node, const Data
             return type == element::f32 || type == element::f16;
         }
         auto subtract1input = node->get_input_node_shared_ptr(1);
-        if (is_type(subtract1input)) {
+        if (ov::is_type(subtract1input)) {
             return true;
         }
-        auto subtractConst = as_type_ptr(subtract1input);
+        auto subtractConst = ov::as_type_ptr(subtract1input);
         if (!subtractConst) {
-            subtractConst = as_type_ptr(node->get_input_node_shared_ptr(1)->get_input_node_shared_ptr(0));
+            subtractConst = ov::as_type_ptr(node->get_input_node_shared_ptr(1)->get_input_node_shared_ptr(0));
             if (subtractConst == nullptr) {
                 return false;
             }
@@ -1739,13 +1739,13 @@ bool NetworkHelper::checkZeroPoint(const std::shared_ptr& node, const Data
             return (val < min) || (val > max); })) {
             return false;
         }
-    } else if (is_type(node)) {
+    } else if (ov::is_type(node)) {
         if (!dataPrecision.hasZeroPoint) {
             return true;
         }
         min = dataPrecision.min - 0.5f;
         max = dataPrecision.max + 0.5f;
-        const auto quantizationDetails = QuantizationDetails::getDetails(as_type_ptr(node));
+        const auto quantizationDetails = QuantizationDetails::getDetails(ov::as_type_ptr(node));
         for (size_t i = 0; i < quantizationDetails.outputLowValues.size(); ++i) {
             float shift;
             if (quantizationDetails.outputHighValues[i] != quantizationDetails.outputLowValues[i]) {
diff --git a/inference-engine/src/low_precision_transformations/src/normalize_l2.cpp b/inference-engine/src/low_precision_transformations/src/normalize_l2.cpp
index 1d269094762..d7ca932335b 100644
--- a/inference-engine/src/low_precision_transformations/src/normalize_l2.cpp
+++ b/inference-engine/src/low_precision_transformations/src/normalize_l2.cpp
@@ -64,16 +64,16 @@ bool NormalizeL2Transformation::canBeTransformed(const TransformationContext& co
     }
 
     const std::shared_ptr multiply = operation->get_input_node_shared_ptr(0);
-    auto scalesConst = as_type_ptr(multiply->get_input_node_shared_ptr(1));
+    auto scalesConst = ov::as_type_ptr(multiply->get_input_node_shared_ptr(1));
     if (scalesConst == nullptr) {
-        scalesConst = as_type_ptr(multiply->get_input_node_shared_ptr(0));
+        scalesConst = ov::as_type_ptr(multiply->get_input_node_shared_ptr(0));
     }
     if (scalesConst == nullptr) {
         return false;
     }
 
     // TODO: Expand transformation for all cases of axes values
-    const auto axes = as_type_ptr(operation->get_input_node_shared_ptr(1));
+    const auto axes = ov::as_type_ptr(operation->get_input_node_shared_ptr(1));
     const std::vector axesAcrossSpatial = { 1 };
     const std::vector axesByChannels = { 1, 2, 3 };
@@ -104,13 +104,13 @@ bool NormalizeL2Transformation::transform(TransformationContext &context, ngraph
         return false;
     }
 
-    auto normalize = as_type_ptr(NetworkHelper::separateInStandaloneBranch(operation));
+    auto normalize = ov::as_type_ptr(NetworkHelper::separateInStandaloneBranch(operation));
 
-    const auto axes = as_type_ptr(normalize->get_input_node_shared_ptr(1));
+    const auto axes = ov::as_type_ptr(normalize->get_input_node_shared_ptr(1));
     FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(normalize);
-    auto scalesConst = as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1));
+    auto scalesConst = ov::as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(1));
     if (scalesConst == nullptr) {
-        scalesConst = as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(0));
+        scalesConst = ov::as_type_ptr(dequantization.multiply->get_input_node_shared_ptr(0));
     }
 
     std::shared_ptr newScalesConst;
diff --git a/inference-engine/src/low_precision_transformations/src/pad.cpp b/inference-engine/src/low_precision_transformations/src/pad.cpp
index 88141e8b5fc..696c1683995 100644
--- a/inference-engine/src/low_precision_transformations/src/pad.cpp
+++ b/inference-engine/src/low_precision_transformations/src/pad.cpp
@@ -40,8 +40,8 @@ bool PadTransformation::transform(TransformationContext& context, ngraph::patter
         return false;
     }
 
-    const auto pad = as_type_ptr(NetworkHelper::separateInStandaloneBranch(m.get_match_root()));
-    const auto padConstant = as_type_ptr(pad->get_input_node_shared_ptr(3));
+    const auto pad = ov::as_type_ptr(NetworkHelper::separateInStandaloneBranch(m.get_match_root()));
+    const auto padConstant = ov::as_type_ptr(pad->get_input_node_shared_ptr(3));
     const auto padConstantValue = padConstant->cast_vector()[0];
 
     const auto padsBegin = pad->get_pads_begin();
@@ -67,7 +67,7 @@ bool PadTransformation::transform(TransformationContext& context, ngraph::patter
             bcastedShape[padIdx] = inputPShape[padIdx].get_length();
 
             const auto bCastConst = opset1::Constant::create(element::i32, Shape{bcastedShape.size()}, bcastedShape);
-            return as_type_ptr(fold(constant, bCastConst));
+            return ov::as_type_ptr(fold(constant, bCastConst));
         };
 
         if (dequantization.subtract && shape_size(dequantization.subtractConstant->get_shape()) == 1ul) {
@@ -114,7 +114,7 @@ bool PadTransformation::transform(TransformationContext& context, ngraph::patter
                 const auto endConst = opset1::Constant::create(element::u32, { padsForConstantEnd.size() }, padsForConstantEnd);
                 const auto padValueConstant = opset1::Constant::create(constant->get_element_type(), Shape{}, { padVal });
                 const auto foldedConstant = fold(constant, beginConst, endConst, padValueConstant, padMode);
-                return as_type_ptr(foldedConstant);
+                return ov::as_type_ptr(foldedConstant);
             } else {
                 return constant;
             }
@@ -157,7 +157,7 @@ bool PadTransformation::canBeTransformed(const TransformationContext& context, s
         return false;
     }
 
-    const auto pad = as_type_ptr(op);
+    const auto pad = ov::as_type_ptr(op);
     if (!pad) {
         return false;
     }
@@ -231,7 +231,7 @@ bool PadTransformation::canBeTransformed(const TransformationContext& context, s
         return false;
     }
 
-    const auto constant = as_type_ptr(pad->get_input_node_shared_ptr(3));
+    const auto constant = ov::as_type_ptr(pad->get_input_node_shared_ptr(3));
     const auto constantValue = constant->cast_vector()[0];
     if (constantValue != 0.f && !padAndDqByTheSameDimension(dequantization.multiplyConstant)) {
         return false;
diff --git a/inference-engine/src/low_precision_transformations/src/pull_reshape_through_dequantization.cpp b/inference-engine/src/low_precision_transformations/src/pull_reshape_through_dequantization.cpp
index 4c4679c8046..68617278844 100644
--- a/inference-engine/src/low_precision_transformations/src/pull_reshape_through_dequantization.cpp
+++ b/inference-engine/src/low_precision_transformations/src/pull_reshape_through_dequantization.cpp
@@ -23,13 +23,13 @@ std::shared_ptr moveThroughElementwise(const std::shared_ptr& reshap
     const auto reshapeValues = reshape->get_input_node_shared_ptr(1);
     NGRAPH_CHECK(reshapeValues != nullptr, "Reshape constant was not found");
 
-    auto elementwiseValuesConvert = as_type_ptr(elementwise->get_input_node_shared_ptr(1ul));
+    auto elementwiseValuesConvert = ov::as_type_ptr(elementwise->get_input_node_shared_ptr(1ul));
     auto elementwiseValues = elementwiseValuesConvert == nullptr ?
         elementwise->get_input_node_shared_ptr(1ul) :
         elementwiseValuesConvert->get_input_node_shared_ptr(0ul);
-    assert(is_type(elementwiseValues));
+    assert(ov::is_type(elementwiseValues));
 
-    const std::shared_ptr newReshape = as_type_ptr(reshape->clone_with_new_inputs({
+    const std::shared_ptr newReshape = ov::as_type_ptr(reshape->clone_with_new_inputs({
         elementwise->get_input_node_shared_ptr(0ul),
         reshapeValues }));
@@ -39,7 +39,7 @@ std::shared_ptr moveThroughElementwise(const std::shared_ptr& reshap
         if (!elementwiseValuesShape.empty() && (elementwiseValuesShape.size() != 1ul)) {
             // update shape constant value to avoid eltwise constan value broadcasting
             const Shape elementwiseShape = elementwise->output(0).get_shape();
-            const std::vector reshapeValuesVector = as_type_ptr(reshapeValues)->cast_vector();
+            const std::vector reshapeValuesVector = ov::as_type_ptr(reshapeValues)->cast_vector();
 
             const std::vector newReshapeValuesVector = ngraph::pass::low_precision::NetworkHelper::updateReshapeValues(
                 elementwiseValuesShape,
@@ -54,8 +54,8 @@ std::shared_ptr moveThroughElementwise(const std::shared_ptr& reshap
             newElementwiseValues = ngraph::pass::low_precision::fold_reshape(
                 elementwiseValues->output(0),
                 newReshapeValues->output(0),
-                as_type_ptr(reshape)->get_special_zero());
-            assert(is_type(newElementwiseValues));
+                ov::as_type_ptr(reshape)->get_special_zero());
+            assert(ov::is_type(newElementwiseValues));
         } else {
             newElementwiseValues = elementwiseValues;
         }
@@ -113,18 +113,18 @@ ngraph::pass::low_precision::PullReshapeThroughDequantization::PullReshapeThroug
         auto reshape = opsMap.find(reshapeWrapper)->second.get_node()->shared_from_this();
 
         auto child = reshape->get_output_target_inputs(0).begin()->get_node();
-        if (is_type(child)) {
+        if (ov::is_type(child)) {
             return false;
         }
 
         while (reshape != nullptr) {
             const auto parent = reshape->get_input_node_shared_ptr(0);
-            if (is_type(parent) || is_type(parent)) {
+            if (ov::is_type(parent) || ov::is_type(parent)) {
                 reshape = pull_reshape_through_dequantization::moveThroughElementwise(reshape, parent);
-            } else if (is_type(parent)) {
+            } else if (ov::is_type(parent)) {
                 reshape = pull_reshape_through_dequantization::moveThroughConvert(reshape, parent);
-            } else if (is_type(parent)) {
-                pull_reshape_through_dequantization::fuseConstant(reshape, as_type_ptr(parent));
+            } else if (ov::is_type(parent)) {
+                pull_reshape_through_dequantization::fuseConstant(reshape, ov::as_type_ptr(parent));
                 reshape = nullptr;
nullptr; } else { THROW_IE_LPT_EXCEPTION(*parent) << "unexepcted operation type"; diff --git a/inference-engine/src/low_precision_transformations/src/pull_transpose_through_dequantization.cpp b/inference-engine/src/low_precision_transformations/src/pull_transpose_through_dequantization.cpp index a8dd26d26a1..3ee344884dc 100644 --- a/inference-engine/src/low_precision_transformations/src/pull_transpose_through_dequantization.cpp +++ b/inference-engine/src/low_precision_transformations/src/pull_transpose_through_dequantization.cpp @@ -24,11 +24,11 @@ std::shared_ptr moveThroughElementwise(const std::shared_ptr& transp const auto transposeValues = transpose->get_input_node_shared_ptr(1); NGRAPH_CHECK(transposeValues != nullptr, "transpose constant was not found"); - auto elementwiseValuesConvert = as_type_ptr(elementwise->get_input_node_shared_ptr(1ul)); + auto elementwiseValuesConvert = ov::as_type_ptr(elementwise->get_input_node_shared_ptr(1ul)); auto elementwiseValues = elementwiseValuesConvert == nullptr ? elementwise->get_input_node_shared_ptr(1ul) : elementwiseValuesConvert->get_input_node_shared_ptr(0ul); - assert(is_type(elementwiseValues)); + assert(ov::is_type(elementwiseValues)); const auto transposeValuesShape = transposeValues->output(0).get_shape(); const auto elementwiseValuesShape = elementwiseValues->output(0).get_shape(); @@ -43,17 +43,17 @@ std::shared_ptr moveThroughElementwise(const std::shared_ptr& transp element::i64, Shape{ shape_size(transposeValuesShape) }, std::vector(shape_size(transposeValuesShape), 1ul))); - assert(is_type(elementwiseValues)); + assert(ov::is_type(elementwiseValues)); } - const std::shared_ptr newTranspose = as_type_ptr(transpose->clone_with_new_inputs({ + const std::shared_ptr newTranspose = ov::as_type_ptr(transpose->clone_with_new_inputs({ elementwise->get_input_node_shared_ptr(0ul), transposeValues })); const auto newElementwiseValues = ngraph::pass::low_precision::fold( elementwiseValues->output(0), transposeValues->output(0)); - assert(is_type(newElementwiseValues)); + assert(ov::is_type(newElementwiseValues)); const auto newElementwise = elementwise->clone_with_new_inputs({ newTranspose, @@ -112,12 +112,12 @@ ngraph::pass::low_precision::PullTransposeThroughDequantization::PullTransposeTh while (transpose != nullptr) { const auto parent = transpose->get_input_node_shared_ptr(0); - if (is_type(parent) || is_type(parent)) { + if (ov::is_type(parent) || ov::is_type(parent)) { transpose = pull_transpose_through_dequantization::moveThroughElementwise(transpose, parent); - } else if (is_type(parent)) { + } else if (ov::is_type(parent)) { transpose = pull_transpose_through_dequantization::moveThroughConvert(transpose, parent); - } else if (is_type(parent)) { - pull_transpose_through_dequantization::fuseConstant(transpose, as_type_ptr(parent)); + } else if (ov::is_type(parent)) { + pull_transpose_through_dequantization::fuseConstant(transpose, ov::as_type_ptr(parent)); transpose = nullptr; } else { THROW_IE_LPT_EXCEPTION(*parent) << "unexepcted operation type"; diff --git a/inference-engine/src/low_precision_transformations/src/quantization_details.cpp b/inference-engine/src/low_precision_transformations/src/quantization_details.cpp index ca97aae0dc3..79486394a2f 100644 --- a/inference-engine/src/low_precision_transformations/src/quantization_details.cpp +++ b/inference-engine/src/low_precision_transformations/src/quantization_details.cpp @@ -49,21 +49,21 @@ QuantizationDetails::QuantizationDetails(const size_t levels, const std::vector< 
outputHighValues(outputHighValues) {} bool QuantizationDetails::outputLayoutIsSupported(std::shared_ptr quantize) { - return is_type(quantize->get_input_node_ptr(1)) && - is_type(quantize->get_input_node_ptr(2)) && - is_type(quantize->get_input_node_ptr(3)) && - is_type(quantize->get_input_node_ptr(4)); + return ov::is_type(quantize->get_input_node_ptr(1)) && + ov::is_type(quantize->get_input_node_ptr(2)) && + ov::is_type(quantize->get_input_node_ptr(3)) && + ov::is_type(quantize->get_input_node_ptr(4)); } void QuantizationDetails::getInputIntervals( std::shared_ptr quantize, std::vector& inputLowValues, std::vector& inputHighValues) { - std::shared_ptr inputLowLayer = as_type_ptr(quantize->get_input_node_shared_ptr(1)); + std::shared_ptr inputLowLayer = ov::as_type_ptr(quantize->get_input_node_shared_ptr(1)); const std::vector& inputLowBlobValues = getBlobValue(inputLowLayer); inputLowValues.insert(inputLowValues.end(), inputLowBlobValues.begin(), inputLowBlobValues.end()); - std::shared_ptr inputHighLayer = as_type_ptr(quantize->get_input_node_shared_ptr(2)); + std::shared_ptr inputHighLayer = ov::as_type_ptr(quantize->get_input_node_shared_ptr(2)); const std::vector inputHighBlobValues = getBlobValue(inputHighLayer); inputHighValues.insert(inputHighValues.end(), inputHighBlobValues.begin(), inputHighBlobValues.end()); @@ -77,11 +77,11 @@ void QuantizationDetails::getOutputIntervals( std::shared_ptr quantize, std::vector& outputLowValues, std::vector& outputHighValues) { - std::shared_ptr outputLowLayer = as_type_ptr(quantize->get_input_node_shared_ptr(3)); + std::shared_ptr outputLowLayer = ov::as_type_ptr(quantize->get_input_node_shared_ptr(3)); const std::vector& outputLowBlobValues = getBlobValue(outputLowLayer); outputLowValues.insert(outputLowValues.end(), outputLowBlobValues.begin(), outputLowBlobValues.end()); - std::shared_ptr outputHighLayer = as_type_ptr(quantize->get_input_node_shared_ptr(4)); + std::shared_ptr outputHighLayer = ov::as_type_ptr(quantize->get_input_node_shared_ptr(4)); const std::vector outputHighBlobValues = getBlobValue(outputHighLayer); outputHighValues.insert(outputHighValues.end(), outputHighBlobValues.begin(), outputHighBlobValues.end()); @@ -91,11 +91,11 @@ void QuantizationDetails::getOutputIntervals( } QuantizationDetails QuantizationDetails::getDetails(std::shared_ptr quantize) { - const std::vector inputLowValues = as_type_ptr(quantize->get_input_node_shared_ptr(1))->cast_vector(); - const std::vector inputHighValues = as_type_ptr(quantize->get_input_node_shared_ptr(2))->cast_vector(); + const std::vector inputLowValues = ov::as_type_ptr(quantize->get_input_node_shared_ptr(1))->cast_vector(); + const std::vector inputHighValues = ov::as_type_ptr(quantize->get_input_node_shared_ptr(2))->cast_vector(); - const std::vector outputLowValues = as_type_ptr(quantize->get_input_node_shared_ptr(3))->cast_vector(); - const std::vector outputHighValues = as_type_ptr(quantize->get_input_node_shared_ptr(4))->cast_vector(); + const std::vector outputLowValues = ov::as_type_ptr(quantize->get_input_node_shared_ptr(3))->cast_vector(); + const std::vector outputHighValues = ov::as_type_ptr(quantize->get_input_node_shared_ptr(4))->cast_vector(); return QuantizationDetails( quantize->get_levels(), @@ -150,7 +150,7 @@ float QuantizationDetails::getOutputHighValue(const size_t index) const { } std::vector QuantizationDetails::getBlobValue(std::shared_ptr constantLayer) { - return as_type_ptr(constantLayer)->cast_vector(); + return 
ov::as_type_ptr(constantLayer)->cast_vector(); } bool QuantizationDetails::isSupportedLevel(const size_t level) { diff --git a/inference-engine/src/low_precision_transformations/src/reduce_base_transformation.cpp b/inference-engine/src/low_precision_transformations/src/reduce_base_transformation.cpp index e178d94b98a..ddd7cb110f6 100644 --- a/inference-engine/src/low_precision_transformations/src/reduce_base_transformation.cpp +++ b/inference-engine/src/low_precision_transformations/src/reduce_base_transformation.cpp @@ -40,7 +40,7 @@ bool ReduceBaseTransformation::canBeTransformed(const TransformationContext& con return false; } - const auto axesConstant = as_type_ptr(reduce->get_input_node_shared_ptr(1)); + const auto axesConstant = ov::as_type_ptr(reduce->get_input_node_shared_ptr(1)); if (axesConstant == nullptr) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/reduce_max.cpp b/inference-engine/src/low_precision_transformations/src/reduce_max.cpp index 29e230314e7..9cfec0038b8 100644 --- a/inference-engine/src/low_precision_transformations/src/reduce_max.cpp +++ b/inference-engine/src/low_precision_transformations/src/reduce_max.cpp @@ -31,7 +31,7 @@ ReduceMaxTransformation::ReduceMaxTransformation(const Params& params) : ReduceB } bool ReduceMaxTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { - if (!is_type(reduce)) { + if (!ov::is_type(reduce)) { return false; } @@ -40,7 +40,7 @@ bool ReduceMaxTransformation::canBeTransformed(const TransformationContext& cont } const auto dequantization = NetworkHelper::getDequantization(reduce); - const std::vector scales = as_type_ptr(dequantization.multiplyConstant)->cast_vector(); + const std::vector scales = ov::as_type_ptr(dequantization.multiplyConstant)->cast_vector(); if (std::any_of(scales.begin(), scales.end(), [](const float value) { return value < 0.0; })) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/reduce_mean.cpp b/inference-engine/src/low_precision_transformations/src/reduce_mean.cpp index c91abbeb1cc..1e9ab73ae30 100644 --- a/inference-engine/src/low_precision_transformations/src/reduce_mean.cpp +++ b/inference-engine/src/low_precision_transformations/src/reduce_mean.cpp @@ -31,7 +31,7 @@ ReduceMeanTransformation::ReduceMeanTransformation(const Params& params) : Reduc } bool ReduceMeanTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { - return is_type(reduce) ? ReduceBaseTransformation::canBeTransformed(context, reduce) : false; + return ov::is_type(reduce) ? 
ReduceBaseTransformation::canBeTransformed(context, reduce) : false; } bool ReduceMeanTransformation::isPrecisionPreserved(std::shared_ptr reduce) const noexcept { diff --git a/inference-engine/src/low_precision_transformations/src/reduce_min.cpp b/inference-engine/src/low_precision_transformations/src/reduce_min.cpp index 1d0e9da5acc..c049fbde197 100644 --- a/inference-engine/src/low_precision_transformations/src/reduce_min.cpp +++ b/inference-engine/src/low_precision_transformations/src/reduce_min.cpp @@ -31,7 +31,7 @@ ReduceMinTransformation::ReduceMinTransformation(const Params& params) : ReduceB } bool ReduceMinTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { - if (!is_type(reduce)) { + if (!ov::is_type(reduce)) { return false; } @@ -40,7 +40,7 @@ bool ReduceMinTransformation::canBeTransformed(const TransformationContext& cont } const auto dequantization = NetworkHelper::getDequantization(reduce); - const std::vector scales = as_type_ptr(dequantization.multiplyConstant)->cast_vector(); + const std::vector scales = ov::as_type_ptr(dequantization.multiplyConstant)->cast_vector(); if (std::any_of(scales.begin(), scales.end(), [](const float value) { return value < 0.0; })) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/reduce_sum.cpp b/inference-engine/src/low_precision_transformations/src/reduce_sum.cpp index 7ffcb435bd0..a6642a5f97f 100644 --- a/inference-engine/src/low_precision_transformations/src/reduce_sum.cpp +++ b/inference-engine/src/low_precision_transformations/src/reduce_sum.cpp @@ -31,7 +31,7 @@ ReduceSumTransformation::ReduceSumTransformation(const Params& params) : ReduceB } bool ReduceSumTransformation::canBeTransformed(const TransformationContext& context, std::shared_ptr reduce) const { - const auto reduceSum = as_type_ptr(reduce); + const auto reduceSum = ov::as_type_ptr(reduce); if (!reduceSum || !ReduceBaseTransformation::canBeTransformed(context, reduceSum)) { return false; } @@ -57,7 +57,7 @@ void ReduceSumTransformation::changeDequantizationValues( ReduceBaseTransformation::changeDequantizationValues(reduce, dequantization); if (dequantization.subtract) { - const auto reduceSum = as_type_ptr(reduce); + const auto reduceSum = ov::as_type_ptr(reduce); const auto reductionAxes = reduceSum->get_reduction_axes(); const auto inputShape = reduceSum->get_input_partial_shape(0); @@ -72,7 +72,7 @@ void ReduceSumTransformation::changeDequantizationValues( const auto result = fold(dequantization.subtractConstant, reductionSizeConstant); replace_node(dequantization.subtractConstant, result); - dequantization.subtractConstant = as_type_ptr(result); + dequantization.subtractConstant = ov::as_type_ptr(result); } } diff --git a/inference-engine/src/low_precision_transformations/src/reshape.cpp b/inference-engine/src/low_precision_transformations/src/reshape.cpp index e8263bd7528..da44763ba0d 100644 --- a/inference-engine/src/low_precision_transformations/src/reshape.cpp +++ b/inference-engine/src/low_precision_transformations/src/reshape.cpp @@ -125,7 +125,7 @@ void reshapeDequantizationConstant(const std::shared_ptr& resha } bool ReshapeTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) { - std::shared_ptr reshape = as_type_ptr(m.get_match_root()); + std::shared_ptr reshape = ov::as_type_ptr(m.get_match_root()); if (NetworkHelper::isConstantPath(reshape)) { return false; } @@ -134,7 +134,7 @@ bool ReshapeTransformation::transform(TransformationContext& 
context, ngraph::pa return false; } - reshape = as_type_ptr(NetworkHelper::separateInStandaloneBranch(reshape)); + reshape = ov::as_type_ptr(NetworkHelper::separateInStandaloneBranch(reshape)); reshapeDequantizationConstant(reshape); moveDequantizationAfter(context, reshape, NetworkHelper::getDequantization(reshape, 0), false); return true; diff --git a/inference-engine/src/low_precision_transformations/src/rt_info/intervals_alignment_attribute.cpp b/inference-engine/src/low_precision_transformations/src/rt_info/intervals_alignment_attribute.cpp index 2425b3f1e12..95a6168db9c 100644 --- a/inference-engine/src/low_precision_transformations/src/rt_info/intervals_alignment_attribute.cpp +++ b/inference-engine/src/low_precision_transformations/src/rt_info/intervals_alignment_attribute.cpp @@ -36,11 +36,11 @@ constexpr VariantTypeInfo VariantWrapper::type_i std::shared_ptr>> VariantWrapper::create( const std::shared_ptr& node, const AttributeParameters& params) { - if (!is_type(node)) { + if (!ov::is_type(node)) { return nullptr; } - auto fakeQuantize = as_type_ptr(node); + auto fakeQuantize = ov::as_type_ptr(node); if (!QuantizationDetails::outputLayoutIsSupported(fakeQuantize) || !QuantizationDetails::isSupportedLevel(fakeQuantize->get_levels())) { return nullptr; } @@ -58,8 +58,8 @@ std::shared_ptr>> Va } } - const auto outLow = as_type_ptr(node->get_input_node_shared_ptr(3)); - const auto outHigh = as_type_ptr(node->get_input_node_shared_ptr(4)); + const auto outLow = ov::as_type_ptr(node->get_input_node_shared_ptr(3)); + const auto outHigh = ov::as_type_ptr(node->get_input_node_shared_ptr(4)); if (!NetworkHelper::isScalarLike(outLow) || !NetworkHelper::isScalarLike(outHigh)) { return nullptr; } @@ -78,7 +78,7 @@ std::shared_ptr>> Va foldConvert(node->get_input_node_ptr(3)->shared_from_this(), params.deqPrecision), dequantization.multiplyConstant); - auto multiplyResultConstant = as_type_ptr(multiplyResult); + auto multiplyResultConstant = ov::as_type_ptr(multiplyResult); auto intervals = multiplyResultConstant->cast_vector(); lowInterval = *std::min_element(intervals.begin(), intervals.end()); } @@ -90,7 +90,7 @@ std::shared_ptr>> Va foldConvert(node->get_input_node_ptr(4)->shared_from_this(), params.deqPrecision), dequantization.multiplyConstant); - auto multiplyResultConstant = as_type_ptr(multiplyResult); + auto multiplyResultConstant = ov::as_type_ptr(multiplyResult); auto intervals = multiplyResultConstant->cast_vector(); highInterval = *std::max_element(intervals.begin(), intervals.end()); } @@ -115,8 +115,8 @@ std::shared_ptr>> Va fakeQuantize->get_levels())); rtInfo[ngraph::VariantWrapper::type_info.name] = attribute; - const std::vector outputLowValues = as_type_ptr(fakeQuantize->get_input_node_shared_ptr(3))->cast_vector(); - const std::vector outputHighValues = as_type_ptr(fakeQuantize->get_input_node_shared_ptr(4))->cast_vector(); + const std::vector outputLowValues = ov::as_type_ptr(fakeQuantize->get_input_node_shared_ptr(3))->cast_vector(); + const std::vector outputHighValues = ov::as_type_ptr(fakeQuantize->get_input_node_shared_ptr(4))->cast_vector(); LayerTransformation::PrecisionDetails preferablePrecision = LayerTransformation::getPrecisionDetails( fakeQuantize->get_levels(), outputLowValues, diff --git a/inference-engine/src/low_precision_transformations/src/rt_info/precisions_attribute.cpp b/inference-engine/src/low_precision_transformations/src/rt_info/precisions_attribute.cpp index 334f3a3eae3..3344f4f74a7 100644 --- 
a/inference-engine/src/low_precision_transformations/src/rt_info/precisions_attribute.cpp +++ b/inference-engine/src/low_precision_transformations/src/rt_info/precisions_attribute.cpp @@ -33,7 +33,7 @@ std::shared_ptr>> VariantWra auto attribute = ngraph::pass::low_precision::make_shared_attribute(); auto wrapper = std::make_shared>>(attribute); - auto& rt = is_type(node) ? node->output(0).get_rt_info() : node->get_rt_info(); + auto& rt = ov::is_type(node) ? node->output(0).get_rt_info() : node->get_rt_info(); rt[ngraph::VariantWrapper>::type_info.name] = wrapper; return wrapper; } diff --git a/inference-engine/src/low_precision_transformations/src/rt_info/quantization_alignment_attribute.cpp b/inference-engine/src/low_precision_transformations/src/rt_info/quantization_alignment_attribute.cpp index b95a9567a3e..26fd6711c34 100644 --- a/inference-engine/src/low_precision_transformations/src/rt_info/quantization_alignment_attribute.cpp +++ b/inference-engine/src/low_precision_transformations/src/rt_info/quantization_alignment_attribute.cpp @@ -47,16 +47,16 @@ std::shared_ptr>> const auto dequantization = NetworkHelper::getDequantization(node, index); if (!dequantization.empty() && - (is_type(dequantization.data.get_node())) && - is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { + (ov::is_type(dequantization.data.get_node())) && + ov::is_type(dequantization.data.get_node()->get_input_node_ptr(0))) { inputNode = dequantization.data.get_node()->get_input_node_shared_ptr(0); } - if (is_type(inputNode)) { + if (ov::is_type(inputNode)) { continue; } - if (!is_type(inputNode)) { + if (!ov::is_type(inputNode)) { leastOneOperationIsNotFakeQuantize = true; break; } diff --git a/inference-engine/src/low_precision_transformations/src/shuffle_channels.cpp b/inference-engine/src/low_precision_transformations/src/shuffle_channels.cpp index 129bcb23977..2b0621c982c 100644 --- a/inference-engine/src/low_precision_transformations/src/shuffle_channels.cpp +++ b/inference-engine/src/low_precision_transformations/src/shuffle_channels.cpp @@ -38,7 +38,7 @@ bool ShuffleChannelsTransformation::transform(TransformationContext& context, ng return false; } - const auto shuffleChannels = as_type_ptr(NetworkHelper::separateInStandaloneBranch(m.get_match_root())); + const auto shuffleChannels = ov::as_type_ptr(NetworkHelper::separateInStandaloneBranch(m.get_match_root())); auto dequantization = NetworkHelper::getDequantization(shuffleChannels); const auto shuffleDequantizationConstant = [&](const std::shared_ptr& eltwise) { @@ -58,7 +58,7 @@ bool ShuffleChannelsTransformation::transform(TransformationContext& context, ng } else { const auto group = shuffleChannels->get_group(); const auto shuffledConst = fold(normalizedConst, normalizedAxis, group); - return as_type_ptr(shuffledConst); + return ov::as_type_ptr(shuffledConst); } } }; @@ -82,7 +82,7 @@ bool ShuffleChannelsTransformation::canBeTransformed(const TransformationContext return false; } - const auto shuffleChannels = as_type_ptr(op); + const auto shuffleChannels = ov::as_type_ptr(op); if (shuffleChannels == nullptr) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/split.cpp b/inference-engine/src/low_precision_transformations/src/split.cpp index a663fc64f0a..eabe1f7e45e 100644 --- a/inference-engine/src/low_precision_transformations/src/split.cpp +++ b/inference-engine/src/low_precision_transformations/src/split.cpp @@ -46,7 +46,7 @@ bool SplitTransformation::transform(TransformationContext& context, 
ngraph::patt newSplit->set_friendly_name(split->get_friendly_name()); ngraph::copy_runtime_info(split, newSplit); - const int64_t axis = as_type_ptr(split->get_input_node_shared_ptr(1))->cast_vector()[0]; + const int64_t axis = ov::as_type_ptr(split->get_input_node_shared_ptr(1))->cast_vector()[0]; const size_t normalizedAxis = normalize_axis(split->get_friendly_name(), axis, split->get_input_partial_shape(0).rank()); const size_t outputSize = newSplit->get_output_size(); @@ -128,7 +128,7 @@ void SplitTransformation::updateOutputs( const auto lastNode = lastNodes[i]; for (auto output : lastNodes[i]->outputs()) { for (auto input : output.get_target_inputs()) { - if (is_type(input.get_node())) { + if (ov::is_type(input.get_node())) { originalNode->set_friendly_name(originalName + LayerTransformation::originalLayerPostfix); lastNode->set_friendly_name(originalName + "." + std::to_string(i)); break; @@ -149,7 +149,7 @@ bool SplitTransformation::canBeTransformed(const TransformationContext& context, } const auto consumers = NetworkHelper::consumers(layer); - const auto concat = as_type_ptr(consumers[0]); + const auto concat = ov::as_type_ptr(consumers[0]); // WA to avoid propagation of dequantization if after Split all consumers are the same unsupported Concat if (concat && concat->get_axis() != 1ul) { diff --git a/inference-engine/src/low_precision_transformations/src/squeeze.cpp b/inference-engine/src/low_precision_transformations/src/squeeze.cpp index 919364d1bbf..42d8e7e5932 100644 --- a/inference-engine/src/low_precision_transformations/src/squeeze.cpp +++ b/inference-engine/src/low_precision_transformations/src/squeeze.cpp @@ -47,7 +47,7 @@ bool SqueezeTransformation::transform(TransformationContext& context, ngraph::pa return NetworkHelper::toScalar(dequantizationOpConstant); } if (constantShape.size() == inputRankValue) { - return as_type_ptr(fold(dequantizationOpConstant, squeeze->get_input_node_shared_ptr(1))); + return ov::as_type_ptr(fold(dequantizationOpConstant, squeeze->get_input_node_shared_ptr(1))); } return dequantizationOpConstant; diff --git a/inference-engine/src/low_precision_transformations/src/strided_slice.cpp b/inference-engine/src/low_precision_transformations/src/strided_slice.cpp index 5e34d1bf45b..470e7aec2fc 100644 --- a/inference-engine/src/low_precision_transformations/src/strided_slice.cpp +++ b/inference-engine/src/low_precision_transformations/src/strided_slice.cpp @@ -19,7 +19,7 @@ NGRAPH_RTTI_DEFINITION(ngraph::pass::low_precision::StridedSliceTransformation, std::shared_ptr stridedSliceDeqConstant( const std::shared_ptr strSlice, const std::shared_ptr dequantizationConstant) { - auto constant = as_type_ptr(dequantizationConstant); + auto constant = ov::as_type_ptr(dequantizationConstant); auto constantShape = constant->get_shape(); if (shape_size(constantShape) == 1ul) { return NetworkHelper::toScalar(constant); @@ -45,10 +45,10 @@ std::shared_ptr stridedSliceDeqConstant( const auto newConstant = fold( constant, ngraph::opset1::Constant::create(ngraph::element::i32, { newConstantShape.size() }, newConstantShape)); - constant = as_type_ptr(newConstant); + constant = ov::as_type_ptr(newConstant); } - const auto stridedSlice = as_type_ptr(strSlice); + const auto stridedSlice = ov::as_type_ptr(strSlice); auto beginMask = stridedSlice->get_begin_mask(); auto endMask = stridedSlice->get_end_mask(); @@ -116,7 +116,7 @@ bool StridedSliceTransformation::transform(TransformationContext& context, ngrap } bool StridedSliceTransformation::canBeTransformed(const
TransformationContext& context, std::shared_ptr operation) const { - if (!is_type(operation) || NetworkHelper::isDQByDynamicDimension(operation)) { + if (!ov::is_type(operation) || NetworkHelper::isDQByDynamicDimension(operation)) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/subtract.cpp b/inference-engine/src/low_precision_transformations/src/subtract.cpp index 4c71e191c2f..83569ef8dc2 100644 --- a/inference-engine/src/low_precision_transformations/src/subtract.cpp +++ b/inference-engine/src/low_precision_transformations/src/subtract.cpp @@ -42,7 +42,7 @@ SubtractTransformation::SubtractTransformation(const Params& params) : LayerTran } bool SubtractTransformation::transform(TransformationContext& context, ngraph::pattern::Matcher &m) { - std::shared_ptr subtract = as_type_ptr(m.get_match_root()); + std::shared_ptr subtract = ov::as_type_ptr(m.get_match_root()); if (!canBeTransformed(context, subtract)) { return false; } @@ -54,7 +54,7 @@ bool SubtractTransformation::transform(TransformationContext& context, ngraph::p // before: Y = X * SC - SH, after: Y = (X - SH') * SC // X * SC - SH = X * SC - SH' * SC // SH' = SH / SC - std::shared_ptr newSubtract = as_type_ptr(subtract->copy_with_new_inputs({ + std::shared_ptr newSubtract = ov::as_type_ptr(subtract->copy_with_new_inputs({ dequantization.multiply->get_input_node_shared_ptr(0), ngraph::pass::low_precision::fold( subtract->get_input_node_shared_ptr(1), @@ -71,7 +71,7 @@ bool SubtractTransformation::transform(TransformationContext& context, ngraph::p } if (dequantization.subtract != nullptr) { - std::shared_ptr newSubtract = as_type_ptr(subtract->copy_with_new_inputs({ + std::shared_ptr newSubtract = ov::as_type_ptr(subtract->copy_with_new_inputs({ dequantization.subtract->get_input_node_shared_ptr(0), ngraph::pass::low_precision::fold( subtract->get_input_node_shared_ptr(1), diff --git a/inference-engine/src/low_precision_transformations/src/subtract_multiply_to_multiply_add.cpp b/inference-engine/src/low_precision_transformations/src/subtract_multiply_to_multiply_add.cpp index f8554db8721..895a502f178 100644 --- a/inference-engine/src/low_precision_transformations/src/subtract_multiply_to_multiply_add.cpp +++ b/inference-engine/src/low_precision_transformations/src/subtract_multiply_to_multiply_add.cpp @@ -37,9 +37,9 @@ SubtractMultiplyToMultiplyAddTransformation::SubtractMultiplyToMultiplyAddTransf FakeQuantizeDequantization get(const std::shared_ptr node) { Output dataNode = node; - const std::shared_ptr multiply = is_type( + const std::shared_ptr multiply = ov::is_type( dataNode.get_node_shared_ptr()->get_input_node_shared_ptr(1)) ? - as_type_ptr(dataNode.get_node_shared_ptr()) : + ov::as_type_ptr(dataNode.get_node_shared_ptr()) : nullptr; std::shared_ptr multiplyConstant; if (multiply != nullptr) { @@ -48,8 +48,8 @@ FakeQuantizeDequantization get(const std::shared_ptr node) { } const std::shared_ptr subtract = (dataNode.get_node_shared_ptr()->get_input_size() > 1ul) - && is_type(dataNode.get_node_shared_ptr()->get_input_node_ptr(1)) ? - as_type_ptr(dataNode.get_node_shared_ptr()) : + && ov::is_type(dataNode.get_node_shared_ptr()->get_input_node_ptr(1)) ? 
+ ov::as_type_ptr(dataNode.get_node_shared_ptr()) : nullptr; std::shared_ptr subtractConvert; std::shared_ptr subtractConstant; @@ -58,7 +58,7 @@ FakeQuantizeDequantization get(const std::shared_ptr node) { dataNode = subtract->get_input_source_output(0); } - const std::shared_ptr convert = as_type_ptr(dataNode.get_node_shared_ptr()); + const std::shared_ptr convert = ov::as_type_ptr(dataNode.get_node_shared_ptr()); if (convert != nullptr) { dataNode = convert->get_input_source_output(0); } @@ -119,8 +119,8 @@ bool SubtractMultiplyToMultiplyAddTransformation::transform(TransformationContex std::make_shared(deqPrecision, Shape{}, std::vector{ -1.f })), foldConvert(dequantization.multiply->get_input_node_shared_ptr(1), deqPrecision)); - if (is_type(subtractConstant)) { - std::shared_ptr constant = as_type_ptr(subtractConstant); + if (ov::is_type(subtractConstant)) { + std::shared_ptr constant = ov::as_type_ptr(subtractConstant); if (NetworkHelper::isScalarLike(constant)) { subtractConstant = NetworkHelper::toScalar(constant); } @@ -137,7 +137,7 @@ bool SubtractMultiplyToMultiplyAddTransformation::transform(TransformationContex lastNewPrecision = precisionAfterDequantization; } else { - NetworkHelper::setOutDataPrecision(as_type_ptr(lastNew.get_node_shared_ptr()), precisionAfterDequantization); + NetworkHelper::setOutDataPrecision(ov::as_type_ptr(lastNew.get_node_shared_ptr()), precisionAfterDequantization); } const std::shared_ptr lastOriginal = dequantization.multiply == nullptr ? diff --git a/inference-engine/src/low_precision_transformations/src/transpose.cpp b/inference-engine/src/low_precision_transformations/src/transpose.cpp index a7be7c7f6f4..518b945b088 100644 --- a/inference-engine/src/low_precision_transformations/src/transpose.cpp +++ b/inference-engine/src/low_precision_transformations/src/transpose.cpp @@ -100,7 +100,7 @@ bool TransposeTransformation::canBeTransformed(const TransformationContext& cont return false; } - const std::shared_ptr constant = as_type_ptr(op->get_input_node_shared_ptr(1)); + const std::shared_ptr constant = ov::as_type_ptr(op->get_input_node_shared_ptr(1)); if (constant == nullptr) { return false; } @@ -113,7 +113,7 @@ bool TransposeTransformation::canBeTransformed(const TransformationContext& cont } } if (dequantization.multiply != nullptr) { - const auto mulConst = as_type_ptr(dequantization.multiplyConstant); + const auto mulConst = ov::as_type_ptr(dequantization.multiplyConstant); if (!NetworkHelper::isScalarLike(mulConst)) { return false; } diff --git a/inference-engine/src/low_precision_transformations/src/unsqueeze.cpp b/inference-engine/src/low_precision_transformations/src/unsqueeze.cpp index d2f0636c832..011bb4d46f0 100644 --- a/inference-engine/src/low_precision_transformations/src/unsqueeze.cpp +++ b/inference-engine/src/low_precision_transformations/src/unsqueeze.cpp @@ -48,7 +48,7 @@ bool UnsqueezeTransformation::transform(TransformationContext& context, ngraph:: } if (constantShape.size() == inputRankValue) { - return as_type_ptr(fold(dequantizationOpConstant, unsqueeze->get_input_node_shared_ptr(1))); + return ov::as_type_ptr(fold(dequantizationOpConstant, unsqueeze->get_input_node_shared_ptr(1))); } return dequantizationOpConstant; diff --git a/inference-engine/src/low_precision_transformations/src/weightable_layer_transformation.cpp b/inference-engine/src/low_precision_transformations/src/weightable_layer_transformation.cpp index 6649492dd55..63401238089 100644 --- 
a/inference-engine/src/low_precision_transformations/src/weightable_layer_transformation.cpp +++ b/inference-engine/src/low_precision_transformations/src/weightable_layer_transformation.cpp @@ -34,7 +34,7 @@ bool WeightableLayerTransformation::canConvolutionBeTransformed(const Transforma return false; } - std::shared_ptr reshapeFromWeights = as_type_ptr(layer->get_input_node_shared_ptr(1)); + std::shared_ptr reshapeFromWeights = ov::as_type_ptr(layer->get_input_node_shared_ptr(1)); dequantization = reshapeFromWeights == nullptr ? NetworkHelper::getDequantization(layer, 1ul) : NetworkHelper::getDequantization(reshapeFromWeights); @@ -134,20 +134,20 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext // TODO Implement similar checks in other weightable operations - const std::shared_ptr reshapeFromWeights = as_type_ptr(layer->get_input_node_shared_ptr(1)); + const std::shared_ptr reshapeFromWeights = ov::as_type_ptr(layer->get_input_node_shared_ptr(1)); std::shared_ptr fqFromWeights; if (reshapeFromWeights == nullptr) { - fqFromWeights = as_type_ptr(layer->get_input_node_shared_ptr(1)); + fqFromWeights = ov::as_type_ptr(layer->get_input_node_shared_ptr(1)); if (fqFromWeights == nullptr) { const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(layer, 1ul); - fqFromWeights = as_type_ptr(dequantization.data.get_node_shared_ptr()); + fqFromWeights = ov::as_type_ptr(dequantization.data.get_node_shared_ptr()); } } else { - fqFromWeights = as_type_ptr(reshapeFromWeights->get_input_node_shared_ptr(0)); + fqFromWeights = ov::as_type_ptr(reshapeFromWeights->get_input_node_shared_ptr(0)); if (fqFromWeights == nullptr) { const FakeQuantizeDequantization dequantization = NetworkHelper::getDequantization(reshapeFromWeights, 0ul); - fqFromWeights = as_type_ptr(dequantization.data.get_node_shared_ptr()); + fqFromWeights = ov::as_type_ptr(dequantization.data.get_node_shared_ptr()); } } @@ -164,7 +164,7 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext const auto fqOutPShape = fqFromWeights->get_output_partial_shape(0); - const size_t outChannelsIdx = is_type(layer) ? 1ul : 0ul; + const size_t outChannelsIdx = ov::is_type(layer) ? 1ul : 0ul; if (fqOutPShape.rank().is_dynamic() || fqOutPShape[outChannelsIdx].is_dynamic()) { return false; } @@ -188,7 +188,7 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext return false; } - const auto weightsData = as_type_ptr(dequantizationOnWeights.data.get_node_shared_ptr()); + const auto weightsData = ov::as_type_ptr(dequantizationOnWeights.data.get_node_shared_ptr()); if (weightsData == nullptr) { return false; } @@ -205,7 +205,7 @@ bool WeightableLayerTransformation::canBeTransformed(const TransformationContext } } - const size_t outChannelsIdx = is_type(layer) ? 1ul : 0ul; + const size_t outChannelsIdx = ov::is_type(layer) ?
1ul : 0ul; if (dequantizationOnWeights.subtract) { const auto subConstShape = dequantizationOnWeights.subtractConstant->get_shape(); if (shape_size(subConstShape) > 1ul && shape_size(subConstShape) != subConstShape[outChannelsIdx]) { @@ -227,18 +227,18 @@ bool WeightableLayerTransformation::isQuantizedStatic(const std::shared_ptrget_input_node_shared_ptr(1); - if (!is_type(reshape)) { + if (!ov::is_type(reshape)) { return false; } - if (is_type(reshape->get_input_node_shared_ptr(0))) { - const std::shared_ptr fq = as_type_ptr(reshape->get_input_node_shared_ptr(0)); + if (ov::is_type(reshape->get_input_node_shared_ptr(0))) { + const std::shared_ptr fq = ov::as_type_ptr(reshape->get_input_node_shared_ptr(0)); return NetworkHelper::isQuantizeSupported(fq); } dequantizationOnWeights = NetworkHelper::getDequantization(reshape, 0); - } else if (is_type(layer->get_input_node_shared_ptr(1))) { - const std::shared_ptr fq = as_type_ptr(layer->get_input_node_shared_ptr(1)); + } else if (ov::is_type(layer->get_input_node_shared_ptr(1))) { + const std::shared_ptr fq = ov::as_type_ptr(layer->get_input_node_shared_ptr(1)); return NetworkHelper::isQuantizeSupported(fq); } else { // TODO: update NetworkHelper API later @@ -251,7 +251,7 @@ bool WeightableLayerTransformation::isQuantizedStatic(const std::shared_ptr(dequantizationOnWeights.data.get_node())) { + if (ov::is_type(dequantizationOnWeights.data.get_node())) { const ngraph::element::Type weightsDataPrecision = dequantizationOnWeights.data.get_element_type(); if (!DataPrecision::isSupported(weightsDataPrecision)) { return false; @@ -264,7 +264,7 @@ bool WeightableLayerTransformation::isQuantizedStatic(const std::shared_ptr(layer) ? 1ul : 0ul; + const size_t outChannelsShapeIndex = ov::is_type(layer) ? 1ul : 0ul; if (dequantizationOnWeights.subtract) { const auto subConstShape = dequantizationOnWeights.subtractConstant->get_shape(); if (shape_size(subConstShape) > 1ul && shape_size(subConstShape) != subConstShape[outChannelsShapeIndex]) { @@ -279,7 +279,7 @@ bool WeightableLayerTransformation::isQuantizedStatic(const std::shared_ptr(dequantizationOnWeights.data.get_node())) { + } else if (ov::is_type(dequantizationOnWeights.data.get_node())) { return true; } @@ -321,7 +321,7 @@ bool WeightableLayerTransformation::decomposeFakeQuantizeForWeightsPath(const st return false; } - if (as_type_ptr(fqOnWeights) == nullptr) { + if (ov::as_type_ptr(fqOnWeights) == nullptr) { THROW_IE_LPT_EXCEPTION(*fqOnWeights) << "FakeQuantize on weights was not folded to constant"; } @@ -329,7 +329,7 @@ bool WeightableLayerTransformation::decomposeFakeQuantizeForWeightsPath(const st } bool WeightableLayerTransformation::isGroup(const std::shared_ptr& layer) { - if (!is_type(layer) && !is_type(layer)) { + if (!ov::is_type(layer) && !ov::is_type(layer)) { return false; } @@ -338,7 +338,7 @@ bool WeightableLayerTransformation::isGroup(const std::shared_ptr& layer) } bool WeightableLayerTransformation::isDepthwise(const std::shared_ptr& layer) { - if (!as_type_ptr(layer) && !as_type_ptr(layer)) { + if (!ov::as_type_ptr(layer) && !ov::as_type_ptr(layer)) { return false; } @@ -349,10 +349,10 @@ bool WeightableLayerTransformation::isDepthwise(const std::shared_ptr& lay } std::shared_ptr WeightableLayerTransformation::getFakeQuantizeOnWeights(const std::shared_ptr& node) { - auto fq = as_type_ptr(node->get_input_node_shared_ptr(1)); + auto fq = ov::as_type_ptr(node->get_input_node_shared_ptr(1)); // TODO: temporary workaround if (fq == nullptr) { - fq = 
as_type_ptr(node->get_input_node_ptr(1)->get_input_node_shared_ptr(0)); + fq = ov::as_type_ptr(node->get_input_node_ptr(1)->get_input_node_shared_ptr(0)); } return fq; diff --git a/inference-engine/src/offline_transformations/src/pruning/mask_attribute.cpp b/inference-engine/src/offline_transformations/src/pruning/mask_attribute.cpp index 42fa47a3eb6..46d4a761740 100644 --- a/inference-engine/src/offline_transformations/src/pruning/mask_attribute.cpp +++ b/inference-engine/src/offline_transformations/src/pruning/mask_attribute.cpp @@ -19,7 +19,7 @@ Mask::Ptr getMask(const Output & output) { if (!rtInfo.count(MaskWrapper::type_info.name)) return nullptr; const auto &attr = rtInfo.at(MaskWrapper::type_info.name); - return as_type_ptr(attr)->get(); + return ov::as_type_ptr(attr)->get(); } Mask::Ptr getMask(const Output & output) { @@ -29,7 +29,7 @@ Mask::Ptr getMask(const Output & output) { if (!rtInfo.count(MaskWrapper::type_info.name)) return nullptr; const auto &attr = rtInfo.at(MaskWrapper::type_info.name); - return as_type_ptr(attr)->get(); + return ov::as_type_ptr(attr)->get(); } void setMask(Output output, const Mask::Ptr & mask) { diff --git a/inference-engine/src/snippets/src/op/subgraph.cpp b/inference-engine/src/snippets/src/op/subgraph.cpp index f58b1d383dc..f114b49e4e1 100644 --- a/inference-engine/src/snippets/src/op/subgraph.cpp +++ b/inference-engine/src/snippets/src/op/subgraph.cpp @@ -223,7 +223,7 @@ snippets::Schedule snippets::op::Subgraph::generate(const BlockedShapeVector& ou // check that body doesn't have constants for scheduling std::vector> constants; for (auto op : m_body->get_ordered_ops()) { - if (auto constant = as_type_ptr(op)) { + if (auto constant = ov::as_type_ptr(op)) { if (ngraph::shape_size(constant->get_shape()) != 1 && constant->get_shape() != Shape()) { constants.push_back(constant); } diff --git a/inference-engine/src/snippets/src/pass/assign_registers.cpp b/inference-engine/src/snippets/src/pass/assign_registers.cpp index de7df3792b8..7561b2403a0 100644 --- a/inference-engine/src/snippets/src/pass/assign_registers.cpp +++ b/inference-engine/src/snippets/src/pass/assign_registers.cpp @@ -156,13 +156,13 @@ bool ngraph::snippets::pass::AssignRegisters::run_on_function(std::shared_ptr(n) || as_type_ptr(n)) { + if (ov::as_type_ptr(n) || ov::as_type_ptr(n)) { auto source = n->get_input_source_output(0).get_node_shared_ptr(); - if (auto param = as_type_ptr(source)) { + if (auto param = ov::as_type_ptr(source)) { auto ea = reg64_tmp_start+static_cast(f->get_parameter_index(param)); rt["effectiveAddress"] = std::make_shared>(VariantWrapper(ea)); - } else if (auto constant = as_type_ptr(source)) { + } else if (auto constant = ov::as_type_ptr(source)) { auto ea = reg64_tmp_start+static_cast(f->get_parameters().size() + f->get_results().size() + 1 + constantID); rt["effectiveAddress"] = std::make_shared>(VariantWrapper(ea)); constantID++; diff --git a/inference-engine/src/snippets/src/pass/collapse_subgraph.cpp b/inference-engine/src/snippets/src/pass/collapse_subgraph.cpp index 6f05719d9d5..afa847b80f5 100644 --- a/inference-engine/src/snippets/src/pass/collapse_subgraph.cpp +++ b/inference-engine/src/snippets/src/pass/collapse_subgraph.cpp @@ -104,7 +104,7 @@ auto has_subgraph_as_input(std::shared_ptr node) -> bool { auto inputs = node->inputs(); for (auto input : inputs) { auto parent = input.get_source_output().get_node_shared_ptr(); - if (!!as_type_ptr(parent)) { + if (!!ov::as_type_ptr(parent)) { return true; } } @@ -114,66 +114,66 @@ auto
has_subgraph_as_input(std::shared_ptr node) -> bool { auto is_lo(std::shared_ptr n) -> bool { auto is_lob = [](std::shared_ptr n) -> bool { using ngraph::as_type_ptr; - return !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n); + return !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n); }; auto is_lou = [](std::shared_ptr n) -> bool { using ngraph::as_type_ptr; - return !!as_type_ptr(n) - // || !!as_type_ptr(n) - // || !!as_type_ptr(n) - // || !!as_type_ptr(n) - // || !!as_type_ptr(n) ? - || !!as_type_ptr(n) - // || !!as_type_ptr(n) - // || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - // || !!as_type_ptr(n) ? - // || !!as_type_ptr(n) ? - || !!as_type_ptr(n) - || !!as_type_ptr(n) - || !!as_type_ptr(n) - // || !!as_type_ptr(n) ? - || !!as_type_ptr(n) - // || !!as_type_ptr(n) - // || !!as_type_ptr(n) - || !!as_type_ptr(n) - // || !!as_type_ptr(n) - || !!as_type_ptr(n); + return !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) ? + || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) ? + // || !!ov::as_type_ptr(n) ? + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) ? 
+ || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n) + // || !!ov::as_type_ptr(n) + || !!ov::as_type_ptr(n); }; auto is_lot = [](std::shared_ptr n) -> bool { using ngraph::as_type_ptr; return false; - // return !!as_type_ptr(n) // ternary with 2 constants - // || !!as_type_ptr(n); // ternary with 2 constants / or DW + // return !!ov::as_type_ptr(n) // ternary with 2 constants + // || !!ov::as_type_ptr(n); // ternary with 2 constants / or DW }; auto is_fq = [](std::shared_ptr n) -> bool { using ngraph::as_type_ptr; - return false;//!!as_type_ptr(n); // 4->1 + return false;//!!ov::as_type_ptr(n); // 4->1 }; return is_lou(n) || is_lob(n) ||is_lot(n) || is_fq(n); @@ -208,11 +208,11 @@ auto has_supported_in_out(std::shared_ptr n) -> bool { } for (auto in_out : out.get_target_inputs()) { - if (!!as_type_ptr(in_out.get_node()->shared_from_this())) { + if (!!ov::as_type_ptr(in_out.get_node()->shared_from_this())) { return false; } - if (!!as_type_ptr(in_out.get_node()->shared_from_this())) { + if (!!ov::as_type_ptr(in_out.get_node()->shared_from_this())) { return false; } } @@ -305,7 +305,7 @@ ngraph::snippets::pass::AttachToSubgraph::AttachToSubgraph(bool tokenize_by_node for (auto& input : found.get_target_inputs()) { remark(13) << input.get_node() << " " << input.get_source_output() << " vs " << found << " : " << input.get_index() << " " << found.get_index() << std::endl; - if (as_type_ptr(input.get_node()->shared_from_this()) != nullptr && input.get_source_output() == found) { + if (ov::as_type_ptr(input.get_node()->shared_from_this()) != nullptr && input.get_source_output() == found) { return input.get_index(); } } @@ -315,7 +315,7 @@ ngraph::snippets::pass::AttachToSubgraph::AttachToSubgraph(bool tokenize_by_node for (auto input : inputs) { auto input_node = input.get_source_output().get_node_shared_ptr(); - if (auto subgraph = as_type_ptr(input_node)) { + if (auto subgraph = ov::as_type_ptr(input_node)) { if (!clones.count(input_node)) { auto f = ngraph::clone_function(*subgraph->get_body().get()); f->set_friendly_name(subgraph->get_body()->get_friendly_name()); @@ -327,7 +327,7 @@ ngraph::snippets::pass::AttachToSubgraph::AttachToSubgraph(bool tokenize_by_node for (auto input : inputs) { auto input_node = input.get_source_output().get_node_shared_ptr(); - if (auto subgraph = as_type_ptr(input_node)) { + if (auto subgraph = ov::as_type_ptr(input_node)) { if (!input_subgraphs.count(input_node)) { input_subgraphs.insert(input_node); @@ -356,7 +356,7 @@ ngraph::snippets::pass::AttachToSubgraph::AttachToSubgraph(bool tokenize_by_node for (auto output : internal->outputs()) { for (auto consumer : output.get_target_inputs()) { - if (auto to_replace_with = as_type_ptr(subgraph->input_value(i).get_node_shared_ptr())) { + if (auto to_replace_with = ov::as_type_ptr(subgraph->input_value(i).get_node_shared_ptr())) { auto other_body = clones[subgraph->input_value(i).get_node_shared_ptr()]; auto other_body_result = other_body->get_results()[consumer.get_source_output().get_index()]; auto result_producer = other_body_result->input(0).get_source_output(); diff --git a/inference-engine/src/transformations/src/ngraph_ops/nms_ie_internal.cpp b/inference-engine/src/transformations/src/ngraph_ops/nms_ie_internal.cpp index 85b96ada850..9049ff0272c 100644 --- a/inference-engine/src/transformations/src/ngraph_ops/nms_ie_internal.cpp +++ b/inference-engine/src/transformations/src/ngraph_ops/nms_ie_internal.cpp @@ -75,7 +75,7 @@ int64_t 
op::internal::NonMaxSuppressionIEInternal::max_boxes_output_from_input() } const auto max_output_boxes_input = - as_type_ptr(input_value(max_output_boxes_per_class_port).get_node_shared_ptr()); + ov::as_type_ptr(input_value(max_output_boxes_per_class_port).get_node_shared_ptr()); max_output_boxes = max_output_boxes_input->cast_vector().at(0); return max_output_boxes; diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp index 32f139a15ed..982240bb128 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/nop_elimination.cpp @@ -20,7 +20,7 @@ using namespace ngraph; //`simplify_gather`, optimizes gather if Gather is gathering the // whole input tensor static bool simplify_gather(std::shared_ptr node) { - if (auto gather = as_type_ptr(node)) { + if (auto gather = ov::as_type_ptr(node)) { // check if we are gathering the whole input auto data = gather->input_value(0); auto indices = gather->input_value(1); @@ -56,7 +56,7 @@ static bool simplify_gather(std::shared_ptr node) { // check if the indices is constant auto constant_indices = - as_type_ptr(gather->input_value(1).get_node_shared_ptr()); + ov::as_type_ptr(gather->input_value(1).get_node_shared_ptr()); if (!constant_indices) { return false; } else { @@ -98,9 +98,9 @@ static bool eliminate_reshape_v1(const std::shared_ptr& node) { } // eliminate redundant reshape, squeeze, or unsqueeze auto input_node = input.get_node_shared_ptr(); - if (as_type_ptr(input_node) || - as_type_ptr(input_node) || - as_type_ptr(input_node)) { + if (ov::as_type_ptr(input_node) || + ov::as_type_ptr(input_node) || + ov::as_type_ptr(input_node)) { auto shape = node->get_output_shape(0); std::vector vi; vi.assign(shape.begin(), shape.end()); @@ -151,8 +151,8 @@ static bool replace_squeeze_unsqueeze(const std::shared_ptr& node) { auto pat = opset3::Constant::create(element::i64, Shape{target_shape.size()}, target_shape); - if (is_type(input) || is_type(input) || - is_type(input)) { + if (ov::is_type(input) || ov::is_type(input) || + ov::is_type(input)) { reshape = make_shared(input->input_value(0), pat, false); } else { reshape = make_shared(node->input_value(0), pat, false); @@ -205,11 +205,11 @@ static bool eliminate_unsqueeze(const std::shared_ptr& node) { return replace_squeeze_unsqueeze(node); } - auto unsqueeze = as_type_ptr(node); + auto unsqueeze = ov::as_type_ptr(node); if (unsqueeze == nullptr) return false; auto input = unsqueeze->input_value(0).get_node_shared_ptr(); - auto squeeze = as_type_ptr(input); + auto squeeze = ov::as_type_ptr(input); auto replace_unsqueeze_only = [&](const vector& axes) { auto axes_const = opset3::Constant::create(element::i64, Shape{axes.size()}, axes); auto new_unsq = make_shared(input->input_value(0), axes_const); @@ -253,7 +253,7 @@ static bool eliminate_unsqueeze(const std::shared_ptr& node) { return false; } // eliminate redundant unsqueeze->unsqueeze - auto unsqueeze_i = as_type_ptr(input); + auto unsqueeze_i = ov::as_type_ptr(input); if (unsqueeze_i) { const auto& data_shape = unsqueeze_i->input_value(0).get_partial_shape(); if (data_shape.rank().is_dynamic() || out_shape.rank().is_dynamic()) { @@ -273,7 +273,7 @@ static bool eliminate_squeeze(const std::shared_ptr& node) { return replace_squeeze_unsqueeze(node); } - auto 
squeeze = as_type_ptr(node); + auto squeeze = ov::as_type_ptr(node); if (squeeze == nullptr) return false; auto input = squeeze->input_value(0).get_node_shared_ptr(); @@ -286,7 +286,7 @@ static bool eliminate_squeeze(const std::shared_ptr& node) { return false; }; // eliminate redundant unsqueeze->squeeze - if (auto unsqueeze = as_type_ptr(input)) { + if (auto unsqueeze = ov::as_type_ptr(input)) { PartialShape data_shape; if (op::is_parameter(input)) { data_shape = unsqueeze->input(0).get_partial_shape(); @@ -324,7 +324,7 @@ static bool eliminate_squeeze(const std::shared_ptr& node) { return false; } // eliminate redundant squeeze->squeeze - if (auto squeeze_i = as_type_ptr(input)) { + if (auto squeeze_i = ov::as_type_ptr(input)) { PartialShape data_shape; if (op::is_parameter(input)) { data_shape = squeeze_i->input(0).get_partial_shape(); @@ -522,4 +522,4 @@ ngraph::pass::NopElimination::NopElimination(bool use_shape_for_elimination) { add_matcher(); add_matcher(); } -} \ No newline at end of file +} diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/simplify_shape_of_sub_graph.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/simplify_shape_of_sub_graph.cpp index 244670d3678..7e11215d9d0 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/simplify_shape_of_sub_graph.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/simplify_shape_of_sub_graph.cpp @@ -30,7 +30,7 @@ bool ngraph::pass::SharedShapeOf::run_on_function(std::shared_ptrget_function()) graph_rewritten |= run_on_function(sub_graph); - if (is_type(node) || is_type(node)) + if (ov::is_type(node) || ov::is_type(node)) source_to_shape_of[node->input_value(0)].push_back(node); } @@ -59,12 +59,12 @@ ngraph::pass::GroupedGatherElimination::GroupedGatherElimination() { while (inputs.size() > i + 1) { auto curr = inputs[i].get_node_shared_ptr(), next = inputs[i + 1].get_node_shared_ptr(); if (curr->get_type_info() != next->get_type_info() || - (!is_type(curr) && !is_type(curr)) || + (!ov::is_type(curr) && !ov::is_type(curr)) || (curr->input_value(0) != next->input_value(0))) { ++i; continue; } // curr and next are the same type of gather which takes data from the same source - bool is_opset1 = is_type(curr); + bool is_opset1 = ov::is_type(curr); auto joint_indices = ngraph::op::util::make_try_fold(OutputVector{curr->input_value(1), next->input_value(1)}, 0); std::shared_ptr new_gather; if (is_opset1) @@ -131,7 +131,7 @@ ngraph::pass::SimplifyGatherShapeOf::SimplifyGatherShapeOf() { ngraph::matcher_pass_callback callback = [](pattern::Matcher& m) { auto node = m.get_match_root(); - auto gather = as_type_ptr(node->input_value(0).get_node_shared_ptr()); + auto gather = ov::as_type_ptr(node->input_value(0).get_node_shared_ptr()); if (!gather) { return false; } diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/transpose_to_reshape.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/transpose_to_reshape.cpp index bdee74a50f9..fe968bf5484 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/transpose_to_reshape.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/transpose_to_reshape.cpp @@ -24,7 +24,7 @@ bool replace_transpose_with_reshape(const std::shared_ptr& transpose) { const size_t input_shape_rank = input_shape.rank().get_length(); - auto order = 
as_type_ptr(transpose->input_value(1).get_node_shared_ptr()); + auto order = ov::as_type_ptr(transpose->input_value(1).get_node_shared_ptr()); if (!order || !ngraph::shape_size(order->get_shape())) { return false; } diff --git a/inference-engine/src/transformations/src/transformations/common_optimizations/weights_dequantize_to_fake_quantize.cpp b/inference-engine/src/transformations/src/transformations/common_optimizations/weights_dequantize_to_fake_quantize.cpp index 6b1872a0566..d0a6175f29e 100644 --- a/inference-engine/src/transformations/src/transformations/common_optimizations/weights_dequantize_to_fake_quantize.cpp +++ b/inference-engine/src/transformations/src/transformations/common_optimizations/weights_dequantize_to_fake_quantize.cpp @@ -29,7 +29,7 @@ ngraph::pass::WeightsDequantizeToFakeQuantize::WeightsDequantizeToFakeQuantize() callback = [=](ngraph::pattern::Matcher &m) { const auto &pattern_map = m.get_pattern_map(); - const auto &weights_node = as_type_ptr(pattern_map.at(weights)); + const auto &weights_node = ov::as_type_ptr(pattern_map.at(weights)); const auto &convert_node = pattern_map.at(convert); const auto &multiply_node = pattern_map.at(mul); const auto &scale_node = pattern_map.at(mul_c); diff --git a/inference-engine/src/transformations/src/transformations/low_precision/disable_convert_constant_folding_on_const_path.cpp b/inference-engine/src/transformations/src/transformations/low_precision/disable_convert_constant_folding_on_const_path.cpp index 44d05860c4f..15dc27bada2 100644 --- a/inference-engine/src/transformations/src/transformations/low_precision/disable_convert_constant_folding_on_const_path.cpp +++ b/inference-engine/src/transformations/src/transformations/low_precision/disable_convert_constant_folding_on_const_path.cpp @@ -52,8 +52,8 @@ ngraph::pass::DisableConvertConstantFoldingOnConstPath::DisableConvertConstantFo return false; } auto child = target_inputs.begin()->get_node(); - if (is_type(parent) && - (is_type(child) || is_type(child))) { + if (ov::is_type(parent) && + (ov::is_type(child) || ov::is_type(child))) { auto& rtInfo = convert->get_rt_info(); rtInfo["DISABLED_CONSTANT_FOLDING"] = std::make_shared>(""); return true; diff --git a/inference-engine/src/transformations/src/transformations/op_conversions/convert_batch_to_space.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/convert_batch_to_space.cpp index 214e1359188..7f5c0e67f5e 100644 --- a/inference-engine/src/transformations/src/transformations/op_conversions/convert_batch_to_space.cpp +++ b/inference-engine/src/transformations/src/transformations/op_conversions/convert_batch_to_space.cpp @@ -150,9 +150,9 @@ void ngraph::pass::ConvertBatchToSpace::convert_batch_to_space_by_elements() { auto crops_begin = batch_to_space->input_value(2); auto crops_end = batch_to_space->input_value(3); - const auto block_const = as_type_ptr(block.get_node_shared_ptr()); - const auto crops_begin_const = as_type_ptr(crops_begin.get_node_shared_ptr()); - const auto crops_end_const = as_type_ptr(crops_end.get_node_shared_ptr()); + const auto block_const = ov::as_type_ptr(block.get_node_shared_ptr()); + const auto crops_begin_const = ov::as_type_ptr(crops_begin.get_node_shared_ptr()); + const auto crops_end_const = ov::as_type_ptr(crops_end.get_node_shared_ptr()); const std::vector &block_values = block_const->cast_vector(); const std::vector &crops_end_values = crops_end_const->cast_vector(); diff --git 
a/inference-engine/src/transformations/src/transformations/op_conversions/convert_space_to_batch.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/convert_space_to_batch.cpp index 250d4e1674b..9ba7d30c1f1 100644 --- a/inference-engine/src/transformations/src/transformations/op_conversions/convert_space_to_batch.cpp +++ b/inference-engine/src/transformations/src/transformations/op_conversions/convert_space_to_batch.cpp @@ -142,9 +142,9 @@ void ngraph::pass::ConvertSpaceToBatch::convert_space_to_batch_by_elements() { auto pads_begin = space_to_batch->input_value(2); auto pads_end = space_to_batch->input_value(3); - const auto block_const = as_type_ptr(block.get_node_shared_ptr()); - const auto pads_begin_const = as_type_ptr(pads_begin.get_node_shared_ptr()); - const auto pads_end_const = as_type_ptr(pads_end.get_node_shared_ptr()); + const auto block_const = ov::as_type_ptr(block.get_node_shared_ptr()); + const auto pads_begin_const = ov::as_type_ptr(pads_begin.get_node_shared_ptr()); + const auto pads_end_const = ov::as_type_ptr(pads_end.get_node_shared_ptr()); if (!block_const || !pads_begin_const || !pads_end_const) { return false; diff --git a/inference-engine/src/transformations/src/transformations/op_conversions/convert_subtract.cpp b/inference-engine/src/transformations/src/transformations/op_conversions/convert_subtract.cpp index 30d621cfb5c..cd91573bffa 100644 --- a/inference-engine/src/transformations/src/transformations/op_conversions/convert_subtract.cpp +++ b/inference-engine/src/transformations/src/transformations/op_conversions/convert_subtract.cpp @@ -37,15 +37,16 @@ ngraph::pass::ConvertSubtract::ConvertSubtract() { if (subChildren.size() == 1ul) { const std::shared_ptr child = subChildren.begin()->get_node()->shared_from_this(); if (child != nullptr) { - if (is_type(child) || - is_type(child) || - is_type(child) || - is_type(child) || - is_type(child) || - (is_type(child) && + if (ov::is_type(child) || + ov::is_type(child) || + ov::is_type(child) || + ov::is_type(child) || + ov::is_type(child) || + (ov::is_type(child) && (child->output(0).get_target_inputs().size() == 1ul) && - (is_type(child->output(0).get_target_inputs().begin()->get_node()->shared_from_this()) || - is_type(child->output(0).get_target_inputs().begin()->get_node()->shared_from_this())))) { + (ov::is_type(child->output(0).get_target_inputs().begin()->get_node()->shared_from_this()) || + ov::is_type(child->output(0).get_target_inputs().begin() + ->get_node()->shared_from_this())))) { const auto input1Type = sub->input(0).get_element_type(); const auto input2Type = sub->input(1).get_element_type(); if (((input1Type == element::u8) && (input2Type == element::u8)) || diff --git a/inference-engine/src/transformations/src/transformations/rt_info/dequantization_attribute.cpp b/inference-engine/src/transformations/src/transformations/rt_info/dequantization_attribute.cpp index 829f5a0d85b..c2858c6d8f5 100644 --- a/inference-engine/src/transformations/src/transformations/rt_info/dequantization_attribute.cpp +++ b/inference-engine/src/transformations/src/transformations/rt_info/dequantization_attribute.cpp @@ -27,7 +27,7 @@ std::string ngraph::getDequantization(const std::shared_ptr& node) if (!rtInfo.count(getDequantizationWrapper::type_info.name)) return ""; const auto& attr = rtInfo.at(getDequantizationWrapper::type_info.name); - DequantizationAttr pp = as_type_ptr(attr)->get(); + DequantizationAttr pp = ov::as_type_ptr(attr)->get(); return pp.getDequantizationAttr(); } diff --git 
a/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp b/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp index 01af6fc50d4..7666d633ad9 100644 --- a/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp +++ b/inference-engine/src/transformations/src/transformations/rt_info/fused_names_attribute.cpp @@ -41,7 +41,7 @@ std::string ngraph::getFusedNames(const std::shared_ptr &node) { if (!rtInfo.count(FusedNamesWrapper::type_info.name)) return {}; const auto &attr = rtInfo.at(FusedNamesWrapper::type_info.name); - FusedNames fusedNames = as_type_ptr(attr)->get(); + FusedNames fusedNames = ov::as_type_ptr(attr)->get(); return fusedNames.getNames(); } @@ -54,7 +54,7 @@ std::vector ngraph::getFusedNamesVector(const std::shared_ptr(attr)->get(); + FusedNames fusedNames = ov::as_type_ptr(attr)->get(); return fusedNames.getVectorNames(); } diff --git a/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp b/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp index 86244edd94d..bfd61ab8e9e 100644 --- a/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp +++ b/inference-engine/src/transformations/src/transformations/rt_info/primitives_priority_attribute.cpp @@ -30,7 +30,7 @@ std::string ngraph::getPrimitivesPriority(const std::shared_ptr &n if (!rtInfo.count(PrimitivesPriorityWrapper::type_info.name)) return ""; const auto &attr = rtInfo.at(PrimitivesPriorityWrapper::type_info.name); - PrimitivesPriority pp = as_type_ptr(attr)->get(); + PrimitivesPriority pp = ov::as_type_ptr(attr)->get(); return pp.getPrimitivesPriority(); } diff --git a/inference-engine/src/transformations/src/transformations/serialize.cpp b/inference-engine/src/transformations/src/transformations/serialize.cpp index 0ce92c208ea..3fbb1463f08 100644 --- a/inference-engine/src/transformations/src/transformations/serialize.cpp +++ b/inference-engine/src/transformations/src/transformations/serialize.cpp @@ -215,13 +215,13 @@ class XmlSerializer : public ngraph::AttributeVisitor { input.append_attribute("external_port_id").set_value(input_description->m_input_index); input.append_attribute("internal_layer_id").set_value(parameter_mapping[input_description->m_body_parameter_index].c_str()); - if (auto slice_input = as_type_ptr(input_description)) { + if (auto slice_input = ov::as_type_ptr(input_description)) { input.prepend_attribute("axis").set_value(slice_input->m_axis); input.append_attribute("start").set_value(slice_input->m_start); input.append_attribute("end").set_value(slice_input->m_end); input.append_attribute("stride").set_value(slice_input->m_stride); input.append_attribute("part_size").set_value(slice_input->m_part_size); - } else if (auto merged_input = as_type_ptr(input_description)) { + } else if (auto merged_input = ov::as_type_ptr(input_description)) { pugi::xml_node back_edges = m_xml_node.parent().child("back_edges"); if (!back_edges) { back_edges = m_xml_node.parent().insert_child_after("back_edges", port_map); @@ -249,7 +249,7 @@ class XmlSerializer : public ngraph::AttributeVisitor { output.append_attribute("external_port_id").set_value(input_count + output_description->m_output_index); output.append_attribute("internal_layer_id").set_value(result_mapping[output_description->m_body_value_index].c_str()); - if (auto concat_output = as_type_ptr(output_description)) { 
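// The three rt_info getters above (dequantization, fused names, primitives
// priority) share one retrieve-and-cast idiom: look the attribute up by its
// wrapper's type_info name, then downcast the stored Variant with
// ov::as_type_ptr. A minimal sketch of that idiom, assuming a hypothetical
// string-valued attribute kept in a ngraph::VariantWrapper<std::string>;
// get_string_attr is illustrative and not part of this patch:
#include <memory>
#include <string>
#include "ngraph/node.hpp"
#include "ngraph/variant.hpp"
#include "openvino/core/type.hpp"

std::string get_string_attr(const std::shared_ptr<ngraph::Node>& node, const std::string& key) {
    const auto& rt_info = node->get_rt_info();
    const auto it = rt_info.find(key);
    if (it == rt_info.end())
        return "";
    // as_type_ptr yields nullptr when the stored variant holds another type.
    const auto wrapper = ov::as_type_ptr<ngraph::VariantWrapper<std::string>>(it->second);
    return wrapper ? wrapper->get() : "";
}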
+ if (auto concat_output = ov::as_type_ptr(output_description)) { output.prepend_attribute("axis").set_value(concat_output->m_axis); output.append_attribute("start").set_value(concat_output->m_start); output.append_attribute("end").set_value(concat_output->m_end); diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_backprop_data_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_backprop_data_transformation.cpp index dc81761eb5b..8e93a05dfa1 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_backprop_data_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_backprop_data_transformation.cpp @@ -118,13 +118,13 @@ public: outputShape, netPrecision, testValues.actual.fakeQuantizeOnWeights, - as_type_ptr(actualWeights)); + ov::as_type_ptr(actualWeights)); } else { actualWeights = ngraph::builder::subgraph::ConvolutionBackpropDataFunction::getWeights( outputShape, netPrecision, testValues.actual.dequantizationOnWeights, - as_type_ptr(actualWeights)); + ov::as_type_ptr(actualWeights)); } actualFunction = ngraph::builder::subgraph::ConvolutionBackpropDataFunction::getOriginal( @@ -152,13 +152,13 @@ public: outputShape, netPrecision, testValues.actual.fakeQuantizeOnWeights, - as_type_ptr(refWeights)); + ov::as_type_ptr(refWeights)); } else { refWeights = ngraph::builder::subgraph::ConvolutionBackpropDataFunction::getWeights( outputShape, netPrecision, testValues.expected.dequantizationOnWeights, - as_type_ptr(refWeights)); + ov::as_type_ptr(refWeights)); } referenceFunction = ngraph::builder::subgraph::ConvolutionBackpropDataFunction::getReference( diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_transformation.cpp index 394ea243b3c..adff5124470 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/convolution_transformation.cpp @@ -83,7 +83,7 @@ public: OutputVector convertedOutput(1); convertOnWeights->constant_fold(convertedOutput, convertOnWeights->input_values()); const auto convertedWeights = convertedOutput[0].get_node_shared_ptr(); - testValues.expected.weights = as_type_ptr(convertedWeights); + testValues.expected.weights = ov::as_type_ptr(convertedWeights); } referenceFunction = ngraph::builder::subgraph::ConvolutionFunction::getReference( diff --git a/inference-engine/tests/functional/inference_engine/snippets/registers.cpp b/inference-engine/tests/functional/inference_engine/snippets/registers.cpp index 1058b5d78b3..e76d8318f59 100644 --- a/inference-engine/tests/functional/inference_engine/snippets/registers.cpp +++ b/inference-engine/tests/functional/inference_engine/snippets/registers.cpp @@ -52,7 +52,7 @@ TEST(TransformationTests, AssignRegisters) { auto& rt = op->get_rt_info(); if (auto rinfo = rt["reginfo"]) { - auto reginfo = as_type_ptr>>(rinfo)->get(); + auto reginfo = ov::as_type_ptr>>(rinfo)->get(); auto reg = reginfo[0]; ASSERT_TRUE(ref_registers[op->get_friendly_name()] == reg); total_ops++; @@ -126,7 +126,7 @@ TEST(TransformationTests, AssignRegisters2) { auto& rt = op->get_rt_info(); if (auto rinfo = rt["reginfo"]) { - auto reginfo = as_type_ptr>>(rinfo)->get(); + auto reginfo = 
ov::as_type_ptr>>(rinfo)->get(); auto reg = reginfo[0]; ASSERT_TRUE(ref_registers[op->get_friendly_name()] == reg); total_ops++; diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp index 9a4e12d78ea..cda84be9b22 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/include/lpt_ngraph_functions/common/builders.hpp @@ -55,7 +55,7 @@ std::shared_ptr makeElementwise(const std::shared_ptr data, ngraph::pass::low_precision::NetworkHelper::setOutDataPrecision(operation, description.outPrecision); } - if (is_type(operation) || is_type(operation)) { + if (ov::is_type(operation) || ov::is_type(operation)) { replace_node( operationConst, ngraph::pass::low_precision::fold(operationConst, data->get_output_element_type(0))); diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp index 438af17ff92..77c1bcac6fe 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/add_function.cpp @@ -139,11 +139,11 @@ std::shared_ptr AddFunction::getOriginal( ngraph::ResultVector results {std::make_shared(output)}; ngraph::ParameterVector parameters; if (constInput == -1) { - parameters = { as_type_ptr(input1), as_type_ptr(input2) }; + parameters = { ov::as_type_ptr(input1), ov::as_type_ptr(input2) }; } else if (constInput == 0) { - parameters = { as_type_ptr(input2) }; + parameters = { ov::as_type_ptr(input2) }; } else if (constInput == 1) { - parameters = { as_type_ptr(input1) }; + parameters = { ov::as_type_ptr(input1) }; } else { throw std::runtime_error("Unexpected constant input index"); } @@ -226,7 +226,7 @@ std::shared_ptr AddFunction::getReference( auto dequantizationStructure1 = dequantization1; dequantizationStructure1.multiply.outPrecision = dequantizationAfter.empty() ? precision : element::f32; - const auto dequantizationOp1 = is_type(parent1) ? parent1 : makeDequantization(parent1, dequantizationStructure1); + const auto dequantizationOp1 = ov::is_type(parent1) ? parent1 : makeDequantization(parent1, dequantizationStructure1); std::shared_ptr input2; if (constInputIndex == 1) { @@ -292,7 +292,7 @@ std::shared_ptr AddFunction::getReference( auto dequantizationStructure2 = dequantization2; dequantizationStructure2.multiply.outPrecision = dequantizationAfter.empty() ? precision : element::f32; - const auto dequantizationOp2 = is_type(parent) ? parent : makeDequantization(parent, dequantizationStructure2); + const auto dequantizationOp2 = ov::is_type(parent) ? parent : makeDequantization(parent, dequantizationStructure2); const std::shared_ptr add = operationType == "Add" ? 
std::dynamic_pointer_cast(std::make_shared>( @@ -325,11 +325,11 @@ std::shared_ptr AddFunction::getReference( ngraph::ParameterVector parameters; if (constInputIndex == -1) { - parameters = { as_type_ptr(input1), as_type_ptr(input2) }; + parameters = { ov::as_type_ptr(input1), ov::as_type_ptr(input2) }; } else if (constInputIndex == 0) { - parameters = { as_type_ptr(input2) }; + parameters = { ov::as_type_ptr(input2) }; } else if (constInputIndex == 1) { - parameters = { as_type_ptr(input1) }; + parameters = { ov::as_type_ptr(input1) }; } else { throw std::runtime_error("Unexpected constant input index"); } diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp index a387627bb0c..c6153dffa2d 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/common/builders.cpp @@ -238,7 +238,7 @@ std::shared_ptr makeFakeQuantize( const Output& output, const ngraph::element::Type precision, const FakeQuantizeOnData& fqOnData) { - return as_type_ptr(ngraph::builder::makeFakeQuantize( + return ov::as_type_ptr(ngraph::builder::makeFakeQuantize( output, precision, fqOnData.quantizationLevel, diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_backprop_data_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_backprop_data_function.cpp index 2e841c399d9..b7462c602b6 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_backprop_data_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_backprop_data_function.cpp @@ -82,7 +82,7 @@ std::shared_ptr ConvolutionBackpropDataFunction::getWeights( dequantizationStructure.subtract.constantPrecision = dequantizationOnWeights.subtract.constantPrecision; } if (weights->get_element_type().is_real()) { - weights = as_type_ptr(fold(weights, netPrecision)); + weights = ov::as_type_ptr(fold(weights, netPrecision)); } const auto dq = makeDequantization(weights, dequantizationStructure); diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp index 886cfa2e6aa..5c512508455 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/convolution_function.cpp @@ -45,7 +45,7 @@ std::shared_ptr ConvolutionFunction::getOriginal( if (weights->cast_vector().size() == 1ul) { auto targetShape = ngraph::Shape{ outputChannelsCount, inputChannelsCount, 1, 1 }; - weights = as_type_ptr(fold( + weights = ov::as_type_ptr(fold( weights, op::Constant::create(ngraph::element::i64, Shape{ targetShape.size() }, targetShape))); } @@ -234,7 +234,7 @@ std::shared_ptr ConvolutionFunction::getReference( if (weights->cast_vector().size() == 1ul) { auto targetShape = ngraph::Shape{ outputChannelsCount, inputChannelsCount, 1, 1 }; - weights = as_type_ptr(fold( + weights = ov::as_type_ptr(fold( weights, op::Constant::create(ngraph::element::i64, Shape{ targetShape.size() }, targetShape))); } @@ -295,7 +295,7 @@ std::shared_ptr ConvolutionFunction::get( const auto input = std::make_shared(precision, ngraph::Shape(inputShape)); input->set_friendly_name("input"); - const std::shared_ptr 
fqOnData = as_type_ptr(ngraph::builder::makeFakeQuantize( + const std::shared_ptr fqOnData = ov::as_type_ptr(ngraph::builder::makeFakeQuantize( input, precision, fakeQuantizeOnData.quantizationLevel, diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp index 1d5e452ac99..aedc7bf842b 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/get_dequantization_function.cpp @@ -40,7 +40,7 @@ std::shared_ptr GetDequantizationFunction::get( return std::make_shared( ngraph::ResultVector{ std::make_shared(parent) }, - ngraph::ParameterVector{ as_type_ptr(input) }, + ngraph::ParameterVector{ ov::as_type_ptr(input) }, "DequantizationFunction"); } @@ -84,7 +84,7 @@ std::shared_ptr GetDequantizationFunction::get( return std::make_shared( ngraph::ResultVector{ std::make_shared(parent) }, - ngraph::ParameterVector{ as_type_ptr(input) }, + ngraph::ParameterVector{ ov::as_type_ptr(input) }, "DequantizationFunction"); } @@ -113,7 +113,7 @@ std::shared_ptr GetDequantizationFunction::getOriginal( return std::make_shared( ngraph::ResultVector{ std::make_shared(multiply) }, - ngraph::ParameterVector{ as_type_ptr(input) }, + ngraph::ParameterVector{ ov::as_type_ptr(input) }, "Dequantization"); } @@ -121,7 +121,7 @@ std::shared_ptr GetDequantizationFunction::getReference( ngraph::pass::low_precision::FakeQuantizeDequantization dequantization) { return std::make_shared( ngraph::ResultVector{ std::make_shared(dequantization.multiply) }, - ngraph::ParameterVector{ as_type_ptr(dequantization.data.get_node_shared_ptr()) }, + ngraph::ParameterVector{ ov::as_type_ptr(dequantization.data.get_node_shared_ptr()) }, "Dequantization"); } diff --git a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/mock_mo_frontend.hpp b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/mock_mo_frontend.hpp index 5293a72776e..2e077e2f0e5 100644 --- a/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/mock_mo_frontend.hpp +++ b/model-optimizer/unit_tests/mock_mo_frontend/mock_mo_ngraph_frontend/mock_mo_frontend.hpp @@ -425,9 +425,9 @@ public: private: InputModel::Ptr load_impl(const std::vector>& params) const override { - if (params.size() > 0 && is_type>(params[0])) + if (params.size() > 0 && ov::is_type>(params[0])) { - auto path = as_type_ptr>(params[0])->get(); + auto path = ov::as_type_ptr>(params[0])->get(); m_stat.m_load_paths.push_back(path); } return std::make_shared(); @@ -436,9 +436,9 @@ private: bool supported_impl(const std::vector>& params) const override { m_stat.m_supported++; - if (params.size() > 0 && is_type>(params[0])) + if (params.size() > 0 && ov::is_type>(params[0])) { - auto path = as_type_ptr>(params[0])->get(); + auto path = ov::as_type_ptr>(params[0])->get(); if (path.find(".test_mo_mock_mdl") != std::string::npos) { return true; diff --git a/ngraph/core/include/ngraph/pattern/matcher.hpp b/ngraph/core/include/ngraph/pattern/matcher.hpp index dce20cf2748..f361365bdcb 100644 --- a/ngraph/core/include/ngraph/pattern/matcher.hpp +++ b/ngraph/core/include/ngraph/pattern/matcher.hpp @@ -103,7 +103,7 @@ public: static std::shared_ptr unique_match(std::shared_ptr node) { std::shared_ptr matched; for (auto arg : node->input_values()) { - if (auto t_casted = as_type_ptr(arg.get_node_shared_ptr())) { + 
if (auto t_casted = ov::as_type_ptr(arg.get_node_shared_ptr())) { if (matched) { throw ngraph_error("There's more than two arguments of the same type"); } else { diff --git a/ngraph/core/include/ngraph/pattern/op/pattern.hpp b/ngraph/core/include/ngraph/pattern/op/pattern.hpp index 9ab0ae16d17..931c3d9e4e0 100644 --- a/ngraph/core/include/ngraph/pattern/op/pattern.hpp +++ b/ngraph/core/include/ngraph/pattern/op/pattern.hpp @@ -29,7 +29,7 @@ PatternValueMap as_pattern_value_map(const PatternMap& pattern_map); template std::function)> has_class() { auto pred = [](std::shared_ptr node) -> bool { - return is_type(node); + return ov::is_type(node); }; return pred; diff --git a/ngraph/core/include/ngraph/type.hpp b/ngraph/core/include/ngraph/type.hpp index 8971c77da9f..6980162a3f3 100644 --- a/ngraph/core/include/ngraph/type.hpp +++ b/ngraph/core/include/ngraph/type.hpp @@ -4,62 +4,10 @@ #pragma once -#include -#include -#include -#include -#include -#include -#include - -#include "ngraph/ngraph_visibility.hpp" +#include "openvino/core/type.hpp" namespace ngraph { -/// Supports three functions, is_type, as_type, and as_type_ptr for type-safe -/// dynamic conversions via static_cast/static_ptr_cast without using C++ RTTI. -/// Type must have a static type_info member and a virtual get_type_info() member that -/// returns a reference to its type_info member. - -/// Type information for a type system without inheritance; instances have exactly one type not -/// related to any other type. -struct NGRAPH_API DiscreteTypeInfo { - const char* name; - uint64_t version; - // A pointer to a parent type info; used for casting and inheritance traversal, not for - // exact type identification - const DiscreteTypeInfo* parent; - - DiscreteTypeInfo() = default; - - constexpr DiscreteTypeInfo(const char* _name, uint64_t _version, const DiscreteTypeInfo* _parent = nullptr) - : name(_name), - version(_version), - parent(_parent) {} - - bool is_castable(const DiscreteTypeInfo& target_type) const { - return *this == target_type || (parent && parent->is_castable(target_type)); - } - - // For use as a key - bool operator<(const DiscreteTypeInfo& b) const { - return version < b.version || (version == b.version && strcmp(name, b.name) < 0); - } - bool operator<=(const DiscreteTypeInfo& b) const { - return version < b.version || (version == b.version && strcmp(name, b.name) <= 0); - } - bool operator>(const DiscreteTypeInfo& b) const { - return version < b.version || (version == b.version && strcmp(name, b.name) > 0); - } - bool operator>=(const DiscreteTypeInfo& b) const { - return version < b.version || (version == b.version && strcmp(name, b.name) >= 0); - } - bool operator==(const DiscreteTypeInfo& b) const { - return version == b.version && strcmp(name, b.name) == 0; - } - bool operator!=(const DiscreteTypeInfo& b) const { - return version != b.version || strcmp(name, b.name) != 0; - } -}; +using ov::DiscreteTypeInfo; /// \brief Tests if value is a pointer/shared_ptr that can be statically cast to a /// Type*/shared_ptr @@ -68,7 +16,7 @@ typename std::enable_if< std::is_convertible()->get_type_info().is_castable(Type::type_info)), bool>::value, bool>::type is_type(Value value) { - return value->get_type_info().is_castable(Type::type_info); + return ov::is_type(value); } /// Casts a Value* to a Type* if it is of type Type, nullptr otherwise @@ -76,7 +24,7 @@ template typename std::enable_if(std::declval())), Type*>::value, Type*>::type as_type(Value value) { - return is_type(value) ? 
static_cast(value) : nullptr; + return ov::as_type(value); } /// Casts a std::shared_ptr to a std::shared_ptr if it is of type @@ -86,13 +34,6 @@ typename std::enable_if< std::is_convertible(std::declval())), std::shared_ptr>::value, std::shared_ptr>::type as_type_ptr(Value value) { - return is_type(value) ? std::static_pointer_cast(value) : std::shared_ptr(); + return ov::as_type_ptr(value); } } // namespace ngraph - -namespace std { -template <> -struct NGRAPH_API hash { - size_t operator()(const ngraph::DiscreteTypeInfo& k) const; -}; -} // namespace std diff --git a/ngraph/core/include/openvino/core/type.hpp b/ngraph/core/include/openvino/core/type.hpp new file mode 100644 index 00000000000..78d1cc85e8d --- /dev/null +++ b/ngraph/core/include/openvino/core/type.hpp @@ -0,0 +1,98 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "openvino/core/core_visibility.hpp" + +namespace ov { +/// Supports three functions, ov::is_type, ov::as_type, and ov::as_type_ptr for type-safe +/// dynamic conversions via static_cast/static_ptr_cast without using C++ RTTI. +/// Type must have a static type_info member and a virtual get_type_info() member that +/// returns a reference to its type_info member. + +/// Type information for a type system without inheritance; instances have exactly one type not +/// related to any other type. +struct OPENVINO_API DiscreteTypeInfo { + const char* name; + uint64_t version; + // A pointer to a parent type info; used for casting and inheritance traversal, not for + // exact type identification + const DiscreteTypeInfo* parent; + + DiscreteTypeInfo() = default; + + constexpr DiscreteTypeInfo(const char* _name, uint64_t _version, const DiscreteTypeInfo* _parent = nullptr) + : name(_name), + version(_version), + parent(_parent) {} + + bool is_castable(const DiscreteTypeInfo& target_type) const { + return *this == target_type || (parent && parent->is_castable(target_type)); + } + + // For use as a key + bool operator<(const DiscreteTypeInfo& b) const { + return version < b.version || (version == b.version && strcmp(name, b.name) < 0); + } + bool operator<=(const DiscreteTypeInfo& b) const { + return version < b.version || (version == b.version && strcmp(name, b.name) <= 0); + } + bool operator>(const DiscreteTypeInfo& b) const { + return version < b.version || (version == b.version && strcmp(name, b.name) > 0); + } + bool operator>=(const DiscreteTypeInfo& b) const { + return version < b.version || (version == b.version && strcmp(name, b.name) >= 0); + } + bool operator==(const DiscreteTypeInfo& b) const { + return version == b.version && strcmp(name, b.name) == 0; + } + bool operator!=(const DiscreteTypeInfo& b) const { + return version != b.version || strcmp(name, b.name) != 0; + } +}; + +/// \brief Tests if value is a pointer/shared_ptr that can be statically cast to a +/// Type*/shared_ptr +template +typename std::enable_if< + std::is_convertible()->get_type_info().is_castable(Type::type_info)), bool>::value, + bool>::type +is_type(Value value) { + return value->get_type_info().is_castable(Type::type_info); +} + +/// Casts a Value* to a Type* if it is of type Type, nullptr otherwise +template +typename std::enable_if(std::declval())), Type*>::value, + Type*>::type +as_type(Value value) { + return ov::is_type(value) ? 
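// A usage sketch of the contract this header documents: any class with a
// static DiscreteTypeInfo type_info member and a matching get_type_info()
// override can be probed and downcast without RTTI. The inspect() helper is
// illustrative only, not part of this patch:
#include <memory>
#include "ngraph/opsets/opset1.hpp"
#include "openvino/core/type.hpp"

void inspect(const std::shared_ptr<ngraph::Node>& node) {
    // is_type walks DiscreteTypeInfo::is_castable up the parent chain, so it
    // also matches types that declare the queried type_info as an ancestor.
    if (ov::is_type<ngraph::opset1::Constant>(node)) {
        // as_type_ptr re-checks the type and returns an empty shared_ptr for
        // any other node, which makes the static cast it performs safe.
        const auto constant = ov::as_type_ptr<ngraph::opset1::Constant>(node);
        const auto values = constant->cast_vector<float>();
        (void)values;
    }
}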
static_cast(value) : nullptr; +} + +/// Casts a std::shared_ptr to a std::shared_ptr if it is of type +/// Type, nullptr otherwise +template +typename std::enable_if< + std::is_convertible(std::declval())), std::shared_ptr>::value, + std::shared_ptr>::type +as_type_ptr(Value value) { + return ov::is_type(value) ? std::static_pointer_cast(value) : std::shared_ptr(); +} +} // namespace ov + +namespace std { +template <> +struct OPENVINO_API hash { + size_t operator()(const ov::DiscreteTypeInfo& k) const; +}; +} // namespace std diff --git a/ngraph/core/src/function.cpp b/ngraph/core/src/function.cpp index c3c3375b224..88de5c26743 100644 --- a/ngraph/core/src/function.cpp +++ b/ngraph/core/src/function.cpp @@ -388,7 +388,7 @@ int64_t Function::get_parameter_index(const std::shared_ptr& para int64_t Function::get_result_index(const Output& value) const { int64_t pos = 0; - if (is_type(value.get_node_shared_ptr())) { + if (ov::is_type(value.get_node_shared_ptr())) { auto result = value.get_node_shared_ptr(); for (auto r : get_results()) { if (r == result) { diff --git a/ngraph/core/src/graph_util.cpp b/ngraph/core/src/graph_util.cpp index 81e24b087b9..f5bb89d84a0 100644 --- a/ngraph/core/src/graph_util.cpp +++ b/ngraph/core/src/graph_util.cpp @@ -366,7 +366,7 @@ std::shared_ptr ngraph::clone_function(const ngraph::Function& // get cloned function results and sinks and parameters ResultVector cloned_results; for (shared_ptr node : func.get_results()) { - auto result = as_type_ptr(node_map.at(node.get())); + auto result = ov::as_type_ptr(node_map.at(node.get())); if (!result) { throw ngraph_error("Results should be of type op::Result"); } @@ -379,7 +379,7 @@ std::shared_ptr ngraph::clone_function(const ngraph::Function& std::vector> cloned_params; for (const auto& param : func.get_parameters()) { - cloned_params.push_back(as_type_ptr(node_map.at(param.get()))); + cloned_params.push_back(ov::as_type_ptr(node_map.at(param.get()))); } // create and return cloned function @@ -392,7 +392,7 @@ std::shared_ptr ngraph::clone_function(const ngraph::Function& } bool ngraph::is_equal_to_const_value(const std::string& const_value, const Output& reduce_constant) { - if (auto rc = as_type_ptr(reduce_constant.get_node_shared_ptr())) { + if (auto rc = ov::as_type_ptr(reduce_constant.get_node_shared_ptr())) { return (rc->get_all_data_elements_bitwise_identical() && rc->convert_value_to_string(0) == const_value); } else { return false; @@ -777,17 +777,17 @@ bool ngraph::check_for_cycles(const ngraph::Function* func, ngraph::NodeVector& bool ngraph::replace_output_update_name(Output output, const Output& replacement) { bool has_result_output = false; for (auto& target_input : output.get_target_inputs()) { - if (is_type(target_input.get_node())) { + if (ov::is_type(target_input.get_node())) { // ignore trivial elimination has_result_output = true; - if (is_type(replacement.get_node())) { + if (ov::is_type(replacement.get_node())) { return false; } break; } } if (!has_result_output || replacement.get_node()->get_users().size() == 1) { - if (has_result_output && !is_type(replacement.get_node())) { + if (has_result_output && !ov::is_type(replacement.get_node())) { replacement.get_node()->set_friendly_name(output.get_node()->get_friendly_name()); // Update output tensor name replacement.get_tensor().set_name(output.get_node()->get_friendly_name()); @@ -810,8 +810,8 @@ bool ngraph::replace_output_update_name(Output output, const Output& bool ngraph::replace_node_update_name(std::shared_ptr target, std::shared_ptr 
replacement) { for (auto& output : target->output(0).get_target_inputs()) { - if (as_type(replacement->input_value(0).get_node()) && - as_type(output.get_node())) { + if (ov::as_type(replacement->input_value(0).get_node()) && + ov::as_type(output.get_node())) { return false; } } diff --git a/ngraph/core/src/node.cpp b/ngraph/core/src/node.cpp index cbc1c9611df..87524021306 100644 --- a/ngraph/core/src/node.cpp +++ b/ngraph/core/src/node.cpp @@ -632,7 +632,8 @@ ResultVector ngraph::as_result_vector(const OutputVector& values) { ResultVector result; for (auto value : values) { shared_ptr node = value.get_node_shared_ptr(); - result.push_back(is_type(node) ? as_type_ptr(node) : make_shared(value)); + result.push_back(ov::is_type(node) ? ov::as_type_ptr(node) + : make_shared(value)); } return result; } @@ -808,14 +809,15 @@ bool Node::constant_fold(OutputVector& output_values, const OutputVector& input_ // If all the inputs are constants, try to evaluate the outputs bool all_constants = std::all_of(input_values.begin(), input_values.end(), [](const Output& input) { - return as_type_ptr(input.get_node_shared_ptr()); + return ov::as_type_ptr(input.get_node_shared_ptr()); }); if (!all_constants) return false; HostTensorVector input_tensors; for (const auto& input : input_values) { - auto host_tensor = make_shared(as_type_ptr(input.get_node_shared_ptr())); + auto host_tensor = + make_shared(ov::as_type_ptr(input.get_node_shared_ptr())); input_tensors.push_back(host_tensor); } HostTensorVector output_tensors; diff --git a/ngraph/core/src/op/assign.cpp b/ngraph/core/src/op/assign.cpp index 5ec8f6e9b4f..3476abd5d87 100644 --- a/ngraph/core/src/op/assign.cpp +++ b/ngraph/core/src/op/assign.cpp @@ -35,7 +35,7 @@ void op::v3::Assign::validate_and_infer_types() { } auto nodes = topological_sort(start_nodes); for (const auto& node : nodes) { - if (auto read_value = as_type_ptr(node)) { + if (auto read_value = ov::as_type_ptr(node)) { if (read_value->get_variable_id() == m_variable_id) m_variable = read_value->get_variable(); } @@ -132,4 +132,4 @@ bool op::v6::Assign::has_evaluate() const { bool op::v6::Assign::constant_fold(OutputVector& output_values, const OutputVector& inputs_values) { return false; -} \ No newline at end of file +} diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp index 1b9648adbb7..ededa0bb74e 100644 --- a/ngraph/core/src/op/loop.cpp +++ b/ngraph/core/src/op/loop.cpp @@ -156,7 +156,7 @@ void op::v5::Loop::validate_and_infer_types() { for (const auto& input_description : m_input_descriptions[0]) { auto index = input_description->m_input_index; - if (auto slice_input_description = as_type_ptr(input_description)) { + if (auto slice_input_description = ov::as_type_ptr(input_description)) { auto body_parameter = m_bodies[0]->get_parameters().at(slice_input_description->m_body_parameter_index); const auto& input_partial_shape = inputs().at(index).get_source_output().get_partial_shape(); if (input_partial_shape.rank().is_dynamic()) { @@ -168,7 +168,7 @@ void op::v5::Loop::validate_and_infer_types() { out_shape[axis] = slice_input_description->m_part_size; body_parameter->set_partial_shape(out_shape); } - } else if (auto merged_input_description = as_type_ptr(input_description)) { + } else if (auto merged_input_description = ov::as_type_ptr(input_description)) { auto body_value = m_bodies[0]->get_results().at(merged_input_description->m_body_value_index); auto body_parameter = m_bodies[0]->get_parameters().at(merged_input_description->m_body_parameter_index); @@ -178,7 
+178,7 @@ void op::v5::Loop::validate_and_infer_types() { body_parameter->set_partial_shape(input_partial_shape); } else if (auto invariant_input_description = - as_type_ptr(input_description)) { + ov::as_type_ptr(input_description)) { auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index); auto body_param_partial_shape = body_parameter->get_partial_shape(); @@ -197,7 +197,8 @@ void op::v5::Loop::validate_and_infer_types() { auto body_value = m_bodies[0]->get_results().at(output_description->m_body_value_index)->input_value(0); - if (auto concat_output_description = as_type_ptr(output_description)) { + if (auto concat_output_description = + ov::as_type_ptr(output_description)) { const auto& body_value_partial_shape = body_value.get_partial_shape(); auto out_shape = body_value_partial_shape; if (zero_number_of_iter) { @@ -219,7 +220,7 @@ void op::v5::Loop::validate_and_infer_types() { } else if (auto body_output_description = - as_type_ptr(output_description)) { + ov::as_type_ptr(output_description)) { const PartialShape& ps = body_value.get_partial_shape(); if (ps.is_dynamic()) { set_output_type(index, body_value.get_element_type(), ps); diff --git a/ngraph/core/src/op/non_max_suppression.cpp b/ngraph/core/src/op/non_max_suppression.cpp index cfc7d4a9b0a..4a02e7e6add 100644 --- a/ngraph/core/src/op/non_max_suppression.cpp +++ b/ngraph/core/src/op/non_max_suppression.cpp @@ -772,7 +772,7 @@ bool op::v5::NonMaxSuppression::is_soft_nms_sigma_constant_and_default() const { if (inputs().size() < 6 || !ngraph::op::is_constant(soft_nms_sigma_node)) { return false; } - const auto soft_nms_sigma_input = as_type_ptr(soft_nms_sigma_node); + const auto soft_nms_sigma_input = ov::as_type_ptr(soft_nms_sigma_node); return soft_nms_sigma_input->cast_vector().at(0) == 0.0f; } diff --git a/ngraph/core/src/op/parameter.cpp b/ngraph/core/src/op/parameter.cpp index ae239639a94..1c3d32ed8fb 100644 --- a/ngraph/core/src/op/parameter.cpp +++ b/ngraph/core/src/op/parameter.cpp @@ -68,7 +68,7 @@ bool AttributeAdapter::visit_attributes(AttributeVisitor& visit } visitor.on_attribute(index.str(), id); if (!m_ref[i]) { - m_ref[i] = as_type_ptr(visitor.get_registered_node(id)); + m_ref[i] = ov::as_type_ptr(visitor.get_registered_node(id)); } } return true; diff --git a/ngraph/core/src/op/result.cpp b/ngraph/core/src/op/result.cpp index 9ec4b07129e..db23b6adbd2 100644 --- a/ngraph/core/src/op/result.cpp +++ b/ngraph/core/src/op/result.cpp @@ -82,7 +82,7 @@ bool AttributeAdapter::visit_attributes(AttributeVisitor& visitor) } visitor.on_attribute(index.str(), id); if (!m_ref[i]) { - m_ref[i] = as_type_ptr(visitor.get_registered_node(id)); + m_ref[i] = ov::as_type_ptr(visitor.get_registered_node(id)); } } return true; diff --git a/ngraph/core/src/op/tensor_iterator.cpp b/ngraph/core/src/op/tensor_iterator.cpp index 16f41726faf..9669769b8bc 100644 --- a/ngraph/core/src/op/tensor_iterator.cpp +++ b/ngraph/core/src/op/tensor_iterator.cpp @@ -35,7 +35,7 @@ void op::v0::TensorIterator::revalidate_and_infer_types_for_body_ops() { while (nodes_to_do.size() > 0) { auto node = nodes_to_do.top(); if (nodes_done.count(node) == 0) { - NGRAPH_CHECK(as_type_ptr(node) == nullptr, "No nested TensorIterator"); + NGRAPH_CHECK(ov::as_type_ptr(node) == nullptr, "No nested TensorIterator"); bool can_add = true; size_t arg_count = node->get_input_size(); for (size_t i = 0; i < arg_count; ++i) { @@ -84,7 +84,7 @@ void op::v0::TensorIterator::validate_and_infer_types() { for (const auto& 
input_description : m_input_descriptions[0]) { auto index = input_description->m_input_index; - if (auto slice_input_description = as_type_ptr(input_description)) { + if (auto slice_input_description = ov::as_type_ptr(input_description)) { auto body_parameter = body->get_parameters().at(slice_input_description->m_body_parameter_index); auto input_partial_shape = inputs().at(index).get_source_output().get_partial_shape(); if (input_partial_shape.is_static()) { @@ -105,7 +105,7 @@ void op::v0::TensorIterator::validate_and_infer_types() { } else { body_parameter->set_partial_shape(PartialShape::dynamic(input_partial_shape.rank())); } - } else if (auto merged_input_description = as_type_ptr(input_description)) { + } else if (auto merged_input_description = ov::as_type_ptr(input_description)) { auto body_value = m_bodies[0]->get_results().at(merged_input_description->m_body_value_index)->input(0); ends.push_back(body_value.get_node()->shared_from_this()); @@ -114,7 +114,7 @@ void op::v0::TensorIterator::validate_and_infer_types() { auto body_param_partial_shape = body_parameter->get_partial_shape(); auto input_partial_shape = inputs().at(index).get_source_output().get_partial_shape(); body_parameter->set_partial_shape(input_partial_shape); - } else if (auto invariant_input_description = as_type_ptr(input_description)) { + } else if (auto invariant_input_description = ov::as_type_ptr(input_description)) { auto body_parameter = m_bodies[0]->get_parameters().at(invariant_input_description->m_body_parameter_index); auto body_param_partial_shape = body_parameter->get_partial_shape(); @@ -134,7 +134,7 @@ void op::v0::TensorIterator::validate_and_infer_types() { auto body_value = m_bodies[0]->get_results().at(output_description->m_body_value_index)->input_value(0); - if (auto concat_output_description = as_type_ptr(output_description)) { + if (auto concat_output_description = ov::as_type_ptr(output_description)) { auto body_value_partial_shape = body_value.get_partial_shape(); set_output_type(index, body_value.get_element_type(), PartialShape::dynamic()); if (body_value_partial_shape.is_static()) { @@ -164,7 +164,7 @@ void op::v0::TensorIterator::validate_and_infer_types() { body_value.get_element_type(), PartialShape::dynamic(body_value.get_partial_shape().rank())); } - } else if (auto body_output_description = as_type_ptr(output_description)) { + } else if (auto body_output_description = ov::as_type_ptr(output_description)) { set_output_type(index, body_value.get_element_type(), body_value.get_partial_shape()); } } @@ -178,7 +178,7 @@ namespace { template bool has_slice_input_desc(const Desc& desc) { const auto is_slice_input_desc = +[](typename Desc::const_reference d) { - return is_type(d); + return ov::is_type(d); }; return std::any_of(begin(desc), end(desc), is_slice_input_desc); } @@ -190,7 +190,7 @@ void op::v0::TensorIterator::try_to_set_num_iterations_if_no_slice_inputs() { } for (const auto& output_description : m_output_descriptions[0]) { - if (auto concat = as_type_ptr(output_description)) { + if (auto concat = ov::as_type_ptr(output_description)) { m_num_iterations = ((std::abs(concat->m_end - concat->m_start)) / concat->m_part_size); break; } diff --git a/ngraph/core/src/op/topk.cpp b/ngraph/core/src/op/topk.cpp index a4ee8004da6..e11457eca43 100644 --- a/ngraph/core/src/op/topk.cpp +++ b/ngraph/core/src/op/topk.cpp @@ -281,7 +281,7 @@ size_t op::v1::TopK::read_k_from_constant_node(const shared_ptr& node, k_element_type, ")."); - const auto k_constant = as_type_ptr(node); + const auto 
k_constant = ov::as_type_ptr(node); size_t k = 0; @@ -471,7 +471,7 @@ void op::v3::TopK::validate_and_infer_types() { size_t op::v3::TopK::read_k_from_constant_node(const shared_ptr& node, const element::Type& k_element_type) const { - const auto k_constant = as_type_ptr(node); + const auto k_constant = ov::as_type_ptr(node); size_t k = 0; diff --git a/ngraph/core/src/op/util/arithmetic_reduction.cpp b/ngraph/core/src/op/util/arithmetic_reduction.cpp index 66fd64912c2..40b15103990 100644 --- a/ngraph/core/src/op/util/arithmetic_reduction.cpp +++ b/ngraph/core/src/op/util/arithmetic_reduction.cpp @@ -19,7 +19,7 @@ op::util::ArithmeticReduction::ArithmeticReduction(const Output& arg, cons : ReductionBase(arg, reduction_axes) {} bool op::util::ArithmeticReduction::reduction_axes_constant() const { - return is_type(input_value(1).get_node()); + return ov::is_type(input_value(1).get_node()); } const AxisSet op::util::ArithmeticReduction::get_reduction_axes() const { diff --git a/ngraph/core/src/op/util/broadcast_base.cpp b/ngraph/core/src/op/util/broadcast_base.cpp index 476882b59c1..11d088e0bc0 100644 --- a/ngraph/core/src/op/util/broadcast_base.cpp +++ b/ngraph/core/src/op/util/broadcast_base.cpp @@ -193,7 +193,7 @@ void op::util::BroadcastBase::validate_and_infer_types() { PartialShape output_shape; bool output_shape_defined = evaluate_as_partial_shape(get_input_source_output(1), output_shape); - if (auto concat = as_type_ptr(input_value(1).get_node_shared_ptr())) { + if (auto concat = ov::as_type_ptr(input_value(1).get_node_shared_ptr())) { auto concat_inputs = concat->inputs(); if (!output_shape_defined && concat->get_output_partial_shape(0).is_static() && @@ -201,7 +201,7 @@ void op::util::BroadcastBase::validate_and_infer_types() { auto output_partial_shape = vector{}; for (const auto& concat_input : concat_inputs) { auto source_node_ptr = concat_input.get_source_output().get_node_shared_ptr(); - if (auto source_const_ptr = as_type_ptr(source_node_ptr)) { + if (auto source_const_ptr = ov::as_type_ptr(source_node_ptr)) { output_partial_shape.push_back(source_const_ptr->get_axis_vector_val()[0]); } else { output_partial_shape.push_back(Dimension::dynamic()); @@ -428,7 +428,7 @@ bool op::util::BroadcastBase::evaluate_broadcast(const HostTensorPtr& arg0, Shape op::util::BroadcastBase::get_target_shape(const HostTensorPtr& input1) const { Shape target_shape; - const auto shape_constant = as_type_ptr(input_value(1).get_node_shared_ptr()); + const auto shape_constant = ov::as_type_ptr(input_value(1).get_node_shared_ptr()); if (shape_constant) { target_shape = shape_constant->get_shape_val(); } else { @@ -450,7 +450,7 @@ bool op::util::BroadcastBase::evaluate(const HostTensorVector& outputs, const Ho if (m_mode.m_type == BroadcastType::NONE) { AxisVector axes_mapping_val; - const auto axes_mapping_constant = as_type_ptr(input_value(2).get_node_shared_ptr()); + const auto axes_mapping_constant = ov::as_type_ptr(input_value(2).get_node_shared_ptr()); if (axes_mapping_constant) { axes_mapping_val = axes_mapping_constant->get_axis_vector_val(); } else { diff --git a/ngraph/core/src/op/util/fft_base.cpp b/ngraph/core/src/op/util/fft_base.cpp index cc02453f25c..e1cc47e8d01 100644 --- a/ngraph/core/src/op/util/fft_base.cpp +++ b/ngraph/core/src/op/util/fft_base.cpp @@ -73,7 +73,7 @@ void op::util::FFTBase::validate() { axes_shape.to_shape()[0]); } - if (input_shape.rank().is_static() && is_type(input_value(1).get_node())) { + if (input_shape.rank().is_static() && ov::is_type(input_value(1).get_node())) 
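// read_k_from_constant_node above, like the reduction, broadcast, and FFT
// helpers, first downcasts an input's producer to op::Constant before reading
// a value out of it. That step, condensed into a sketch (read_scalar_i64 is
// an illustrative name, not a helper from this patch):
#include <memory>
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "openvino/core/type.hpp"

int64_t read_scalar_i64(const std::shared_ptr<ngraph::Node>& node, size_t input_idx) {
    const auto constant =
        ov::as_type_ptr<ngraph::op::Constant>(node->input_value(input_idx).get_node_shared_ptr());
    // A dynamic (non-constant) producer yields nullptr; signal that with -1.
    return constant ? constant->cast_vector<int64_t>().at(0) : -1;
}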
{ const auto input_rank = input_shape.rank().get_length(); const auto& const_axes = get_constant_from_source(input_value(1)); auto axes = const_axes->cast_vector(); @@ -146,7 +146,7 @@ void op::util::FFTBase::validate_and_infer_types() { const auto input_rank = input_shape.rank().get_length(); - if (axes_shape.rank().is_dynamic() || !is_type(input_value(1).get_node())) { + if (axes_shape.rank().is_dynamic() || !ov::is_type(input_value(1).get_node())) { for (int64_t i = 0; i < input_rank - 1; ++i) { output_shape[i] = Dimension::dynamic(); } @@ -179,7 +179,7 @@ void op::util::FFTBase::validate_and_infer_types() { } } - if (!is_type(input_value(2).get_node())) { + if (!ov::is_type(input_value(2).get_node())) { for (int64_t axis : axes) { output_shape[axis] = Dimension::dynamic(); } diff --git a/ngraph/core/src/pass/constant_folding.cpp b/ngraph/core/src/pass/constant_folding.cpp index 311cb121269..f9818321d1f 100644 --- a/ngraph/core/src/pass/constant_folding.cpp +++ b/ngraph/core/src/pass/constant_folding.cpp @@ -79,7 +79,7 @@ bool ngraph::pass::ConstantFolding::pre_calculated_values_folding(const std::sha while (!nodes.empty()) { auto curr_node = nodes.front(); nodes.pop_front(); - if (visited.count(curr_node) || is_type(curr_node)) + if (visited.count(curr_node) || ov::is_type(curr_node)) continue; visited.insert(curr_node); @@ -100,7 +100,7 @@ bool ngraph::pass::ConstantFolding::pre_calculated_values_folding(const std::sha if (status && input_value.get_tensor().has_and_set_bound()) { auto input_node = input_value.get_node_shared_ptr(); auto replacement = std::make_shared(input_value.get_tensor().get_lower_value()); - if (replacement && !is_type(input_node)) { + if (replacement && !ov::is_type(input_node)) { if (input_node->get_output_size() == 1) { replacement->set_friendly_name(input_node->get_friendly_name()); } else { diff --git a/ngraph/core/src/pass/convert_precision.cpp b/ngraph/core/src/pass/convert_precision.cpp index a638aef5152..6f8c0997b91 100644 --- a/ngraph/core/src/pass/convert_precision.cpp +++ b/ngraph/core/src/pass/convert_precision.cpp @@ -293,7 +293,7 @@ bool ngraph::pass::ConvertPrecision::run_on_function(std::shared_ptr& node, element::Type to, size_t idx) { - if (auto shapeof = as_type_ptr(node)) { + if (auto shapeof = ov::as_type_ptr(node)) { if (to == element::i32 || to == element::i64) { shapeof->set_output_type(to); return true; @@ -303,7 +303,7 @@ bool fuse_type_to_shapeof(const std::shared_ptr& node, element::Ty } bool fuse_type_to_range_v4(const std::shared_ptr& node, element::Type to, size_t idx) { - if (auto range = as_type_ptr(node)) { + if (auto range = ov::as_type_ptr(node)) { if (to.is_integral_number() || to.is_real()) { range->set_output_type(to); return true; @@ -313,7 +313,7 @@ bool fuse_type_to_range_v4(const std::shared_ptr& node, element::T } bool fuse_type_to_parameter(const std::shared_ptr& node, element::Type to, size_t idx) { - if (auto param = as_type_ptr(node)) { + if (auto param = ov::as_type_ptr(node)) { param->set_element_type(to); param->validate_and_infer_types(); return true; @@ -322,7 +322,7 @@ bool fuse_type_to_parameter(const std::shared_ptr& node, element:: } bool fuse_type_to_convert(const std::shared_ptr& node, element::Type to, size_t idx) { - if (auto convert = as_type_ptr(node)) { + if (auto convert = ov::as_type_ptr(node)) { convert->set_convert_element_type(to); return true; } @@ -330,7 +330,7 @@ bool fuse_type_to_convert(const std::shared_ptr& node, element::Ty } bool fuse_type_to_nms3(const std::shared_ptr& node, 
ngraph::element::Type to, size_t idx) { - if (auto nms = as_type_ptr(node)) { + if (auto nms = ov::as_type_ptr(node)) { nms->set_output_type(to); return true; } @@ -338,7 +338,7 @@ bool fuse_type_to_nms3(const std::shared_ptr& node, ngraph::elemen } bool fuse_type_to_nms4(const std::shared_ptr& node, ngraph::element::Type to, size_t idx) { - if (auto nms = as_type_ptr(node)) { + if (auto nms = ov::as_type_ptr(node)) { nms->set_output_type(to); return true; } @@ -346,7 +346,7 @@ bool fuse_type_to_nms4(const std::shared_ptr& node, ngraph::elemen } bool fuse_type_to_nms5(const std::shared_ptr& node, ngraph::element::Type to, size_t idx) { - if (auto nms = as_type_ptr(node)) { + if (auto nms = ov::as_type_ptr(node)) { nms->set_output_type(to); return true; } @@ -354,7 +354,7 @@ bool fuse_type_to_nms5(const std::shared_ptr& node, ngraph::elemen } bool fuse_type_to_topk(const std::shared_ptr& node, ngraph::element::Type to, size_t idx) { - if (auto topk = as_type_ptr(node)) { + if (auto topk = ov::as_type_ptr(node)) { if (idx == 1 && (to == element::i32 || to == element::i64)) { topk->set_index_element_type(to); return true; @@ -366,7 +366,7 @@ bool fuse_type_to_topk(const std::shared_ptr& node, ngraph::elemen bool fuse_type_to_ctc_greedy_decoder_seq_len(const std::shared_ptr& node, ngraph::element::Type to, size_t idx) { - if (auto ctc_decoder = as_type_ptr(node)) { + if (auto ctc_decoder = ov::as_type_ptr(node)) { if (idx == 0 && (to == element::i32 || to == element::i64)) { ctc_decoder->set_classes_index_type(to); return true; @@ -380,7 +380,7 @@ bool fuse_type_to_ctc_greedy_decoder_seq_len(const std::shared_ptr } bool fuse_type_to_nonzero(const std::shared_ptr& node, ngraph::element::Type to, size_t idx) { - if (auto nonzero = as_type_ptr(node)) { + if (auto nonzero = ov::as_type_ptr(node)) { if (to == element::i32 || to == element::i64) { nonzero->set_output_type(to); return true; @@ -390,7 +390,7 @@ bool fuse_type_to_nonzero(const std::shared_ptr& node, ngraph::ele } bool fuse_type_to_bucketize(const std::shared_ptr& node, ngraph::element::Type to, size_t idx) { - if (auto b = as_type_ptr(node)) { + if (auto b = ov::as_type_ptr(node)) { if (to == element::i32 || to == element::i64) { b->set_output_type(to); return true; @@ -682,7 +682,7 @@ std::shared_ptr convert_low_precisions_int(std::shared_ptr& node, element::Type to, const std::vector>& consumers) { - if (auto constant = as_type_ptr(node)) { + if (auto constant = ov::as_type_ptr(node)) { auto from = constant->get_element_type(); std::shared_ptr new_const; if (from == element::u64 && to == element::i32) { diff --git a/ngraph/core/src/pass/visualize_tree.cpp b/ngraph/core/src/pass/visualize_tree.cpp index ae0f30ba2d1..1c1040eb21e 100644 --- a/ngraph/core/src/pass/visualize_tree.cpp +++ b/ngraph/core/src/pass/visualize_tree.cpp @@ -227,7 +227,7 @@ void pass::VisualizeTree::add_node_arguments(shared_ptr node, for (auto input_value : node->input_values()) { auto arg = input_value.get_node_shared_ptr(); size_t jump_distance = height_maps[arg.get()].max_jump_to(height_maps[node.get()]); - if (is_type(arg) || is_type(arg)) { + if (ov::is_type(arg) || ov::is_type(arg)) { auto clone_name = "CLONE_" + to_string(fake_node_ctr); auto color = string("color=\"") + (arg->description() == "Parameter" ? 
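// Each fuse_type_to_* callback above follows the same contract: attempt the
// ov::as_type_ptr downcast, mutate the matched op's output type on success,
// and report whether anything changed. Restated for NonZero, mirroring the
// fuse_type_to_nonzero body shown above (sketch only):
#include <memory>
#include "ngraph/opsets/opset3.hpp"
#include "openvino/core/type.hpp"

bool fuse_output_type_to_nonzero(const std::shared_ptr<ngraph::Node>& node, ngraph::element::Type to) {
    if (auto nonzero = ov::as_type_ptr<ngraph::opset3::NonZero>(node)) {
        if (to == ngraph::element::i32 || to == ngraph::element::i64) {
            nonzero->set_output_type(to);  // NonZero only emits the two index types
            return true;
        }
    }
    return false;  // not a NonZero, or an unsupported target type
}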
"blue" : "black") + string("\""); std::vector attributes{"shape=\"box\"", @@ -383,7 +383,7 @@ std::string pass::VisualizeTree::get_constant_value(std::shared_ptr node, return ss.str(); ss << "\nvalue: "; - const auto constant = as_type_ptr(node); + const auto constant = ov::as_type_ptr(node); switch (constant->get_output_element_type(0)) { case element::Type_t::undefined: ss << "[ undefined value ]"; diff --git a/ngraph/core/src/specialize_function.cpp b/ngraph/core/src/specialize_function.cpp index 418b3434129..3a01a3de00d 100644 --- a/ngraph/core/src/specialize_function.cpp +++ b/ngraph/core/src/specialize_function.cpp @@ -72,7 +72,7 @@ std::shared_ptr ngraph::specialize_function(std::shared_ptr ParameterVector new_parameters = f->get_parameters(); for (size_t i = 0; i < new_parameters.size(); i++) { auto name = new_parameters[i]->get_friendly_name(); - new_parameters[i] = as_type_ptr(m[new_parameters[i].get()]); + new_parameters[i] = ov::as_type_ptr(m[new_parameters[i].get()]); // If the replacement for a Parameter is not itself a Parameter, we must have replaced it // with a constant. We will insert a dead Parameter into the clone's parameters, in order diff --git a/ngraph/core/src/validation_util.cpp b/ngraph/core/src/validation_util.cpp index 70741cf84cf..c5585e87013 100644 --- a/ngraph/core/src/validation_util.cpp +++ b/ngraph/core/src/validation_util.cpp @@ -966,7 +966,7 @@ struct MaxValue { vector exec_constant(Node* node, vector& inputs) { auto result = MaxValue(); - auto op = as_type(node); + auto op = ov::as_type(node); auto element_type = op->get_output_element_type(0); if (element_type.is_integral()) { uint64_t max_val = 0; @@ -1024,7 +1024,7 @@ vector exec_minimum(Node* node, vector& inputs) { } vector exec_concat(Node* node, vector& inputs) { - auto op = as_type(node); + auto op = ov::as_type(node); vector slice_maxen; for (auto input : inputs) { slice_maxen.push_back(input.m_value); @@ -1036,7 +1036,7 @@ vector exec_concat(Node* node, vector& inputs) { vector exec_reduce_min(Node* node, vector& inputs) { auto data = inputs.at(0); if (data.m_slice_axis >= 0 && data.m_slices.size() > 1) { - if (auto indices_const = as_type(node->get_input_node_ptr(1))) { + if (auto indices_const = ov::as_type(node->get_input_node_ptr(1))) { if (indices_const->get_output_element_type(0).is_integral()) { auto indices_shape = indices_const->get_output_shape(0); if (indices_shape == Shape{1}) { @@ -1068,10 +1068,10 @@ vector exec_shape_of(Node* node, vector& inputs) { } vector exec_gather(Node* node, vector& inputs) { - auto gather = as_type(node); + auto gather = ov::as_type(node); - const auto& indices = as_type_ptr(node->input_value(1).get_node_shared_ptr()); - const auto& axis = as_type_ptr(node->input_value(2).get_node_shared_ptr()); + const auto& indices = ov::as_type_ptr(node->input_value(1).get_node_shared_ptr()); + const auto& axis = ov::as_type_ptr(node->input_value(2).get_node_shared_ptr()); if (!indices || !axis) { return {MaxValue()}; @@ -1150,9 +1150,9 @@ bool ngraph::could_propagate(const Output& output, std::vector& ord auto current_node = nodes_to_calculate.front(); nodes_to_calculate.pop_front(); - if (current_node->inputs().empty() && !is_type(current_node)) + if (current_node->inputs().empty() && !ov::is_type(current_node)) status = false; - else if (!is_type(current_node) && !is_type(current_node)) { + else if (!ov::is_type(current_node) && !ov::is_type(current_node)) { // not a leaf, not a shape_of -- continue to search for (const auto& input_value : 
current_node->input_values()) { const auto& input_node = input_value.get_node(); @@ -1482,7 +1482,7 @@ bool ngraph::host_tensor_is_positive(const HostTensorPtr& bound) { const auto axes = op::Constant::create(element::i64, {axes_vector.size()}, axes_vector); OutputVector all(1); folded = std::make_shared(greater[0], axes)->constant_fold(all, {greater[0], axes}); - NGRAPH_CHECK(folded && is_type(all[0].get_node_shared_ptr())); + NGRAPH_CHECK(folded && ov::is_type(all[0].get_node_shared_ptr())); const auto result = std::dynamic_pointer_cast(all[0].get_node_shared_ptr())->cast_vector(); NGRAPH_CHECK(all[0].get_shape() == Shape{}); return result[0]; @@ -1499,7 +1499,7 @@ bool ngraph::has_and_set_equal_bounds(const Output& source) { shared_ptr ngraph::get_constant_from_source(const Output& source) { if (!has_and_set_equal_bounds(source)) return nullptr; - if (const auto& c = as_type_ptr(source.get_node_shared_ptr())) + if (const auto& c = ov::as_type_ptr(source.get_node_shared_ptr())) return c; return std::make_shared(source.get_tensor().get_upper_value()); } diff --git a/ngraph/frontend/onnx/frontend/src/frontend.cpp b/ngraph/frontend/onnx/frontend/src/frontend.cpp index e433aa44e60..8ec3fdf9525 100644 --- a/ngraph/frontend/onnx/frontend/src/frontend.cpp +++ b/ngraph/frontend/onnx/frontend/src/frontend.cpp @@ -28,9 +28,9 @@ extern "C" ONNX_FRONTEND_API void* GetFrontEndData() { InputModel::Ptr FrontEndONNX::load_impl(const std::vector>& variants) const { NGRAPH_CHECK(variants.size() == 1, "Only one parameter to load function is expected. Got " + std::to_string(variants.size())); - NGRAPH_CHECK(is_type>(variants[0]), + NGRAPH_CHECK(ov::is_type>(variants[0]), "Parameter to load function need to be a std::string"); - auto path = as_type_ptr>(variants[0])->get(); + auto path = ov::as_type_ptr>(variants[0])->get(); return std::make_shared(path); } diff --git a/ngraph/frontend/onnx/frontend/src/op/dropout.cpp b/ngraph/frontend/onnx/frontend/src/op/dropout.cpp index 259d32f5739..71c10d0ca89 100644 --- a/ngraph/frontend/onnx/frontend/src/op/dropout.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/dropout.cpp @@ -44,7 +44,7 @@ OutputVector dropout(const Node& node) { ngraph::op::is_constant(ng_inputs.at(2).get_node_shared_ptr()), "Non-constant training_mode input is not supported."); training_mode = - as_type_ptr(ng_inputs.at(2).get_node_shared_ptr())->cast_vector()[0]; + ov::as_type_ptr(ng_inputs.at(2).get_node_shared_ptr())->cast_vector()[0]; } return build_dropout(node, training_mode); } diff --git a/ngraph/frontend/onnx/frontend/src/op/loop.cpp b/ngraph/frontend/onnx/frontend/src/op/loop.cpp index 5001b64e8f3..430fb85b085 100644 --- a/ngraph/frontend/onnx/frontend/src/op/loop.cpp +++ b/ngraph/frontend/onnx/frontend/src/op/loop.cpp @@ -38,10 +38,10 @@ bool is_termination_condition_always_true(const Output& body_out_c // value of loop_cond - true // Identity op for boolean value is represented by LogicalOr op whose second // input is always false - if (is_type(body_out_cond.get_node_shared_ptr())) { + if (ov::is_type(body_out_cond.get_node_shared_ptr())) { const auto second_input = body_out_cond.get_node_shared_ptr()->input_value(1).get_node_shared_ptr(); if (ngraph::op::is_constant(second_input) && second_input->get_element_type() == element::boolean && - as_type_ptr(second_input)->cast_vector().at(0) == false) { + ov::as_type_ptr(second_input)->cast_vector().at(0) == false) { return true; } } @@ -74,7 +74,7 @@ OutputVector loop(const Node& node) { // trip count skipped or has value max(int64_t) means 
infinitive loop if (ngraph::op::is_null(ng_inputs.at(0)) || (ngraph::op::is_constant(ng_inputs.at(0).get_node_shared_ptr()) && - as_type_ptr(ng_inputs.at(0).get_node_shared_ptr())->cast_vector<int64_t>()[0] == + ov::as_type_ptr(ng_inputs.at(0).get_node_shared_ptr())->cast_vector<int64_t>()[0] == std::numeric_limits<int64_t>::max())) { // -1 means infinite Loop trip_count = ngraph::op::Constant::create(ngraph::element::i64, {1}, {-1}); @@ -87,8 +87,8 @@ OutputVector loop(const Node& node) { { termination_cond = ngraph::op::Constant::create(ngraph::element::boolean, {1}, {true}); } else if (ngraph::op::is_constant(ng_inputs.at(1).get_node_shared_ptr()) && - as_type_ptr(ng_inputs.at(1).get_node_shared_ptr())->cast_vector<bool>()[0] == - false) { + ov::as_type_ptr(ng_inputs.at(1).get_node_shared_ptr()) + ->cast_vector<bool>()[0] == false) { // no iteration is performed so initial values are returned OutputVector node_outputs; // final values diff --git a/ngraph/frontend/paddlepaddle/src/frontend.cpp b/ngraph/frontend/paddlepaddle/src/frontend.cpp index 2cd3e2a210a..0ef30903455 100644 --- a/ngraph/frontend/paddlepaddle/src/frontend.cpp +++ b/ngraph/frontend/paddlepaddle/src/frontend.cpp @@ -109,15 +109,15 @@ bool normalize_framework_node(const std::shared_ptr& node, } std::istream* variant_to_stream_ptr(const std::shared_ptr<Variant>& variant, std::ifstream& ext_stream) { - if (is_type<VariantWrapper<std::istream*>>(variant)) { - return as_type_ptr<VariantWrapper<std::istream*>>(variant)->get(); - } else if (is_type<VariantWrapper<std::string>>(variant)) { - const auto& model_path = as_type_ptr<VariantWrapper<std::string>>(variant)->get(); + if (ov::is_type<VariantWrapper<std::istream*>>(variant)) { + return ov::as_type_ptr<VariantWrapper<std::istream*>>(variant)->get(); + } else if (ov::is_type<VariantWrapper<std::string>>(variant)) { + const auto& model_path = ov::as_type_ptr<VariantWrapper<std::string>>(variant)->get(); ext_stream.open(model_path, std::ios::in | std::ifstream::binary); } #if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - else if (is_type<VariantWrapper<std::wstring>>(variant)) { - const auto& model_path = as_type_ptr<VariantWrapper<std::wstring>>(variant)->get(); + else if (ov::is_type<VariantWrapper<std::wstring>>(variant)) { + const auto& model_path = ov::as_type_ptr<VariantWrapper<std::wstring>>(variant)->get(); ext_stream.open(model_path, std::ios::in | std::ifstream::binary); } #endif @@ -201,9 +201,9 @@ bool FrontEndPDPD::supported_impl(const std::vector<std::shared_ptr<Variant>>& v return false; // Validating first path, it must contain a model - if (is_type<VariantWrapper<std::string>>(variants[0])) { + if (ov::is_type<VariantWrapper<std::string>>(variants[0])) { std::string suffix = ".pdmodel"; - std::string model_path = as_type_ptr<VariantWrapper<std::string>>(variants[0])->get(); + std::string model_path = ov::as_type_ptr<VariantWrapper<std::string>>(variants[0])->get(); if (!pdpd::endsWith(model_path, suffix)) { model_path += pdpd::get_path_sep() + "__model__"; } @@ -213,9 +213,9 @@ bool FrontEndPDPD::supported_impl(const std::vector<std::shared_ptr<Variant>>& v return model_str && model_str.is_open(); } #if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - else if (is_type<VariantWrapper<std::wstring>>(variants[0])) { + else if (ov::is_type<VariantWrapper<std::wstring>>(variants[0])) { std::wstring suffix = L".pdmodel"; - std::wstring model_path = as_type_ptr<VariantWrapper<std::wstring>>(variants[0])->get(); + std::wstring model_path = ov::as_type_ptr<VariantWrapper<std::wstring>>(variants[0])->get(); if (!pdpd::endsWith(model_path, suffix)) { model_path += pdpd::get_path_sep() + L"__model__"; } @@ -225,9 +225,9 @@ bool FrontEndPDPD::supported_impl(const std::vector<std::shared_ptr<Variant>>& v return model_str && model_str.is_open(); } #endif - else if (is_type<VariantWrapper<std::istream*>>(variants[0])) { + else if (ov::is_type<VariantWrapper<std::istream*>>(variants[0])) { // Validating first stream, it must contain a model - auto p_model_stream = as_type_ptr<VariantWrapper<std::istream*>>(variants[0])->get(); + auto p_model_stream = ov::as_type_ptr<VariantWrapper<std::istream*>>(variants[0])->get(); paddle::framework::proto::ProgramDesc fw; return fw.ParseFromIstream(p_model_stream); } @@ -237,20 +237,20 @@ bool
FrontEndPDPD::supported_impl(const std::vector<std::shared_ptr<Variant>>& v InputModel::Ptr FrontEndPDPD::load_impl(const std::vector<std::shared_ptr<Variant>>& variants) const { if (variants.size() == 1) { // The case when folder with __model__ and weight files is provided or .pdmodel file - if (is_type<VariantWrapper<std::string>>(variants[0])) { - std::string m_path = as_type_ptr<VariantWrapper<std::string>>(variants[0])->get(); + if (ov::is_type<VariantWrapper<std::string>>(variants[0])) { + std::string m_path = ov::as_type_ptr<VariantWrapper<std::string>>(variants[0])->get(); return std::make_shared(m_path); } #if defined(ENABLE_UNICODE_PATH_SUPPORT) && defined(_WIN32) - else if (is_type<VariantWrapper<std::wstring>>(variants[0])) { - std::wstring m_path = as_type_ptr<VariantWrapper<std::wstring>>(variants[0])->get(); + else if (ov::is_type<VariantWrapper<std::wstring>>(variants[0])) { + std::wstring m_path = ov::as_type_ptr<VariantWrapper<std::wstring>>(variants[0])->get(); return std::make_shared(m_path); } #endif // The case with only model stream provided and no weights. This means model has // no learnable weights - else if (is_type<VariantWrapper<std::istream*>>(variants[0])) { - auto p_model_stream = as_type_ptr<VariantWrapper<std::istream*>>(variants[0])->get(); + else if (ov::is_type<VariantWrapper<std::istream*>>(variants[0])) { + auto p_model_stream = ov::as_type_ptr<VariantWrapper<std::istream*>>(variants[0])->get(); return std::make_shared(std::vector<std::istream*>{p_model_stream}); } } else if (variants.size() == 2) { @@ -279,7 +279,7 @@ std::shared_ptr FrontEndPDPD::convert(InputModel::Ptr model) c void FrontEndPDPD::convert(std::shared_ptr partiallyConverted) const { for (const auto& node : partiallyConverted->get_ordered_ops()) { - if (is_type(node)) { + if (ov::is_type(node)) { pdpd::normalize_framework_node(std::dynamic_pointer_cast(node), pdpd::get_supported_ops()); } @@ -330,4 +330,4 @@ extern "C" PDPD_API void* GetFrontEndData() { return std::make_shared(); }; return res; -} \ No newline at end of file +} diff --git a/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp b/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp index 28e402a92a7..5e089770ec2 100644 --- a/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp +++ b/ngraph/python/tests/mock/mock_py_ngraph_frontend/mock_py_frontend.hpp @@ -556,15 +556,15 @@ public: FrontEndMockPy() {} InputModel::Ptr load_impl(const std::vector<std::shared_ptr<Variant>>& params) const override { - if (params.size() > 0 && is_type<VariantWrapper<std::string>>(params[0])) - m_stat.m_load_paths.push_back(as_type_ptr<VariantWrapper<std::string>>(params[0])->get()); + if (params.size() > 0 && ov::is_type<VariantWrapper<std::string>>(params[0])) + m_stat.m_load_paths.push_back(ov::as_type_ptr<VariantWrapper<std::string>>(params[0])->get()); return std::make_shared(); } bool supported_impl(const std::vector<std::shared_ptr<Variant>>& params) const override { m_stat.m_supported++; - if (params.size() > 0 && is_type<VariantWrapper<std::string>>(params[0])) { - auto path = as_type_ptr<VariantWrapper<std::string>>(params[0])->get(); + if (params.size() > 0 && ov::is_type<VariantWrapper<std::string>>(params[0])) { + auto path = ov::as_type_ptr<VariantWrapper<std::string>>(params[0])->get(); if (path.find(".test_mock_py_mdl") != std::string::npos) { return true; } diff --git a/ngraph/test/builder_autobroadcast.cpp b/ngraph/test/builder_autobroadcast.cpp index 86ac0ed44d1..48c04a7b65c 100644 --- a/ngraph/test/builder_autobroadcast.cpp +++ b/ngraph/test/builder_autobroadcast.cpp @@ -321,7 +321,7 @@ TEST(autobroadcast, axes_mapping_from_bcast_axes) { auto axes_mapping = builder::opset1::get_axes_mapping_output(output_shape, broadcast_axes); EXPECT_TRUE(op::is_constant(axes_mapping.get_node())); - Shape axes_mapping_shape = as_type(axes_mapping.get_node())->get_shape_val(); + Shape axes_mapping_shape = ov::as_type(axes_mapping.get_node())->get_shape_val(); EXPECT_EQ(axes_mapping_shape.size(), 2); EXPECT_EQ(axes_mapping_shape, (Shape{1, 3})); } @@ -333,7 +333,7 @@ TEST(autobroadcast, axes_mapping_from_bcast_axes_scalar) { auto axes_mapping =
builder::opset1::get_axes_mapping_output(output_shape, broadcast_axes); EXPECT_TRUE(op::is_constant(axes_mapping.get_node())); - Shape axes_mapping_shape = as_type(axes_mapping.get_node())->get_shape_val(); + Shape axes_mapping_shape = ov::as_type(axes_mapping.get_node())->get_shape_val(); EXPECT_EQ(axes_mapping_shape.size(), 0); EXPECT_EQ(axes_mapping_shape, (Shape{})); } @@ -345,7 +345,7 @@ TEST(autobroadcast, axes_mapping_from_bcast_axes_identical) { auto axes_mapping = builder::opset1::get_axes_mapping_output(output_shape, broadcast_axes); EXPECT_TRUE(op::is_constant(axes_mapping.get_node())); - Shape axes_mapping_shape = as_type(axes_mapping.get_node())->get_shape_val(); + Shape axes_mapping_shape = ov::as_type(axes_mapping.get_node())->get_shape_val(); EXPECT_EQ(axes_mapping_shape.size(), output_shape.size()); EXPECT_EQ(axes_mapping_shape, (Shape{0, 1, 2, 3})); } @@ -357,7 +357,7 @@ TEST(autobroadcast, axes_mapping_start_match_axis) { auto axes_mapping = builder::opset1::get_axes_mapping_output(output_shape, input_shape, start_match_axis); EXPECT_TRUE(op::is_constant(axes_mapping.get_node())); - Shape axes_mapping_shape = as_type(axes_mapping.get_node())->get_shape_val(); + Shape axes_mapping_shape = ov::as_type(axes_mapping.get_node())->get_shape_val(); EXPECT_EQ(axes_mapping_shape.size(), 2); EXPECT_EQ(axes_mapping_shape, (Shape{1, 2})); } @@ -369,7 +369,7 @@ TEST(autobroadcast, axes_mapping_start_match_axis_scalar) { auto axes_mapping = builder::opset1::get_axes_mapping_output(output_shape, input_shape, start_match_axis); EXPECT_TRUE(op::is_constant(axes_mapping.get_node())); - Shape axes_mapping_shape = as_type(axes_mapping.get_node())->get_shape_val(); + Shape axes_mapping_shape = ov::as_type(axes_mapping.get_node())->get_shape_val(); EXPECT_EQ(axes_mapping_shape.size(), 0); EXPECT_EQ(axes_mapping_shape, (Shape{})); } @@ -381,7 +381,7 @@ TEST(autobroadcast, axes_mapping_start_match_axis_identical) { auto axes_mapping = builder::opset1::get_axes_mapping_output(output_shape, input_shape, start_match_axis); EXPECT_TRUE(op::is_constant(axes_mapping.get_node())); - Shape axes_mapping_shape = as_type(axes_mapping.get_node())->get_shape_val(); + Shape axes_mapping_shape = ov::as_type(axes_mapping.get_node())->get_shape_val(); EXPECT_EQ(axes_mapping_shape.size(), output_shape.rank().get_length()); EXPECT_EQ(axes_mapping_shape, (Shape{0, 1, 2, 3})); } diff --git a/ngraph/test/constant_folding.cpp b/ngraph/test/constant_folding.cpp index 2daab19e497..2881549eaff 100644 --- a/ngraph/test/constant_folding.cpp +++ b/ngraph/test/constant_folding.cpp @@ -16,7 +16,7 @@ using namespace std; template static std::vector get_result_constant(std::shared_ptr f, size_t pos) { - auto new_const = as_type_ptr(f->get_results().at(pos)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(pos)->input_value(0).get_node_shared_ptr()); return new_const->cast_vector(); } @@ -55,7 +55,7 @@ TEST(constant_folding, acosh) { EXPECT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(f->get_results().size(), 1); - auto new_const = as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); EXPECT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -84,7 +84,7 @@ TEST(constant_folding, asinh) { EXPECT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(f->get_results().size(), 1); - auto new_const = as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); + auto 
new_const = ov::as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); EXPECT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -113,7 +113,7 @@ TEST(constant_folding, atanh) { EXPECT_EQ(count_ops_of_type(f), 1); ASSERT_EQ(f->get_results().size(), 1); - auto new_const = as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results()[0]->input_value(0).get_node_shared_ptr()); EXPECT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -141,7 +141,7 @@ TEST(constant_folding, constant_squeeze) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), shape_out); @@ -170,7 +170,7 @@ TEST(constant_folding, constant_unsqueeze) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), shape_out); @@ -197,7 +197,7 @@ TEST(constant_folding, constant_broadcast_v1) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -222,7 +222,7 @@ TEST(constant_folding, constant_broadcast_v1_with_target_shape) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -247,7 +247,7 @@ TEST(constant_folding, constant_broadcast_v1_numpy) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -409,7 +409,7 @@ static void test_const_convert(const vector& values_in, const vector& valu ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_output_element_type(0), element::from()); @@ -461,7 +461,7 @@ TEST(constant_folding, shape_of_v0) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto 
new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_output_element_type(0), element::i64); @@ -485,7 +485,7 @@ TEST(constant_folding, shape_of_v3) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_output_element_type(0), element::i64); @@ -509,7 +509,7 @@ TEST(constant_folding, shape_of_i32_v3) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_output_element_type(0), element::i32); @@ -675,7 +675,7 @@ void const_reverse(const element::Type& axes_elem_type) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -717,7 +717,7 @@ TEST(constant_folding, const_reduceprod) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -749,7 +749,7 @@ TEST(constant_folding, const_reduceprod_keepdims) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -781,7 +781,7 @@ TEST(constant_folding, const_reducesum) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -813,7 +813,7 @@ TEST(constant_folding, const_reducesum_keepdims) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -845,7 +845,7 @@ TEST(constant_folding, const_reducemax) { ASSERT_EQ(count_ops_of_type(f), 0); 
ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -877,7 +877,7 @@ TEST(constant_folding, const_reducemax_keepdims) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -909,7 +909,7 @@ TEST(constant_folding, const_reducemin) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -941,7 +941,7 @@ TEST(constant_folding, const_reducemin_keepdims) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -973,7 +973,7 @@ TEST(constant_folding, const_reducemean) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -1005,7 +1005,7 @@ TEST(constant_folding, const_reducemean_keepdims) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(new_const->get_shape(), output_shape); @@ -1034,7 +1034,7 @@ TEST(constant_folding, const_reduce_logical_and__no_keepdims) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1065,7 +1065,7 @@ TEST(constant_folding, const_reduce_logical_and__keepdims) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1098,7 +1098,7 @@ TEST(constant_folding, 
const_reduce_logical_and__keepdims_3d) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1129,7 +1129,7 @@ TEST(constant_folding, const_reduce_logical_or__no_keepdims) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1157,7 +1157,7 @@ TEST(constant_folding, const_concat) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1181,7 +1181,7 @@ TEST(constant_folding, const_concat_3d_single_elem) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1208,7 +1208,7 @@ TEST(constant_folding, const_concat_axis_2) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1235,7 +1235,7 @@ TEST(constant_folding, const_concat_axis_1_bool_type) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1259,7 +1259,7 @@ TEST(constant_folding, const_logical_not) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1283,7 +1283,7 @@ TEST(constant_folding, const_equal) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1307,7 +1307,7 @@ TEST(constant_folding, const_not_equal) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = 
as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1331,7 +1331,7 @@ TEST(constant_folding, const_greater) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1355,7 +1355,7 @@ TEST(constant_folding, const_greater_eq) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1379,7 +1379,7 @@ TEST(constant_folding, const_less) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1403,7 +1403,7 @@ TEST(constant_folding, const_less_eq) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1427,7 +1427,7 @@ TEST(constant_folding, const_or) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1451,7 +1451,7 @@ TEST(constant_folding, const_xor) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1475,7 +1475,7 @@ TEST(constant_folding, const_ceiling) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1499,7 +1499,7 @@ TEST(constant_folding, const_floor) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = 
as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1527,7 +1527,7 @@ TEST(constant_folding, const_gather_v1) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1555,7 +1555,7 @@ TEST(constant_folding, const_gather_v1_scalar) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1589,7 +1589,7 @@ TEST(constant_folding, const_gather_v1_subgraph) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1621,7 +1621,7 @@ TEST(constant_folding, const_gather_v1_subgraph_neg_axis) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1781,7 +1781,7 @@ TEST(constant_folding, const_gather_v7) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1809,7 +1809,7 @@ TEST(constant_folding, const_gather_v7_scalar) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -1843,7 +1843,7 @@ TEST(constant_folding, const_gather_v7_subgraph) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -1875,7 +1875,7 @@ TEST(constant_folding, const_gather_v7_subgraph_neg_axis) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = 
as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -2042,7 +2042,7 @@ TEST(constant_folding, const_strided_slice) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2071,7 +2071,7 @@ TEST(constant_folding, constant_dyn_reshape) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2107,7 +2107,7 @@ TEST(constant_folding, constant_dyn_reshape_shape_not_originally_constant) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2201,7 +2201,7 @@ TEST(constant_folding, constant_transpose) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2230,7 +2230,7 @@ void range_test(T start, T stop, T step, const vector& values_expected) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); @@ -2271,7 +2271,7 @@ TEST(constant_folding, constant_v1_select) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2296,9 +2296,9 @@ TEST(constant_folding, constant_v1_split) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), num_splits); - auto res1 = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); - auto res3 = as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); + auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res2 = 
ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto res3 = ov::as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res1); ASSERT_TRUE(res2); ASSERT_TRUE(res3); @@ -2327,9 +2327,9 @@ TEST(constant_folding, constant_v1_split_specialized) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), num_splits); - auto res1 = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); - auto res3 = as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); + auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto res3 = ov::as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res1); ASSERT_TRUE(res2); ASSERT_TRUE(res3); @@ -2366,10 +2366,10 @@ TEST(constant_folding, constant_v1_split_axis_1_4_splits) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), num_splits); - auto res1 = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); - auto res3 = as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); - auto res4 = as_type_ptr(f->get_results().at(3)->input_value(0).get_node_shared_ptr()); + auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto res3 = ov::as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); + auto res4 = ov::as_type_ptr(f->get_results().at(3)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res1); ASSERT_EQ(res1->get_friendly_name(), "test.0"); ASSERT_TRUE(res2); @@ -2412,8 +2412,8 @@ TEST(constant_folding, constant_v1_split_axis_1_2_splits) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), num_splits); - auto res1 = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res1); ASSERT_TRUE(res2); @@ -2451,8 +2451,8 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_2_splits) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), values_lengths.size()); - auto res1 = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res1); ASSERT_TRUE(res2); @@ -2489,9 +2489,9 @@ TEST(constant_folding, constant_v1_variadic_split_axis_1_3_splits_neg_length) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), values_lengths.size()); - auto res1 = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto res2 = as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); - auto res3 = 
as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); + auto res1 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res2 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto res3 = ov::as_type_ptr(f->get_results().at(2)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res1); ASSERT_TRUE(res2); ASSERT_TRUE(res3); @@ -2527,7 +2527,7 @@ TEST(constant_folding, constant_v1_one_hot) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto res = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res); ASSERT_EQ((Shape{3, 3}), res->get_output_shape(0)); @@ -2557,7 +2557,7 @@ TEST(constant_folding, constant_v1_one_hot_negative_axes) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto res = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res); ASSERT_EQ((Shape{4, 3}), res->get_output_shape(0)); @@ -2598,7 +2598,7 @@ TEST(constant_folding, constant_v1_one_hot_negative_axes_2) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto res = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto res = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(res); ASSERT_EQ(res->get_friendly_name(), "test"); @@ -2638,7 +2638,7 @@ TEST(constant_folding, constant_tile_1d) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2667,7 +2667,7 @@ TEST(constant_folding, constant_tile_3d_small_data_rank) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2696,7 +2696,7 @@ TEST(constant_folding, constant_tile_3d_few_repeats) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2725,7 +2725,7 @@ TEST(constant_folding, constant_tile_1d_0_repeats) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2754,7 +2754,7 @@ TEST(constant_folding, constant_tile_0_rank_data) { ASSERT_EQ(count_ops_of_type(f), 0); 
ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -2778,7 +2778,7 @@ TEST(constant_folding, constant_non_zero_0D) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); const auto values_out = new_const->get_vector(); @@ -2802,7 +2802,7 @@ TEST(constant_folding, constant_non_zero_1D) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); const auto values_out = new_const->get_vector(); @@ -2826,7 +2826,7 @@ TEST(constant_folding, constant_non_zero_int32_output_type) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(element::i32, new_const->get_element_type()); @@ -2851,7 +2851,7 @@ TEST(constant_folding, constant_non_zero_1D_all_indices) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); const auto values_out = new_const->get_vector(); @@ -2875,7 +2875,7 @@ TEST(constant_folding, constant_non_zero_2D) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); const auto values_out = new_const->get_vector(); @@ -2899,7 +2899,7 @@ TEST(constant_folding, DISABLED_constant_non_zero_2D_all_indices) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); const auto values_out = new_const->get_vector(); @@ -2924,7 +2924,7 @@ TEST(constant_folding, DISABLED_constant_non_zero_2D_all_zeros) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = 
ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); ASSERT_EQ(shape_size(new_const->get_shape()), 0); @@ -2944,7 +2944,7 @@ TEST(constant_folding, constant_non_zero_3D) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - const auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + const auto new_const = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); const auto values_out = new_const->get_vector(); @@ -2977,7 +2977,7 @@ TEST(constant_folding, constant_scatter_elements_update_basic) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(result_node); ASSERT_EQ(result_node->get_friendly_name(), "test"); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); @@ -3006,7 +3006,7 @@ TEST(constant_folding, constant_scatter_elements_update_negative_axis) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{1.1f, 1.0f, 1.2f, 2.0f, 2.2f, 2.1f, 0.0f, 0.0f, 0.0f}; @@ -3034,7 +3034,7 @@ TEST(constant_folding, constant_scatter_elements_update_1d_axis) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{2.f, 1.1f, 0.0f, 1.f, 0.0f, 2.2f, 0.f, 2.1f, 1.2f}; @@ -3063,7 +3063,7 @@ TEST(constant_folding, constant_scatter_elements_update_3d_i16) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{4, 2, 0, 1, 0, 6, 0, 5, 3, 10, 0, 12, 0, 11, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0}; @@ -3091,7 +3091,7 @@ TEST(constant_folding, constant_scatter_elements_update_one_elem) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto result_node = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(result_node); ASSERT_EQ(data_shape, result_node->get_output_shape(0)); std::vector expected{input_data}; @@ -3118,7 +3118,7 @@ void test_constant_folding_reshape_v1(Shape& shape_in, ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 1); - auto new_const = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto new_const = 
ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(new_const); ASSERT_EQ(new_const->get_friendly_name(), "test"); auto values_out = new_const->get_vector(); @@ -3205,8 +3205,8 @@ TEST(constant_folding, constant_loop) { ASSERT_EQ(count_ops_of_type(f), 0); ASSERT_EQ(count_ops_of_type(f), 2); - auto result_node_0 = as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); - auto result_node_1 = as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); + auto result_node_0 = ov::as_type_ptr(f->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto result_node_1 = ov::as_type_ptr(f->get_results().at(1)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(result_node_0); ASSERT_TRUE(result_node_1); diff --git a/ngraph/test/copy.cpp b/ngraph/test/copy.cpp index 15959ab6747..9185fdfb0cf 100644 --- a/ngraph/test/copy.cpp +++ b/ngraph/test/copy.cpp @@ -74,7 +74,7 @@ TEST(copy, broadcast) { op::Constant::create(element::u64, Shape{new_shape.size()}, new_shape), op::Constant::create(element::i64, Shape{axes.size()}, axes.to_vector())); auto new_node = node->copy_with_new_inputs(new_args); - auto node_cast = as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_NE(nullptr, new_node); @@ -99,7 +99,7 @@ TEST(copy, concat) { int64_t axis = 0; auto node = make_shared(NodeVector{arg0, arg1}, axis); auto new_node = node->clone_with_new_inputs(new_args); - auto node_cast = as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_TRUE(nullptr != new_node); @@ -113,7 +113,7 @@ TEST(copy, constant) { auto& et = element::f32; auto node = op::Constant::create(et, shape, c); auto new_node = node->clone_with_new_inputs(OutputVector{}); - auto node_cast = as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_TRUE(nullptr != new_node); ASSERT_TRUE(OutputVector{} == new_node->input_values()); @@ -130,7 +130,7 @@ TEST(copy, convert) { auto node = make_shared(arg0, et); auto new_node = node->clone_with_new_inputs(new_args); - auto node_cast = as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_TRUE(nullptr != new_node); @@ -206,7 +206,7 @@ TEST(copy, parameter) { Shape shape{1}; auto node = make_shared(element::f32, shape); auto new_node = node->clone_with_new_inputs({}); - auto node_cast = as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_TRUE(nullptr != new_node); @@ -228,7 +228,7 @@ TEST(copy, reduce_sum) { OutputVector new_args{make_shared(element::f32, shape), op::Constant::create(element::i64, {axes.size()}, axes.to_vector())}; auto new_node = node->clone_with_new_inputs(new_args); - auto node_cast = as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_TRUE(nullptr != new_node); @@ -248,7 +248,7 @@ TEST(copy, reshape) { auto shape_pattern = op::Constant::create(element::u64, {shape_out.size()}, shape_out); auto node = make_shared(arg0, shape_pattern, false); auto new_node = node->clone_with_new_inputs(new_args); - auto node_cast = as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_TRUE(nullptr != new_node); @@ -267,7 +267,7 @@ TEST(copy, select) { auto node = make_shared(arg0, arg1, arg2); auto new_node = node->clone_with_new_inputs(new_args); - auto node_cast 
= as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_TRUE(nullptr != new_node); @@ -311,7 +311,7 @@ TEST(copy, strided_slice) { std::vector<int64_t>{0, 0, 1}, std::vector<int64_t>{1, 0, 0}); auto new_node = node->clone_with_new_inputs(new_args); - auto node_cast = as_type_ptr(new_node); + auto node_cast = ov::as_type_ptr(new_node); ASSERT_NE(node_cast, nullptr); ASSERT_TRUE(nullptr != new_node); diff --git a/ngraph/test/graph_rewrite.cpp b/ngraph/test/graph_rewrite.cpp index 68810d1ff90..13b178840cd 100644 --- a/ngraph/test/graph_rewrite.cpp +++ b/ngraph/test/graph_rewrite.cpp @@ -396,7 +396,7 @@ public: for (auto output : node->outputs()) { cnt += output.get_target_inputs().size(); } - if (as_type(node) || as_type(node)) { + if (ov::as_type(node) || ov::as_type(node)) { cnt += 1; } return cnt; } diff --git a/ngraph/test/onnx/onnx_import_const_folding.in.cpp b/ngraph/test/onnx/onnx_import_const_folding.in.cpp index c6b9554af3f..1599707dd3b 100644 --- a/ngraph/test/onnx/onnx_import_const_folding.in.cpp +++ b/ngraph/test/onnx/onnx_import_const_folding.in.cpp @@ -33,7 +33,7 @@ void test_constant_folding(std::shared_ptr ng_function, for (auto ng_node : ng_function->get_ordered_ops()) { if (op::is_constant(ng_node)) { - const auto folded_node = as_type_ptr(ng_node); + const auto folded_node = ov::as_type_ptr(ng_node); const auto output_values = folded_node->cast_vector(); EXPECT_TRUE(ngraph::test::all_close(expected_output, output_values)); diff --git a/ngraph/test/op.cpp b/ngraph/test/op.cpp index 7529b586b32..c60edaa4c35 100644 --- a/ngraph/test/op.cpp +++ b/ngraph/test/op.cpp @@ -99,17 +99,17 @@ constexpr VariantTypeInfo VariantWrapper<Ship>::type_info; TEST(op, variant) { shared_ptr<Variant> var_std_string = make_variant("My string"); - ASSERT_TRUE((is_type<VariantWrapper<std::string>>(var_std_string))); - EXPECT_EQ((as_type_ptr<VariantWrapper<std::string>>(var_std_string)->get()), "My string"); + ASSERT_TRUE((ov::is_type<VariantWrapper<std::string>>(var_std_string))); + EXPECT_EQ((ov::as_type_ptr<VariantWrapper<std::string>>(var_std_string)->get()), "My string"); shared_ptr<Variant> var_int64_t = make_variant<int64_t>(27); - ASSERT_TRUE((is_type<VariantWrapper<int64_t>>(var_int64_t))); - EXPECT_FALSE((is_type<VariantWrapper<std::string>>(var_int64_t))); - EXPECT_EQ((as_type_ptr<VariantWrapper<int64_t>>(var_int64_t)->get()), 27); + ASSERT_TRUE((ov::is_type<VariantWrapper<int64_t>>(var_int64_t))); + EXPECT_FALSE((ov::is_type<VariantWrapper<std::string>>(var_int64_t))); + EXPECT_EQ((ov::as_type_ptr<VariantWrapper<int64_t>>(var_int64_t)->get()), 27); shared_ptr<Variant> var_ship = make_variant(Ship{"Lollipop", 3, 4}); - ASSERT_TRUE((is_type<VariantWrapper<Ship>>(var_ship))); - Ship& ship = as_type_ptr<VariantWrapper<Ship>>(var_ship)->get(); + ASSERT_TRUE((ov::is_type<VariantWrapper<Ship>>(var_ship))); + Ship& ship = ov::as_type_ptr<VariantWrapper<Ship>>(var_ship)->get(); EXPECT_EQ(ship.name, "Lollipop"); EXPECT_EQ(ship.x, 3); EXPECT_EQ(ship.y, 4); @@ -118,22 +118,22 @@ TEST(op, variant) { // Check Node RTInfo node->get_rt_info()["A"] = var_ship; auto node_var_ship = node->get_rt_info().at("A"); - ASSERT_TRUE((is_type<VariantWrapper<Ship>>(node_var_ship))); - Ship& node_ship = as_type_ptr<VariantWrapper<Ship>>(node_var_ship)->get(); + ASSERT_TRUE((ov::is_type<VariantWrapper<Ship>>(node_var_ship))); + Ship& node_ship = ov::as_type_ptr<VariantWrapper<Ship>>(node_var_ship)->get(); EXPECT_EQ(&node_ship, &ship); // Check Node Input RTInfo auto relu = make_shared(node); relu->input(0).get_rt_info()["A"] = var_ship; auto node_input_var_ship = node->get_rt_info().at("A"); - ASSERT_TRUE((is_type<VariantWrapper<Ship>>(node_input_var_ship))); - Ship& node_input_ship = as_type_ptr<VariantWrapper<Ship>>(node_input_var_ship)->get(); + ASSERT_TRUE((ov::is_type<VariantWrapper<Ship>>(node_input_var_ship))); + Ship& node_input_ship = ov::as_type_ptr<VariantWrapper<Ship>>(node_input_var_ship)->get(); EXPECT_EQ(&node_input_ship, &ship); // Check Node Input RTInfo node->output(0).get_rt_info()["A"] = var_ship; auto
node_output_var_ship = node->get_rt_info().at("A"); - ASSERT_TRUE((is_type<VariantWrapper<Ship>>(node_output_var_ship))); - Ship& node_output_ship = as_type_ptr<VariantWrapper<Ship>>(node_input_var_ship)->get(); + ASSERT_TRUE((ov::is_type<VariantWrapper<Ship>>(node_output_var_ship))); + Ship& node_output_ship = ov::as_type_ptr<VariantWrapper<Ship>>(node_input_var_ship)->get(); EXPECT_EQ(&node_output_ship, &ship); } diff --git a/ngraph/test/pattern.cpp b/ngraph/test/pattern.cpp index 09e03fe776d..ae653767b21 100644 --- a/ngraph/test/pattern.cpp +++ b/ngraph/test/pattern.cpp @@ -84,7 +84,7 @@ public: size_t const_node_index = m.get_match_root()->input_value(0).get_node_shared_ptr() == pattern_map[pattern]; auto const_node = - as_type_ptr(m.get_match_root()->input_value(const_node_index).get_node_shared_ptr()); + ov::as_type_ptr(m.get_match_root()->input_value(const_node_index).get_node_shared_ptr()); auto second_node = m.get_match_root()->input_value(const_node_index).get_node_shared_ptr(); NGRAPH_DEBUG << "second_node = " << second_node->get_name() << " , pattern = " << pattern_map[pattern]->get_name(); @@ -128,7 +128,7 @@ public: size_t const_node_index = m.get_match_root()->input_value(0).get_node_shared_ptr() == pattern_map[pattern]; auto const_node = - as_type_ptr(m.get_match_root()->input_value(const_node_index).get_node_shared_ptr()); + ov::as_type_ptr(m.get_match_root()->input_value(const_node_index).get_node_shared_ptr()); auto second_node = m.get_match_root()->input_value(const_node_index).get_node_shared_ptr(); NGRAPH_DEBUG << "second_node = " << second_node->get_name() << " , pattern = " << pattern_map[pattern]->get_name(); @@ -690,7 +690,7 @@ TEST(pattern, label_on_skip) { auto const_label = std::make_shared(iconst, ngraph::is_zero, NodeVector{iconst}); auto bcst_pred = [](std::shared_ptr<Node> n) { - return as_type_ptr(n) != nullptr; + return ov::as_type_ptr(n) != nullptr; }; auto shape_const = op::Constant::create(element::u64, Shape{shape.size()}, shape); diff --git a/ngraph/test/runtime/dynamic/dynamic_backend.cpp b/ngraph/test/runtime/dynamic/dynamic_backend.cpp index a0adc243fa6..a504d9b9cdb 100644 --- a/ngraph/test/runtime/dynamic/dynamic_backend.cpp +++ b/ngraph/test/runtime/dynamic/dynamic_backend.cpp @@ -75,8 +75,8 @@ runtime::dynamic::DynamicExecutable::DynamicExecutable(shared_ptr wrap // count_dyn_nodes. bool is_dynamic_op(const std::shared_ptr<Node>& op) { - return is_type(op) || is_type(op) || - is_type(op); + return ov::is_type(op) || ov::is_type(op) || + ov::is_type(op); } // Helper for a vile hack in DynamicExecutable::call. See body of that function for details.
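Every hunk in this patch applies the same mechanical rewrite: the RTTI helpers is_type / as_type / as_type_ptr are now qualified with the ov namespace, following the move of DiscreteTypeInfo. For readers unfamiliar with these helpers, a minimal sketch of the call pattern follows; the wrapper function try_constant and the choice of ngraph::op::Constant are illustrative assumptions, not part of the patch.

#include <memory>
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"

// Hypothetical helper, shown only to illustrate the ov::-qualified RTTI calls
// that this patch substitutes for the previously unqualified ngraph ones.
std::shared_ptr<ngraph::op::Constant> try_constant(const std::shared_ptr<ngraph::Node>& node) {
    // ov::is_type<T>(ptr) compares the node's DiscreteTypeInfo against T's;
    // before this patch the call was spelled simply is_type<T>(ptr).
    if (ov::is_type<ngraph::op::Constant>(node)) {
        // ov::as_type_ptr<T>(ptr) is the matching checked downcast (was as_type_ptr<T>).
        return ov::as_type_ptr<ngraph::op::Constant>(node);
    }
    return nullptr;
}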
diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp index 6f410d90abf..e6a56212f0f 100644 --- a/ngraph/test/runtime/interpreter/evaluates_map.cpp +++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp @@ -2921,21 +2921,21 @@ namespace const HostTensorVector& inputs) { auto element_type = node->get_output_element_type(0); - if (is_type(node)) + if (ov::is_type(node)) { element_type = node->get_input_element_type(1); } - else if (is_type(node)) + else if (ov::is_type(node)) { element_type = node->get_input_element_type(0); } for (size_t i = 1; i < node->outputs().size(); i++) { - if ((is_type(node) || - is_type(node) || - is_type(node) || - is_type(node) || - is_type(node)) && + if ((ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node) || + ov::is_type(node)) && i == 1) { continue; @@ -2944,37 +2944,37 @@ namespace switch (element_type) { case element::Type_t::boolean: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::bf16: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::f16: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::f64: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::f32: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::i4: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::i8: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::i16: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::i32: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::i64: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::u1: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::u4: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::u8: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::u16: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::u32: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); case element::Type_t::u64: - return evaluate(as_type_ptr(node), outputs, inputs); + return evaluate(ov::as_type_ptr(node), outputs, inputs); default: throw ngraph_error(std::string("Unhandled data type ") + node->get_element_type().get_type_name() + diff --git a/ngraph/test/runtime/interpreter/int_executable.cpp b/ngraph/test/runtime/interpreter/int_executable.cpp index 3d2cad83343..8d24cd6f088 100644 --- a/ngraph/test/runtime/interpreter/int_executable.cpp +++ 
b/ngraph/test/runtime/interpreter/int_executable.cpp @@ -69,7 +69,7 @@ bool runtime::interpreter::INTExecutable::call(const vector(output)) + if (!ov::is_type(output)) { throw ngraph_error("One of function's outputs isn't op::Result"); } @@ -114,13 +114,13 @@ bool runtime::interpreter::INTExecutable::call(const vector(op) || is_type(op)) + if (ov::is_type(op) || ov::is_type(op)) { type = op->get_input_element_type(0); } - else if (is_type(op) || is_type(op) || - is_type(op) || is_type(op) || - is_type(op) || is_type(op)) + else if (ov::is_type(op) || ov::is_type(op) || + ov::is_type(op) || ov::is_type(op) || + ov::is_type(op) || ov::is_type(op)) { // Get the type of the second input, not the first // All BinaryElementwiseComparision ops have the same type for inputs diff --git a/ngraph/test/specialize_function.cpp b/ngraph/test/specialize_function.cpp index 93b90997d50..95143814cfa 100644 --- a/ngraph/test/specialize_function.cpp +++ b/ngraph/test/specialize_function.cpp @@ -117,11 +117,11 @@ TEST(specialize_function, et_static_shape_rank_static_dynamic_subst_val) { ASSERT_EQ(g->get_output_shape(0), (Shape{1, 2, 3})); ASSERT_EQ(g->get_output_element_type(0), element::f32); - auto plus_node = as_type_ptr(g->get_results().at(0)->input_value(0).get_node_shared_ptr()); + auto plus_node = ov::as_type_ptr(g->get_results().at(0)->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(plus_node); - auto convert_node = as_type_ptr(plus_node->input_value(1).get_node_shared_ptr()); + auto convert_node = ov::as_type_ptr(plus_node->input_value(1).get_node_shared_ptr()); ASSERT_TRUE(convert_node); - auto const_node = as_type_ptr(convert_node->input_value(0).get_node_shared_ptr()); + auto const_node = ov::as_type_ptr(convert_node->input_value(0).get_node_shared_ptr()); ASSERT_TRUE(const_node); ASSERT_EQ(const_node->get_output_element_type(0), element::i32); diff --git a/ngraph/test/type_prop/loop.cpp b/ngraph/test/type_prop/loop.cpp index ebbae919e86..a92313c19b3 100644 --- a/ngraph/test/type_prop/loop.cpp +++ b/ngraph/test/type_prop/loop.cpp @@ -49,13 +49,13 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) { for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -71,10 +71,10 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes) { for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -134,13 +134,13 @@ TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) { for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if 
(std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -156,10 +156,10 @@ TEST(type_prop, loop_operation_dowhile_mode_1_iter_static_shapes) { for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -219,13 +219,13 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -238,10 +238,10 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_static_shapes for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -298,13 +298,13 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shape for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -318,10 +318,10 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_dynamic_shape for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); 
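// ---------------------------------------------------------------------------
// Editorial sketch, not part of the patch: these loop tests all repeat one
// dispatch idiom -- compare DiscreteTypeInfo::name with strcmp, then confirm
// the match with a checked downcast. The template arguments were lost in
// extraction; the sketch below assumes the casts target opset5::Loop's
// nested description types, and the helper name is hypothetical:
#include <cstring>
#include <memory>

#include <gtest/gtest.h>

#include "ngraph/opsets/opset5.hpp"

void expect_concat_output_desc(const std::shared_ptr<ngraph::op::util::SubGraphOp::OutputDescription>& desc) {
    // DiscreteTypeInfo::name is a plain C string, hence std::strcmp.
    if (std::strcmp(desc->get_type_info().name, "ConcatOutputDescription") == 0) {
        // The name matched, so the checked downcast must yield a non-null pointer.
        auto concat_desc = ov::as_type_ptr<ngraph::opset5::Loop::ConcatOutputDescription>(desc);
        EXPECT_NE(concat_desc, nullptr);
    }
}
// ---------------------------------------------------------------------------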
EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -383,13 +383,13 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_partially_dyn for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -404,10 +404,10 @@ TEST(type_prop, loop_operation_for_and_condition_mode_dynamic_iter_partially_dyn for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -514,13 +514,13 @@ TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) { for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -536,10 +536,10 @@ TEST(type_prop, loop_operation_infinite_loop_mode_dynamic_iter_dynamic_shapes) { for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -600,13 +600,13 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = 
as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -622,10 +622,10 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -686,13 +686,13 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -708,10 +708,10 @@ TEST(type_prop, loop_operation_for_mode_10_iter_static_shapes_special_body_ports for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -772,13 +772,13 @@ TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs) { for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -795,10 +795,10 @@ TEST(type_prop, loop_operation_10_iter_static_shapes_sliced_inputs) { for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -865,13 +865,13 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_batch_shapes_sliced_inputs_c for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, 
"InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -888,10 +888,10 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_batch_shapes_sliced_inputs_c for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } @@ -962,13 +962,13 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_sliced_inputs_concate for (auto& desc : loop->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -984,10 +984,10 @@ TEST(type_prop, loop_operation_dynamic_iter_dynamic_shapes_sliced_inputs_concate for (auto& desc : loop->get_output_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } diff --git a/ngraph/test/type_prop/ti.cpp b/ngraph/test/type_prop/ti.cpp index 1d27dfe60fa..c2c150deb05 100644 --- a/ngraph/test/type_prop/ti.cpp +++ b/ngraph/test/type_prop/ti.cpp @@ -132,13 +132,13 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2_dynamic) { for (auto& desc : tensor_iterator->get_input_descriptions()) { auto type_info = desc->get_type_info(); if (std::strcmp(type_info.name, "InvariantInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "SliceInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } else if (std::strcmp(type_info.name, "MergedInputDescription") == 0) { - auto input_desc = as_type_ptr(desc); + auto input_desc = ov::as_type_ptr(desc); EXPECT_NE(input_desc, nullptr); } } @@ -153,10 +153,10 @@ TEST(type_prop, tensor_iterator_2_slice_inputs_part_size_2_dynamic) { for (auto& desc : tensor_iterator->get_output_descriptions()) { auto type_info = 
desc->get_type_info(); if (std::strcmp(type_info.name, "ConcatOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } else if (std::strcmp(type_info.name, "BodyOutputDescription") == 0) { - auto output_desc = as_type_ptr(desc); + auto output_desc = ov::as_type_ptr(desc); EXPECT_NE(output_desc, nullptr); } } diff --git a/ngraph/test/util.cpp b/ngraph/test/util.cpp index d72563970b9..83be8232df9 100644 --- a/ngraph/test/util.cpp +++ b/ngraph/test/util.cpp @@ -195,11 +195,11 @@ TEST_F(CloneTest, clone_nodes_full) { auto cloned_nodes = clone_nodes(nodes, node_map); ASSERT_TRUE(CompareNodeVector(nodes, cloned_nodes, node_map)); - ASSERT_NE(nullptr, as_type_ptr(node_map.at(A.get()))); - ASSERT_NE(nullptr, as_type_ptr(node_map.at(B.get()))); - ASSERT_NE(nullptr, as_type_ptr(node_map.at(C.get()))); - ASSERT_NE(nullptr, as_type_ptr(node_map.at(AplusB.get()))); - ASSERT_NE(nullptr, as_type_ptr(node_map.at(AplusBtimesC.get()))); + ASSERT_NE(nullptr, ov::as_type_ptr(node_map.at(A.get()))); + ASSERT_NE(nullptr, ov::as_type_ptr(node_map.at(B.get()))); + ASSERT_NE(nullptr, ov::as_type_ptr(node_map.at(C.get()))); + ASSERT_NE(nullptr, ov::as_type_ptr(node_map.at(AplusB.get()))); + ASSERT_NE(nullptr, ov::as_type_ptr(node_map.at(AplusBtimesC.get()))); auto sorted_nodes = topological_sort(nodes); auto sorted_cloned_nodes = topological_sort(cloned_nodes); diff --git a/ngraph/test/visitors/op/adaptive_max_pool.cpp b/ngraph/test/visitors/op/adaptive_max_pool.cpp index dcb4e3d58b3..730cbc974b0 100644 --- a/ngraph/test/visitors/op/adaptive_max_pool.cpp +++ b/ngraph/test/visitors/op/adaptive_max_pool.cpp @@ -19,7 +19,7 @@ TEST(attributes, adaptive_max_pool_op) { const auto adaptive_pool = make_shared(A, out_shape); NodeBuilder builder(adaptive_pool); - auto g_adaptive_pool = as_type_ptr(builder.create()); + auto g_adaptive_pool = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 1; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/batch_norm.cpp b/ngraph/test/visitors/op/batch_norm.cpp index 389849ceb5f..e138539189f 100644 --- a/ngraph/test/visitors/op/batch_norm.cpp +++ b/ngraph/test/visitors/op/batch_norm.cpp @@ -38,7 +38,7 @@ TYPED_TEST_P(BatchNormAttrTest, batch_norm_inference_op) { const auto expected_attr_count = 1; NodeBuilder builder(batch_norm); EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); - auto g_batch_norm = as_type_ptr(builder.create()); + auto g_batch_norm = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_batch_norm->get_eps_value(), batch_norm->get_eps_value()); } diff --git a/ngraph/test/visitors/op/broadcast.cpp b/ngraph/test/visitors/op/broadcast.cpp index 75cd1ecf98a..04ba6e2242c 100644 --- a/ngraph/test/visitors/op/broadcast.cpp +++ b/ngraph/test/visitors/op/broadcast.cpp @@ -24,7 +24,7 @@ TEST(attributes, broadcast_v3) { const auto broadcast_v3 = make_shared(arg, shape, broadcast_spec); NodeBuilder builder(broadcast_v3); - auto g_broadcast_v3 = as_type_ptr(builder.create()); + auto g_broadcast_v3 = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_broadcast_v3->get_broadcast_spec(), broadcast_spec); } diff --git a/ngraph/test/visitors/op/bucketize.cpp b/ngraph/test/visitors/op/bucketize.cpp index 3424fc66afe..244e7361ae0 100644 --- a/ngraph/test/visitors/op/bucketize.cpp +++ b/ngraph/test/visitors/op/bucketize.cpp @@ -23,7 +23,7 @@ TEST(attributes, bucketize_v3_op_default_attributes) { auto bucketize = make_shared(data, 
buckets); NodeBuilder builder(bucketize); - auto g_bucketize = as_type_ptr(builder.create()); + auto g_bucketize = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_bucketize->get_output_type(), bucketize->get_output_type()); EXPECT_EQ(g_bucketize->get_with_right_bound(), bucketize->get_with_right_bound()); @@ -39,7 +39,7 @@ TEST(attributes, bucketize_v3_op_custom_attributes) { auto bucketize = make_shared(data, buckets, output_type, with_right_bound); NodeBuilder builder(bucketize); - auto g_bucketize = as_type_ptr(builder.create()); + auto g_bucketize = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_bucketize->get_output_type(), bucketize->get_output_type()); EXPECT_EQ(g_bucketize->get_with_right_bound(), bucketize->get_with_right_bound()); diff --git a/ngraph/test/visitors/op/constant.cpp b/ngraph/test/visitors/op/constant.cpp index 7cd3d722723..3f6dec98aac 100644 --- a/ngraph/test/visitors/op/constant.cpp +++ b/ngraph/test/visitors/op/constant.cpp @@ -20,7 +20,7 @@ TEST(attributes, constant_op) { vector data{5.0f, 4.0f, 3.0f, 2.0f, 1.0f, 0.0f}; auto k = make_shared(element::f32, Shape{2, 3}, data); NodeBuilder builder(k); - auto g_k = as_type_ptr(builder.create()); + auto g_k = ov::as_type_ptr(builder.create()); g_k->validate_and_infer_types(); ASSERT_TRUE(g_k); EXPECT_EQ(k->get_element_type(), g_k->get_element_type()); @@ -33,7 +33,7 @@ TEST(attributes, constant_op_different_elements) { vector data{5, 4, 3, 2, 1, 0}; auto k = make_shared(element::i64, Shape{2, 3}, data); NodeBuilder builder(k); - auto g_k = as_type_ptr(builder.create()); + auto g_k = ov::as_type_ptr(builder.create()); g_k->validate_and_infer_types(); ASSERT_TRUE(g_k); EXPECT_EQ(k->get_element_type(), g_k->get_element_type()); @@ -47,7 +47,7 @@ TEST(attributes, constant_op_identical_elements) { vector data{5, 5, 5, 5, 5, 5}; auto k = make_shared(element::i64, Shape{2, 3}, data); NodeBuilder builder(k); - auto g_k = as_type_ptr(builder.create()); + auto g_k = ov::as_type_ptr(builder.create()); g_k->validate_and_infer_types(); ASSERT_TRUE(g_k); EXPECT_EQ(k->get_element_type(), g_k->get_element_type()); diff --git a/ngraph/test/visitors/op/convert.cpp b/ngraph/test/visitors/op/convert.cpp index d7e40e771dc..5857a000add 100644 --- a/ngraph/test/visitors/op/convert.cpp +++ b/ngraph/test/visitors/op/convert.cpp @@ -25,6 +25,6 @@ TEST(attributes, convert_op_v0) { EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); // destination_type attribute - const auto g_convert = as_type_ptr(builder.create()); + const auto g_convert = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_convert->get_destination_type(), convert->get_destination_type()); } diff --git a/ngraph/test/visitors/op/convolution_backprop.cpp b/ngraph/test/visitors/op/convolution_backprop.cpp index 4bd8c66ed27..7095b303465 100644 --- a/ngraph/test/visitors/op/convolution_backprop.cpp +++ b/ngraph/test/visitors/op/convolution_backprop.cpp @@ -32,7 +32,7 @@ TEST(attributes, convolution_backprop_op) { dilations, op::PadType::VALID); NodeBuilder builder(convolution); - auto g_convolution = as_type_ptr(builder.create()); + auto g_convolution = ov::as_type_ptr(builder.create()); // attribute count const auto expected_attr_count = 6; @@ -75,7 +75,7 @@ TEST(attributes, convolution_backprop_output_shape_output_padding) { padType, output_padding); NodeBuilder builder(convolution); - const auto g_convolution = as_type_ptr(builder.create()); + const auto g_convolution = ov::as_type_ptr(builder.create()); // attribute count const auto expected_attr_count = 6; diff --git 
a/ngraph/test/visitors/op/cum_sum.cpp b/ngraph/test/visitors/op/cum_sum.cpp index 66ac11a8b8b..e82d3cb3192 100644 --- a/ngraph/test/visitors/op/cum_sum.cpp +++ b/ngraph/test/visitors/op/cum_sum.cpp @@ -20,7 +20,7 @@ TEST(attributes, cum_sum_op_default_attributes_no_axis_input) { auto cs = make_shared(A); NodeBuilder builder(cs); - auto g_cs = as_type_ptr(builder.create()); + auto g_cs = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 2; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); @@ -38,7 +38,7 @@ TEST(attributes, cum_sum_op_default_attributes) { auto cs = make_shared(A, axis); NodeBuilder builder(cs); - auto g_cs = as_type_ptr(builder.create()); + auto g_cs = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 2; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); @@ -58,7 +58,7 @@ TEST(attributes, cum_sum_op_custom_attributes) { auto cs = make_shared(A, axis, exclusive, reverse); NodeBuilder builder(cs); - auto g_cs = as_type_ptr(builder.create()); + auto g_cs = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 2; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/deformable_convolution.cpp b/ngraph/test/visitors/op/deformable_convolution.cpp index 26361c53ff1..f76d9b67d03 100644 --- a/ngraph/test/visitors/op/deformable_convolution.cpp +++ b/ngraph/test/visitors/op/deformable_convolution.cpp @@ -26,7 +26,7 @@ TEST(attributes, deformable_convolution_default_attributes) { auto convolution = make_shared(data, offsets, filters, strides, pads_begin, pads_end, dilations); NodeBuilder builder(convolution); - auto g_convolution = as_type_ptr(builder.create()); + auto g_convolution = ov::as_type_ptr(builder.create()); // attribute count const auto expected_attr_count = 8; @@ -66,7 +66,7 @@ TEST(attributes, deformable_convolution_attributes) { 2, true); NodeBuilder builder(convolution); - auto g_convolution = as_type_ptr(builder.create()); + auto g_convolution = ov::as_type_ptr(builder.create()); // attribute count const auto expected_attr_count = 8; diff --git a/ngraph/test/visitors/op/deformable_psroi_pooling.cpp b/ngraph/test/visitors/op/deformable_psroi_pooling.cpp index 5a8cd667eb8..b8104ac0b58 100644 --- a/ngraph/test/visitors/op/deformable_psroi_pooling.cpp +++ b/ngraph/test/visitors/op/deformable_psroi_pooling.cpp @@ -38,7 +38,7 @@ TEST(attributes, deformable_psroi_pooling_op) { trans_std, part_size); NodeBuilder builder(op); - auto g_op = as_type_ptr(builder.create()); + auto g_op = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_op->get_output_dim(), op->get_output_dim()); EXPECT_EQ(g_op->get_spatial_scale(), op->get_spatial_scale()); diff --git a/ngraph/test/visitors/op/depth_to_space.cpp b/ngraph/test/visitors/op/depth_to_space.cpp index 3bbf66f1909..8a9ac3d87b2 100644 --- a/ngraph/test/visitors/op/depth_to_space.cpp +++ b/ngraph/test/visitors/op/depth_to_space.cpp @@ -24,7 +24,7 @@ TEST(attributes, depth_to_space) { const auto dts = std::make_shared(data, mode, block_size); NodeBuilder builder(dts); - auto g_dts = as_type_ptr(builder.create()); + auto g_dts = ov::as_type_ptr(builder.create()); // attribute count const auto expected_attr_count = 2; diff --git a/ngraph/test/visitors/op/detection_output.cpp b/ngraph/test/visitors/op/detection_output.cpp index aba42f4aecb..488bbf423d3 100644 --- a/ngraph/test/visitors/op/detection_output.cpp +++ b/ngraph/test/visitors/op/detection_output.cpp @@ -45,7 +45,7 @@ TEST(attributes, detection_output_op) { auto 
detection_output = make_shared(box_logits, class_preds, proposals, aux_class_preds, aux_box_pred, attrs); NodeBuilder builder(detection_output); - auto g_detection_output = as_type_ptr(builder.create()); + auto g_detection_output = ov::as_type_ptr(builder.create()); const auto do_attrs = detection_output->get_attrs(); const auto g_do_attrs = g_detection_output->get_attrs(); diff --git a/ngraph/test/visitors/op/einsum.cpp b/ngraph/test/visitors/op/einsum.cpp index 8ec2a8a12be..86e0452027b 100644 --- a/ngraph/test/visitors/op/einsum.cpp +++ b/ngraph/test/visitors/op/einsum.cpp @@ -20,6 +20,6 @@ TEST(attributes, einsum_v7_op) { std::string equation = "ab,bc->ac"; auto einsum = make_shared(OutputVector{input1, input2}, equation); NodeBuilder builder(einsum); - auto g_einsum = as_type_ptr(builder.create()); + auto g_einsum = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_einsum->get_equation(), einsum->get_equation()); } diff --git a/ngraph/test/visitors/op/elu.cpp b/ngraph/test/visitors/op/elu.cpp index 8b7d326121e..ecdd8244551 100644 --- a/ngraph/test/visitors/op/elu.cpp +++ b/ngraph/test/visitors/op/elu.cpp @@ -24,7 +24,7 @@ TEST(attributes, elu_op) { const auto elu = make_shared(data, alpha); NodeBuilder builder(elu); - auto g_elu = as_type_ptr(builder.create()); + auto g_elu = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_elu->get_alpha(), elu->get_alpha()); } diff --git a/ngraph/test/visitors/op/extractimagepatches.cpp b/ngraph/test/visitors/op/extractimagepatches.cpp index d9142a3757a..a19ece5d53c 100644 --- a/ngraph/test/visitors/op/extractimagepatches.cpp +++ b/ngraph/test/visitors/op/extractimagepatches.cpp @@ -27,7 +27,7 @@ TEST(attributes, extractimagepatches_op) { auto extractimagepatches = make_shared(data, sizes, strides, rates, padtype_padding); NodeBuilder builder(extractimagepatches); - auto g_extractimagepatches = as_type_ptr(builder.create()); + auto g_extractimagepatches = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 4; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/fake_quantize.cpp b/ngraph/test/visitors/op/fake_quantize.cpp index 0ffe875785e..bbba136cfaa 100644 --- a/ngraph/test/visitors/op/fake_quantize.cpp +++ b/ngraph/test/visitors/op/fake_quantize.cpp @@ -30,7 +30,7 @@ TEST(attributes, fake_quantize_op) { const auto fake_quantize = make_shared(data, input_low, input_high, output_low, output_high, levels, auto_broadcast); NodeBuilder builder(fake_quantize); - auto g_fake_quantize = as_type_ptr(builder.create()); + auto g_fake_quantize = ov::as_type_ptr(builder.create()); // attribute count const auto expected_attr_count = 2; diff --git a/ngraph/test/visitors/op/gather.cpp b/ngraph/test/visitors/op/gather.cpp index 5d646bd2a60..b1d4cbdf971 100644 --- a/ngraph/test/visitors/op/gather.cpp +++ b/ngraph/test/visitors/op/gather.cpp @@ -23,7 +23,7 @@ TEST(attributes, gather_v7_op) { auto gather = make_shared(data, indices, axis, batch_dims); NodeBuilder builder(gather); - auto g_gather = as_type_ptr(builder.create()); + auto g_gather = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_gather->get_batch_dims(), gather->get_batch_dims()); } @@ -37,7 +37,7 @@ TEST(attributes, gather_v8_op) { auto gather = make_shared(data, indices, axis, batch_dims); NodeBuilder builder(gather); - auto g_gather = as_type_ptr(builder.create()); + auto g_gather = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_gather->get_batch_dims(), gather->get_batch_dims()); } diff --git a/ngraph/test/visitors/op/gelu.cpp 
b/ngraph/test/visitors/op/gelu.cpp index ec4b180bfb2..e1b0cb3461a 100644 --- a/ngraph/test/visitors/op/gelu.cpp +++ b/ngraph/test/visitors/op/gelu.cpp @@ -18,7 +18,7 @@ TEST(attributes, gelu_op) { const auto approximation_mode = op::GeluApproximationMode::ERF; const auto gelu = make_shared(data_input, approximation_mode); NodeBuilder builder(gelu); - auto g_gelu = as_type_ptr(builder.create()); + auto g_gelu = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_gelu->get_approximation_mode(), gelu->get_approximation_mode()); } diff --git a/ngraph/test/visitors/op/grn.cpp b/ngraph/test/visitors/op/grn.cpp index df8b5d6554b..9d71e77520e 100644 --- a/ngraph/test/visitors/op/grn.cpp +++ b/ngraph/test/visitors/op/grn.cpp @@ -20,7 +20,7 @@ TEST(attributes, grn_op) { auto grn = make_shared(data, bias); NodeBuilder builder(grn); - auto g_grn = as_type_ptr(builder.create()); + auto g_grn = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 1; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/group_conv.cpp b/ngraph/test/visitors/op/group_conv.cpp index 4b826e86c64..4ee94e66333 100644 --- a/ngraph/test/visitors/op/group_conv.cpp +++ b/ngraph/test/visitors/op/group_conv.cpp @@ -32,7 +32,7 @@ TEST(attributes, group_conv_op) { dilations, op::PadType::VALID); NodeBuilder builder(group_conv); - auto g_group_conv = as_type_ptr(builder.create()); + auto g_group_conv = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_group_conv->get_strides(), group_conv->get_strides()); EXPECT_EQ(g_group_conv->get_pads_begin(), group_conv->get_pads_begin()); EXPECT_EQ(g_group_conv->get_pads_end(), group_conv->get_pads_end()); @@ -63,7 +63,7 @@ TEST(attributes, group_conv_backprop_data_op) { auto_pad, output_padding); NodeBuilder builder(gcbd); - const auto g_gcbd = as_type_ptr(builder.create()); + const auto g_gcbd = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_gcbd->get_strides(), gcbd->get_strides()); EXPECT_EQ(g_gcbd->get_pads_begin(), gcbd->get_pads_begin()); diff --git a/ngraph/test/visitors/op/interpolate.cpp b/ngraph/test/visitors/op/interpolate.cpp index dab9d978db0..2e1f96ff3b8 100644 --- a/ngraph/test/visitors/op/interpolate.cpp +++ b/ngraph/test/visitors/op/interpolate.cpp @@ -31,7 +31,7 @@ TEST(attributes, interpolate_op) { auto interpolate = make_shared(img, out_shape, interp_atrs); NodeBuilder builder(interpolate); - auto g_interpolate = as_type_ptr(builder.create()); + auto g_interpolate = ov::as_type_ptr(builder.create()); const auto i_attrs = interpolate->get_attrs(); const auto g_i_attrs = g_interpolate->get_attrs(); diff --git a/ngraph/test/visitors/op/lrn.cpp b/ngraph/test/visitors/op/lrn.cpp index 5b2485367da..90f85d4dd1d 100644 --- a/ngraph/test/visitors/op/lrn.cpp +++ b/ngraph/test/visitors/op/lrn.cpp @@ -28,7 +28,7 @@ TEST(attributes, lrn_op) { const auto lrn = make_shared(arg, axes, alpha, beta, bias, size); NodeBuilder builder(lrn); - auto g_lrn = as_type_ptr(builder.create()); + auto g_lrn = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_lrn->get_alpha(), lrn->get_alpha()); EXPECT_EQ(g_lrn->get_beta(), lrn->get_beta()); diff --git a/ngraph/test/visitors/op/lstm_cell.cpp b/ngraph/test/visitors/op/lstm_cell.cpp index 7932b533941..71f3be63f76 100644 --- a/ngraph/test/visitors/op/lstm_cell.cpp +++ b/ngraph/test/visitors/op/lstm_cell.cpp @@ -41,7 +41,7 @@ TEST(attributes, lstm_cell_op) { activations_beta, clip); NodeBuilder builder(lstm_cell); - auto g_lstm_cell = as_type_ptr(builder.create()); + auto g_lstm_cell = 
ov::as_type_ptr(builder.create()); EXPECT_EQ(g_lstm_cell->get_hidden_size(), lstm_cell->get_hidden_size()); EXPECT_EQ(g_lstm_cell->get_activations(), lstm_cell->get_activations()); diff --git a/ngraph/test/visitors/op/lstm_sequence.cpp b/ngraph/test/visitors/op/lstm_sequence.cpp index 0dd12076876..d9b9d1d976e 100644 --- a/ngraph/test/visitors/op/lstm_sequence.cpp +++ b/ngraph/test/visitors/op/lstm_sequence.cpp @@ -55,7 +55,7 @@ TEST(attributes, lstm_sequence_op) { activations, clip_threshold); NodeBuilder builder(lstm_sequence); - auto g_lstm_sequence = as_type_ptr(builder.create()); + auto g_lstm_sequence = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_lstm_sequence->get_hidden_size(), lstm_sequence->get_hidden_size()); EXPECT_EQ(g_lstm_sequence->get_activations(), lstm_sequence->get_activations()); diff --git a/ngraph/test/visitors/op/matmul.cpp b/ngraph/test/visitors/op/matmul.cpp index e9b9a99c46b..9477333f254 100644 --- a/ngraph/test/visitors/op/matmul.cpp +++ b/ngraph/test/visitors/op/matmul.cpp @@ -26,7 +26,7 @@ TEST(attributes, matmul_op) { auto matmul = make_shared(A, B, transpose_a, transpose_b); NodeBuilder builder(matmul); - auto g_matmul = as_type_ptr(builder.create()); + auto g_matmul = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_matmul->get_transpose_a(), matmul->get_transpose_a()); EXPECT_EQ(g_matmul->get_transpose_b(), matmul->get_transpose_b()); diff --git a/ngraph/test/visitors/op/matrix_nms.cpp b/ngraph/test/visitors/op/matrix_nms.cpp index e4252652aef..c0d499f70fc 100644 --- a/ngraph/test/visitors/op/matrix_nms.cpp +++ b/ngraph/test/visitors/op/matrix_nms.cpp @@ -37,7 +37,7 @@ TEST(attributes, matrix_nms_v8_op_custom_attributes) { auto nms = make_shared(boxes, scores, attrs); NodeBuilder builder(nms); - auto g_nms = as_type_ptr(builder.create()); + auto g_nms = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 11; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); @@ -76,7 +76,7 @@ TEST(attributes, matrix_nms_v8_op_default_attributes) { auto nms = make_shared(boxes, scores, opset8::MatrixNms::Attributes()); NodeBuilder builder(nms); - auto g_nms = as_type_ptr(builder.create()); + auto g_nms = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 11; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/max_pool.cpp b/ngraph/test/visitors/op/max_pool.cpp index 9ff9d0881a1..1672425612b 100644 --- a/ngraph/test/visitors/op/max_pool.cpp +++ b/ngraph/test/visitors/op/max_pool.cpp @@ -27,7 +27,7 @@ TEST(attributes, max_pool_op) { auto max_pool = make_shared(data, strides, pads_begin, pads_end, kernel, rounding_mode, auto_pad); NodeBuilder builder(max_pool); - auto g_max_pool = as_type_ptr(builder.create()); + auto g_max_pool = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_max_pool->get_strides(), max_pool->get_strides()); EXPECT_EQ(g_max_pool->get_pads_begin(), max_pool->get_pads_begin()); @@ -60,7 +60,7 @@ TEST(attributes, max_pool_v8_op) { auto_pad, index_element_type); NodeBuilder builder(max_pool); - auto g_max_pool = as_type_ptr(builder.create()); + auto g_max_pool = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_max_pool->get_strides(), max_pool->get_strides()); EXPECT_EQ(g_max_pool->get_dilations(), max_pool->get_dilations()); diff --git a/ngraph/test/visitors/op/multiclass_nms.cpp b/ngraph/test/visitors/op/multiclass_nms.cpp index 54d8840fda9..ce89e731463 100644 --- a/ngraph/test/visitors/op/multiclass_nms.cpp +++ b/ngraph/test/visitors/op/multiclass_nms.cpp @@ -36,7 +36,7 @@ 
TEST(attributes, multiclass_nms_v8_op_custom_attributes) { auto nms = make_shared(boxes, scores, attrs); NodeBuilder builder(nms); - auto g_nms = as_type_ptr(builder.create()); + auto g_nms = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 10; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); @@ -73,7 +73,7 @@ TEST(attributes, multiclass_nms_v8_op_default_attributes) { auto nms = make_shared(boxes, scores, opset8::MulticlassNms::Attributes()); NodeBuilder builder(nms); - auto g_nms = as_type_ptr(builder.create()); + auto g_nms = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 10; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/mvn.cpp b/ngraph/test/visitors/op/mvn.cpp index 7bbfefbf7af..c0c955bbc11 100644 --- a/ngraph/test/visitors/op/mvn.cpp +++ b/ngraph/test/visitors/op/mvn.cpp @@ -26,7 +26,7 @@ TEST(attributes, mvn_v1_op) { const auto op = make_shared(data, true, false, 0.1); op->set_reduction_axes(axes); NodeBuilder builder(op); - const auto g_op = as_type_ptr(builder.create()); + const auto g_op = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 4; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); @@ -44,7 +44,7 @@ TEST(attributes, mvn_v6_op) { const auto op = make_shared(data, axes, false, 0.1, op::MVNEpsMode::INSIDE_SQRT); NodeBuilder builder(op); - const auto g_op = as_type_ptr(builder.create()); + const auto g_op = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 3; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/non_max_suppression.cpp b/ngraph/test/visitors/op/non_max_suppression.cpp index e9d9b2cb0b3..5b232302d23 100644 --- a/ngraph/test/visitors/op/non_max_suppression.cpp +++ b/ngraph/test/visitors/op/non_max_suppression.cpp @@ -26,7 +26,7 @@ TEST(attributes, non_max_suppression_op_custom_attributes) { auto nms = make_shared(boxes, scores, box_encoding, sort_result_descending); NodeBuilder builder(nms); - auto g_nms = as_type_ptr(builder.create()); + auto g_nms = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_nms->get_box_encoding(), nms->get_box_encoding()); EXPECT_EQ(g_nms->get_sort_result_descending(), nms->get_sort_result_descending()); @@ -39,7 +39,7 @@ TEST(attributes, non_max_suppression_op_default_attributes) { auto nms = make_shared(boxes, scores); NodeBuilder builder(nms); - auto g_nms = as_type_ptr(builder.create()); + auto g_nms = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_nms->get_box_encoding(), nms->get_box_encoding()); EXPECT_EQ(g_nms->get_sort_result_descending(), nms->get_sort_result_descending()); @@ -56,7 +56,7 @@ TEST(attributes, non_max_suppression_v3_op_custom_attributes) { auto nms = make_shared(boxes, scores, box_encoding, sort_result_descending, output_type); NodeBuilder builder(nms); - auto g_nms = as_type_ptr(builder.create()); + auto g_nms = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_nms->get_box_encoding(), nms->get_box_encoding()); EXPECT_EQ(g_nms->get_sort_result_descending(), nms->get_sort_result_descending()); @@ -70,7 +70,7 @@ TEST(attributes, non_max_suppression_v3_op_default_attributes) { auto nms = make_shared(boxes, scores); NodeBuilder builder(nms); - auto g_nms = as_type_ptr(builder.create()); + auto g_nms = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_nms->get_box_encoding(), nms->get_box_encoding()); EXPECT_EQ(g_nms->get_sort_result_descending(), nms->get_sort_result_descending()); diff --git 
a/ngraph/test/visitors/op/normalize_l2.cpp b/ngraph/test/visitors/op/normalize_l2.cpp index d86e708ac92..4715c558085 100644 --- a/ngraph/test/visitors/op/normalize_l2.cpp +++ b/ngraph/test/visitors/op/normalize_l2.cpp @@ -19,7 +19,7 @@ void static test_normalize_l2_attributes(float eps, op::EpsMode eps_mode) { auto normalize_l2 = make_shared(data, axes, eps, eps_mode); NodeBuilder builder(normalize_l2); - auto g_normalize_l2 = as_type_ptr(builder.create()); + auto g_normalize_l2 = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 2; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/one_hot.cpp b/ngraph/test/visitors/op/one_hot.cpp index c59482e2aad..0366c69197e 100644 --- a/ngraph/test/visitors/op/one_hot.cpp +++ b/ngraph/test/visitors/op/one_hot.cpp @@ -27,7 +27,7 @@ TEST(attributes, one_hot_op) { auto one_hot = make_shared(indices, depth, on_value, off_value, axis); NodeBuilder builder(one_hot); - auto g_one_hot = as_type_ptr(builder.create()); + auto g_one_hot = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_one_hot->get_axis(), one_hot->get_axis()); } diff --git a/ngraph/test/visitors/op/pad.cpp b/ngraph/test/visitors/op/pad.cpp index 314bc1a3d64..a2824b41d21 100644 --- a/ngraph/test/visitors/op/pad.cpp +++ b/ngraph/test/visitors/op/pad.cpp @@ -26,7 +26,7 @@ TEST(attributes, pad_op) { auto pad = make_shared(arg, pads_begin, pads_end, pad_mode); NodeBuilder builder(pad); - auto g_pad = as_type_ptr(builder.create()); + auto g_pad = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_pad->get_pad_mode(), pad->get_pad_mode()); } diff --git a/ngraph/test/visitors/op/parameter.cpp b/ngraph/test/visitors/op/parameter.cpp index 8eab7293ef8..cfa1c8d210e 100644 --- a/ngraph/test/visitors/op/parameter.cpp +++ b/ngraph/test/visitors/op/parameter.cpp @@ -19,7 +19,7 @@ TEST(attributes, parameter_op) { auto parameter = std::make_shared(element::f32, PartialShape{Dimension{1}, Dimension{4}}); NodeBuilder builder(parameter); - auto g_parameter = as_type_ptr(builder.create()); + auto g_parameter = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 2; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/prior_box.cpp b/ngraph/test/visitors/op/prior_box.cpp index 3e7f072e35c..7ff922c62ce 100644 --- a/ngraph/test/visitors/op/prior_box.cpp +++ b/ngraph/test/visitors/op/prior_box.cpp @@ -37,7 +37,7 @@ TEST(attributes, prior_box_op) { auto prior_box = make_shared(layer_shape, image_shape, attrs); NodeBuilder builder(prior_box); - auto g_prior_box = as_type_ptr(builder.create()); + auto g_prior_box = ov::as_type_ptr(builder.create()); const auto prior_box_attrs = prior_box->get_attrs(); const auto g_prior_box_attrs = g_prior_box->get_attrs(); diff --git a/ngraph/test/visitors/op/prior_box_clustered.cpp b/ngraph/test/visitors/op/prior_box_clustered.cpp index 4544616fc14..d99ab449ecf 100644 --- a/ngraph/test/visitors/op/prior_box_clustered.cpp +++ b/ngraph/test/visitors/op/prior_box_clustered.cpp @@ -29,7 +29,7 @@ TEST(attributes, prior_box_clustered_op) { auto pbc = make_shared(layer_shape, image_shape, attrs); NodeBuilder builder(pbc); - auto g_pbc = as_type_ptr(builder.create()); + auto g_pbc = ov::as_type_ptr(builder.create()); const auto pbc_attrs = pbc->get_attrs(); const auto g_pbc_attrs = g_pbc->get_attrs(); const auto expected_attr_count = 8; diff --git a/ngraph/test/visitors/op/proposal.cpp b/ngraph/test/visitors/op/proposal.cpp index dbb61dec8d0..35a6bf48ee0 100644 --- 
a/ngraph/test/visitors/op/proposal.cpp +++ b/ngraph/test/visitors/op/proposal.cpp @@ -40,7 +40,7 @@ TEST(attributes, proposal_op) { auto proposal = make_shared(class_probs, class_logits, image_shape, attrs); NodeBuilder builder(proposal); - auto g_proposal = as_type_ptr(builder.create()); + auto g_proposal = ov::as_type_ptr(builder.create()); const auto proposal_attrs = proposal->get_attrs(); const auto g_proposal_attrs = g_proposal->get_attrs(); diff --git a/ngraph/test/visitors/op/psroi_pooling.cpp b/ngraph/test/visitors/op/psroi_pooling.cpp index 5082efdc023..41b65a7d335 100644 --- a/ngraph/test/visitors/op/psroi_pooling.cpp +++ b/ngraph/test/visitors/op/psroi_pooling.cpp @@ -37,7 +37,7 @@ TEST(attributes, psroi_pooling_op) { spatial_bins_y, mode); NodeBuilder builder(psroi_pool); - auto g_psroi_pool = as_type_ptr(builder.create()); + auto g_psroi_pool = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_psroi_pool->get_output_dim(), psroi_pool->get_output_dim()); EXPECT_EQ(g_psroi_pool->get_group_size(), psroi_pool->get_group_size()); diff --git a/ngraph/test/visitors/op/random_uniform.cpp b/ngraph/test/visitors/op/random_uniform.cpp index 8f020a2683a..0e6a1949154 100644 --- a/ngraph/test/visitors/op/random_uniform.cpp +++ b/ngraph/test/visitors/op/random_uniform.cpp @@ -22,7 +22,7 @@ TEST(attributes, random_uniform_op) { const auto random_uniform = make_shared(out_shape, min_val, max_val, element::Type_t::f32, 150, 10); NodeBuilder builder(random_uniform); - auto g_random_uniform = as_type_ptr(builder.create()); + auto g_random_uniform = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 3; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/reduce_ops.hpp b/ngraph/test/visitors/op/reduce_ops.hpp index d70b498fbf4..7fc3c2e914b 100644 --- a/ngraph/test/visitors/op/reduce_ops.hpp +++ b/ngraph/test/visitors/op/reduce_ops.hpp @@ -39,7 +39,7 @@ TYPED_TEST_P(ReduceOpsAttrTest, reduce_ops) NodeBuilder builder(reduce_op); const auto expected_attr_count = 1; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); - auto g_reduce_op = as_type_ptr(builder.create()); + auto g_reduce_op = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_reduce_op->get_keep_dims(), reduce_op->get_keep_dims()); } diff --git a/ngraph/test/visitors/op/region_yolo.cpp b/ngraph/test/visitors/op/region_yolo.cpp index 0a3eb85ce6e..536774c4d8e 100644 --- a/ngraph/test/visitors/op/region_yolo.cpp +++ b/ngraph/test/visitors/op/region_yolo.cpp @@ -39,7 +39,7 @@ TEST(attributes, region_yolo_op) { end_axis, anchors); NodeBuilder builder(region_yolo); - auto g_region_yolo = as_type_ptr(builder.create()); + auto g_region_yolo = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_region_yolo->get_num_coords(), region_yolo->get_num_coords()); EXPECT_EQ(g_region_yolo->get_num_classes(), region_yolo->get_num_classes()); diff --git a/ngraph/test/visitors/op/reorg_yolo.cpp b/ngraph/test/visitors/op/reorg_yolo.cpp index 3b11bc547b8..3ad1fc7318e 100644 --- a/ngraph/test/visitors/op/reorg_yolo.cpp +++ b/ngraph/test/visitors/op/reorg_yolo.cpp @@ -22,7 +22,7 @@ TEST(attributes, reorg_yolo_op_stride) { const auto op = make_shared(data, 2); NodeBuilder builder(op); - const auto g_op = as_type_ptr(builder.create()); + const auto g_op = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_op->get_strides(), op->get_strides()); } @@ -33,7 +33,7 @@ TEST(attributes, reorg_yolo_op_strides) { const auto op = make_shared(data, Strides{2}); NodeBuilder builder(op); - const auto g_op = 
as_type_ptr(builder.create()); + const auto g_op = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_op->get_strides(), op->get_strides()); } diff --git a/ngraph/test/visitors/op/reshape.cpp b/ngraph/test/visitors/op/reshape.cpp index 2877f99fe39..c21841623a4 100644 --- a/ngraph/test/visitors/op/reshape.cpp +++ b/ngraph/test/visitors/op/reshape.cpp @@ -25,7 +25,7 @@ TEST(attributes, reshape_op) { auto reshape = make_shared(data, pattern, special_zero); NodeBuilder builder(reshape); - auto g_reshape = as_type_ptr(builder.create()); + auto g_reshape = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 1; diff --git a/ngraph/test/visitors/op/reverse.cpp b/ngraph/test/visitors/op/reverse.cpp index af7130f1def..48c24c178ac 100644 --- a/ngraph/test/visitors/op/reverse.cpp +++ b/ngraph/test/visitors/op/reverse.cpp @@ -23,7 +23,7 @@ TEST(attributes, reverse_op_enum_mode) { auto reverse = make_shared(data, reversed_axes, opset1::Reverse::Mode::INDEX); NodeBuilder builder(reverse); - auto g_reverse = as_type_ptr(builder.create()); + auto g_reverse = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_reverse->get_mode(), reverse->get_mode()); } @@ -37,7 +37,7 @@ TEST(attributes, reverse_op_string_mode) { auto reverse = make_shared(data, reversed_axes, mode); NodeBuilder builder(reverse); - auto g_reverse = as_type_ptr(builder.create()); + auto g_reverse = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_reverse->get_mode(), reverse->get_mode()); } diff --git a/ngraph/test/visitors/op/reverse_sequence.cpp b/ngraph/test/visitors/op/reverse_sequence.cpp index 55b11d6b730..75613e26a6c 100644 --- a/ngraph/test/visitors/op/reverse_sequence.cpp +++ b/ngraph/test/visitors/op/reverse_sequence.cpp @@ -26,7 +26,7 @@ TEST(attributes, reverse_sequence_op) { auto reverse_sequence = make_shared(data, seq_indices, batch_axis, seq_axis); NodeBuilder builder(reverse_sequence); - auto g_reverse_sequence = as_type_ptr(builder.create()); + auto g_reverse_sequence = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_reverse_sequence->get_origin_batch_axis(), reverse_sequence->get_origin_batch_axis()); EXPECT_EQ(g_reverse_sequence->get_origin_sequence_axis(), reverse_sequence->get_origin_sequence_axis()); diff --git a/ngraph/test/visitors/op/rnn_cell.cpp b/ngraph/test/visitors/op/rnn_cell.cpp index 2a1bf47b868..ff46fba29d8 100644 --- a/ngraph/test/visitors/op/rnn_cell.cpp +++ b/ngraph/test/visitors/op/rnn_cell.cpp @@ -33,7 +33,7 @@ TEST(attributes, rnn_cell_op_custom_attributes) { make_shared(X, H, W, R, hidden_size, activations, activations_alpha, activations_beta, clip); NodeBuilder builder(rnn_cell); - auto g_rnn_cell = as_type_ptr(builder.create()); + auto g_rnn_cell = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_rnn_cell->get_hidden_size(), rnn_cell->get_hidden_size()); EXPECT_EQ(g_rnn_cell->get_clip(), rnn_cell->get_clip()); @@ -54,7 +54,7 @@ TEST(attributes, rnn_cell_op_default_attributes) { auto rnn_cell = make_shared(X, H, W, R, hidden_size); NodeBuilder builder(rnn_cell); - auto g_rnn_cell = as_type_ptr(builder.create()); + auto g_rnn_cell = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_rnn_cell->get_hidden_size(), rnn_cell->get_hidden_size()); EXPECT_EQ(g_rnn_cell->get_clip(), rnn_cell->get_clip()); diff --git a/ngraph/test/visitors/op/roi_pooling.cpp b/ngraph/test/visitors/op/roi_pooling.cpp index cc7baee5499..553b3420aaa 100644 --- a/ngraph/test/visitors/op/roi_pooling.cpp +++ b/ngraph/test/visitors/op/roi_pooling.cpp @@ -23,7 +23,7 @@ TEST(attributes, roi_pooling_op) { const auto op = make_shared(data, 
coords, Shape{5, 5}, 0.123, "bilinear"); NodeBuilder builder(op); - const auto g_op = as_type_ptr(builder.create()); + const auto g_op = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_op->get_output_size(), op->get_output_size()); EXPECT_EQ(g_op->get_spatial_scale(), op->get_spatial_scale()); diff --git a/ngraph/test/visitors/op/round.cpp b/ngraph/test/visitors/op/round.cpp index ccb6004ce08..7d4efd98f1b 100644 --- a/ngraph/test/visitors/op/round.cpp +++ b/ngraph/test/visitors/op/round.cpp @@ -21,7 +21,7 @@ void static test_mode(opset5::Round::RoundMode mode) { auto data = make_shared(element::f32, Shape{200}); auto round = make_shared(data, mode); NodeBuilder builder(round); - auto g_round = as_type_ptr(builder.create()); + auto g_round = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_round->get_mode(), round->get_mode()); } diff --git a/ngraph/test/visitors/op/select.cpp b/ngraph/test/visitors/op/select.cpp index 428897cef3b..c3a5925a88c 100644 --- a/ngraph/test/visitors/op/select.cpp +++ b/ngraph/test/visitors/op/select.cpp @@ -26,6 +26,6 @@ TEST(attributes, select) { const auto expected_attr_count = 1; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); - auto g_select = as_type_ptr(builder.create()); + auto g_select = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_select->get_autob(), select->get_autob()); } diff --git a/ngraph/test/visitors/op/shuffle_channels.cpp b/ngraph/test/visitors/op/shuffle_channels.cpp index 25af8fad757..173d451019a 100644 --- a/ngraph/test/visitors/op/shuffle_channels.cpp +++ b/ngraph/test/visitors/op/shuffle_channels.cpp @@ -20,7 +20,7 @@ TEST(attributes, shuffle_channels_op) { auto groups = 2; auto shuffle_channels = make_shared(data, axis, groups); NodeBuilder builder(shuffle_channels); - auto g_shuffle_channels = as_type_ptr(builder.create()); + auto g_shuffle_channels = ov::as_type_ptr(builder.create()); const auto expected_attr_count = 2; EXPECT_EQ(builder.get_value_map_size(), expected_attr_count); diff --git a/ngraph/test/visitors/op/softmax.cpp b/ngraph/test/visitors/op/softmax.cpp index 70064fd6495..41423e11272 100644 --- a/ngraph/test/visitors/op/softmax.cpp +++ b/ngraph/test/visitors/op/softmax.cpp @@ -22,7 +22,7 @@ TEST(attributes, softmax_op) { auto axis = 0; auto softmax = make_shared(data, axis); NodeBuilder builder(softmax); - auto g_softmax = as_type_ptr(builder.create()); + auto g_softmax = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_softmax->get_axis(), softmax->get_axis()); } diff --git a/ngraph/test/visitors/op/space_to_depth.cpp b/ngraph/test/visitors/op/space_to_depth.cpp index d64f46cf88d..f03bbe38079 100644 --- a/ngraph/test/visitors/op/space_to_depth.cpp +++ b/ngraph/test/visitors/op/space_to_depth.cpp @@ -25,7 +25,7 @@ TEST(attributes, space_to_depth_op) { auto space_to_depth = make_shared(data, mode, block_size); NodeBuilder builder(space_to_depth); - auto g_space_to_depth = as_type_ptr(builder.create()); + auto g_space_to_depth = ov::as_type_ptr(builder.create()); // attribute count const auto expected_attr_count = 2; diff --git a/ngraph/test/visitors/op/split.cpp b/ngraph/test/visitors/op/split.cpp index e6e3edfa429..8f79676abe6 100644 --- a/ngraph/test/visitors/op/split.cpp +++ b/ngraph/test/visitors/op/split.cpp @@ -23,7 +23,7 @@ TEST(attributes, split_op) { auto num_splits = 2; auto split = make_shared(data, axis, num_splits); NodeBuilder builder(split); - auto g_split = as_type_ptr(builder.create()); + auto g_split = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_split->get_num_splits(), 
split->get_num_splits()); } diff --git a/ngraph/test/visitors/op/strided_slice.cpp b/ngraph/test/visitors/op/strided_slice.cpp index 107504e1890..d430c18f040 100644 --- a/ngraph/test/visitors/op/strided_slice.cpp +++ b/ngraph/test/visitors/op/strided_slice.cpp @@ -39,7 +39,7 @@ TEST(attributes, strided_slice_op) { shrink_axis_mask, ellipsis_mask); NodeBuilder builder(strided_slice); - auto g_strided_slice = as_type_ptr(builder.create()); + auto g_strided_slice = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_strided_slice->get_begin_mask(), strided_slice->get_begin_mask()); EXPECT_EQ(g_strided_slice->get_end_mask(), strided_slice->get_end_mask()); diff --git a/ngraph/test/visitors/op/topk.cpp b/ngraph/test/visitors/op/topk.cpp index 63cab25c71f..8985957d52b 100644 --- a/ngraph/test/visitors/op/topk.cpp +++ b/ngraph/test/visitors/op/topk.cpp @@ -27,7 +27,7 @@ TEST(attributes, topk_op) { auto topk = make_shared(data, k, axis, mode, sort_type); NodeBuilder builder(topk); - auto g_topk = as_type_ptr(builder.create()); + auto g_topk = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_topk->get_axis(), topk->get_axis()); EXPECT_EQ(g_topk->get_mode(), topk->get_mode()); diff --git a/ngraph/test/visitors/user_op.cpp b/ngraph/test/visitors/user_op.cpp index 152df20c93b..61290870422 100644 --- a/ngraph/test/visitors/user_op.cpp +++ b/ngraph/test/visitors/user_op.cpp @@ -423,7 +423,7 @@ TEST(attributes, user_op) { saver.register_node(data, "data"); saver.register_node(result, "result"); builder.save_node(oracle); - auto g_oracle = as_type_ptr(builder.create()); + auto g_oracle = ov::as_type_ptr(builder.create()); EXPECT_EQ(g_oracle->get_turing_model(), oracle->get_turing_model()); EXPECT_EQ(g_oracle->get_element_type(), oracle->get_element_type()); From 9c5e7654d9b14f5c0d026833282a8d9c707b2d34 Mon Sep 17 00:00:00 2001 From: Bartosz Lesniewski Date: Fri, 20 Aug 2021 05:53:37 +0200 Subject: [PATCH 2/5] Revise CTCLoss OP (#6953) * Add visitor test to CTCLoss * Add CTC Loss SSLT * Add CTC Loss template tests * Use ngraph rtti macros * Code style fix --- .../functional/op_reference/ctc_loss.cpp | 174 ++++++++++++++++++ .../serialization/single_layer/ctc_loss.cpp | 43 +++++ ngraph/core/include/ngraph/op/ctc_loss.hpp | 6 +- ngraph/core/src/op/ctc_loss.cpp | 2 +- ngraph/test/CMakeLists.txt | 1 + ngraph/test/visitors/op/ctc_loss.cpp | 40 ++++ 6 files changed, 261 insertions(+), 5 deletions(-) create mode 100644 docs/template_plugin/tests/functional/op_reference/ctc_loss.cpp create mode 100644 inference-engine/tests/functional/inference_engine/serialization/single_layer/ctc_loss.cpp create mode 100644 ngraph/test/visitors/op/ctc_loss.cpp diff --git a/docs/template_plugin/tests/functional/op_reference/ctc_loss.cpp b/docs/template_plugin/tests/functional/op_reference/ctc_loss.cpp new file mode 100644 index 00000000000..5579e17800f --- /dev/null +++ b/docs/template_plugin/tests/functional/op_reference/ctc_loss.cpp @@ -0,0 +1,174 @@ +// Copyright (C) 2018-2021 Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include + +#include +#include +#include +#include +#include + +#include "base_reference_test.hpp" + +using namespace reference_tests; +using namespace ngraph; +using namespace InferenceEngine; + +namespace { + +struct CTCLossParams { + CTCLossParams(const bool collapseRepeated, const bool mergeRepeated, const bool findUnique, const Tensor& logitsTensor, const Tensor& logitsLenTensor, + const Tensor& labelsTensor, const Tensor& labelsLenTensor, const Tensor& blankIdxTensor, const Tensor& 
expectedTensor)
+        : preprocessCollapseRepeated(collapseRepeated),
+          ctcMergeRepeated(mergeRepeated),
+          unique(findUnique),
+          logits(logitsTensor),
+          logitsLen(logitsLenTensor),
+          labels(labelsTensor),
+          labelsLen(labelsLenTensor),
+          blankIdx(blankIdxTensor),
+          expected(expectedTensor) {}
+
+    bool preprocessCollapseRepeated;
+    bool ctcMergeRepeated;
+    bool unique;
+    Tensor logits;
+    Tensor logitsLen;
+    Tensor labels;
+    Tensor labelsLen;
+    Tensor blankIdx;
+    Tensor expected;
+};
+
+class ReferenceCTCLossLayerTest : public testing::TestWithParam<CTCLossParams>, public CommonReferenceTest {
+public:
+    void SetUp() override {
+        auto params = GetParam();
+        function = CreateFunction(params);
+        inputData = {params.logits.data, params.logitsLen.data, params.labels.data, params.labelsLen.data, params.blankIdx.data};
+        refOutData = {params.expected.data};
+    }
+    static std::string getTestCaseName(const testing::TestParamInfo<CTCLossParams>& obj) {
+        auto param = obj.param;
+        std::ostringstream result;
+        result << "fl_pr=" << param.logits.type << "_";
+        result << "int_pr=" << param.logitsLen.type << "_";
+        result << "collapse=" << param.preprocessCollapseRepeated << "_";
+        result << "merge=" << param.ctcMergeRepeated << "_";
+        result << "unique=" << param.unique << "_";
+        result << "logits_shape=" << param.logits.shape << "_";
+        result << "logits_len_shape=" << param.logitsLen.shape << "_";
+        result << "labels_shape=" << param.labels.shape << "_";
+        result << "labels_len_shape=" << param.labelsLen.shape << "_";
+        result << "blank_idx_shape=" << param.blankIdx.shape << "_";
+        return result.str();
+    }
+
+private:
+    static std::shared_ptr<Function> CreateFunction(const CTCLossParams& params) {
+        const auto A = std::make_shared<op::Parameter>(params.logits.type, params.logits.shape);        // logits
+        const auto B = std::make_shared<op::Parameter>(params.logitsLen.type, params.logitsLen.shape);  // logitsLen
+        const auto C = std::make_shared<op::Parameter>(params.labels.type, params.labels.shape);        // labels
+        const auto D = std::make_shared<op::Parameter>(params.labelsLen.type, params.labelsLen.shape);  // labelsLen
+        const auto E = std::make_shared<op::Parameter>(params.blankIdx.type, params.blankIdx.shape);    // blankIdx
+
+        const auto ctcLoss = std::make_shared<op::v4::CTCLoss>(A, B, C, D, E, params.preprocessCollapseRepeated, params.ctcMergeRepeated, params.unique);
+        return std::make_shared<Function>(NodeVector {ctcLoss}, ParameterVector {A, B, C, D, E});
+    }
+};
+
+TEST_P(ReferenceCTCLossLayerTest, CompareWithRefs) {
+    Exec();
+}
+
+INSTANTIATE_TEST_SUITE_P(
+    smoke_CTCLoss_With_Hardcoded_Refs, ReferenceCTCLossLayerTest,
+    ::testing::Values(CTCLossParams(false, false, false,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f32, std::vector<float> {1.41223f, 14.1359f})),  // refOut
+        CTCLossParams(false, false, true,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f32, std::vector<float> {1.41223f, 14.1359f})),  // refOut
+        CTCLossParams(false, true, false,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f32, std::vector<float> {1.41156f, 13.2745f})),  // refOut
+        CTCLossParams(true, false, false,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f32, std::vector<float> {1.41223f, 14.1359f})),  // refOut
+        CTCLossParams(false, true, true,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f32, std::vector<float> {1.41156f, 13.2745f})),  // refOut
+        CTCLossParams(true, true, true,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f32, std::vector<float> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f32, std::vector<float> {1.41223f, 13.2745f})),  // refOut
+        // floating point type - float16
+        CTCLossParams(false, false, false,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f16, std::vector<float16> {1.41223f, 14.1359f})),  // refOut
+        CTCLossParams(false, false, true,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f16, std::vector<float16> {1.41223f, 14.1359f})),  // refOut
+        CTCLossParams(false, true, false,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f16, std::vector<float16> {1.41156f, 13.2745f})),  // refOut
+        CTCLossParams(true, false, false,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f16, std::vector<float16> {1.41223f, 14.1359f})),  // refOut
+        CTCLossParams(false, true, true,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f16, std::vector<float16> {1.41156f, 13.2745f})),  // refOut
+        CTCLossParams(true, true, true,  // collapse repeated, merge repeated, unique
+                      Tensor({2, 3, 3}, element::f16, std::vector<float16> {0, 1, 8, 5, 5, 2, 0, 7, 7, 10, 4, 5, 9, 0, 0, 5, 7, 0}),  // logits
+                      Tensor({2}, element::i32, std::vector<int32_t> {3, 3}),  // logitsLen
+                      Tensor({2, 3}, element::i32, std::vector<int32_t> {0, 1, 2, 1, 1, 1}),  // labels
+                      Tensor({2}, element::i32, std::vector<int32_t> {2, 1}),  // labelsLen
+                      Tensor({}, element::i32, std::vector<int32_t> {2}),  // blankIdx
+                      Tensor({2}, element::f16, std::vector<float16> {1.41223f, 13.2745f}))),  // refOut
+    ReferenceCTCLossLayerTest::getTestCaseName);
+}  // namespace
diff --git a/inference-engine/tests/functional/inference_engine/serialization/single_layer/ctc_loss.cpp b/inference-engine/tests/functional/inference_engine/serialization/single_layer/ctc_loss.cpp
new file mode 100644
index 00000000000..7055b2c3e96
--- /dev/null
+++ b/inference-engine/tests/functional/inference_engine/serialization/single_layer/ctc_loss.cpp
@@ -0,0 +1,43 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include <vector>
+
+#include "shared_test_classes/single_layer/ctc_loss.hpp"
+
+using namespace LayerTestsDefinitions;
+
+namespace {
+TEST_P(CTCLossLayerTest, Serialize) { Serialize(); }
+
+const std::vector<InferenceEngine::Precision> fPrecisions = {
+    InferenceEngine::Precision::FP32,
+    InferenceEngine::Precision::FP16};
+const std::vector<InferenceEngine::Precision> iPrecisions = {
+    InferenceEngine::Precision::I32,
+    InferenceEngine::Precision::I64};
+
+const std::vector<bool> preprocessCollapseRepeated = {true, false};
+const std::vector<bool> ctcMergeRepeated = {true, false};
+const std::vector<bool> unique = {true, false};
+
+const auto ctcLossArgsSubset1 = ::testing::Combine(
+    ::testing::Values(std::vector<size_t>({2, 3, 3})),                      // logits shape
+    ::testing::ValuesIn(std::vector<std::vector<int>>({{2, 3}, {3, 3}})),   // logits length
+    ::testing::ValuesIn(std::vector<std::vector<std::vector<int>>>(
+        {{{0, 1, 0}, {1, 0, 1}}, {{0, 1, 2}, {1, 1, 1}}})),                 // labels
+    ::testing::ValuesIn(std::vector<std::vector<int>>({{2, 2}, {2, 1}})),   // labels length
+    ::testing::Values(2),                                                   // blank index
+    ::testing::ValuesIn(preprocessCollapseRepeated),
+    ::testing::ValuesIn(ctcMergeRepeated),
+    ::testing::ValuesIn(unique));
+
+INSTANTIATE_TEST_SUITE_P(smoke_CTCLossSerialization, CTCLossLayerTest,
+                         ::testing::Combine(
+                             ctcLossArgsSubset1,
+                             ::testing::ValuesIn(fPrecisions),
+                             ::testing::ValuesIn(iPrecisions),
+                             ::testing::Values(CommonTestUtils::DEVICE_CPU)),
+                         CTCLossLayerTest::getTestCaseName);
+}  // namespace
diff --git a/ngraph/core/include/ngraph/op/ctc_loss.hpp b/ngraph/core/include/ngraph/op/ctc_loss.hpp
index ea42b7186b0..b4ddf80d5eb 100644
--- a/ngraph/core/include/ngraph/op/ctc_loss.hpp
+++ b/ngraph/core/include/ngraph/op/ctc_loss.hpp
@@ -11,10 +11,8 @@ namespace op {
 namespace v4 {
 class NGRAPH_API CTCLoss : public Op {
 public:
-    static constexpr NodeTypeInfo type_info{"CTCLoss", 0};
-    const NodeTypeInfo& get_type_info() const override {
-        return type_info;
-    }
+    NGRAPH_RTTI_DECLARATION;
+    CTCLoss() = default;
     /// \brief Constructs a CTCLoss operation
     ///
diff --git a/ngraph/core/src/op/ctc_loss.cpp b/ngraph/core/src/op/ctc_loss.cpp
index 8f81a69459e..6f0da3bfc08 100644
--- a/ngraph/core/src/op/ctc_loss.cpp
+++ b/ngraph/core/src/op/ctc_loss.cpp
@@ -9,7 +9,7 @@
 using namespace std;
 using namespace ngraph;
 
-constexpr NodeTypeInfo op::v4::CTCLoss::type_info;
+NGRAPH_RTTI_DEFINITION(op::v4::CTCLoss, "CTCLoss", 4);
 
 op::v4::CTCLoss::CTCLoss(const Output<Node>& logits,
                          const Output<Node>& logit_length,
diff --git a/ngraph/test/CMakeLists.txt b/ngraph/test/CMakeLists.txt
index 89197aee7ae..8a7c7e1a0ba 100644
--- a/ngraph/test/CMakeLists.txt
+++ b/ngraph/test/CMakeLists.txt
@@ -256,6 +256,7 @@ set(SRC
     visitors/op/convolution_backprop.cpp
     visitors/op/cos.cpp
    visitors/op/cosh.cpp
+    visitors/op/ctc_loss.cpp
     visitors/op/cum_sum.cpp
     visitors/op/deformable_convolution.cpp
     visitors/op/deformable_psroi_pooling.cpp
diff --git a/ngraph/test/visitors/op/ctc_loss.cpp b/ngraph/test/visitors/op/ctc_loss.cpp
new file mode 100644
index 00000000000..0e35c6af64d
--- /dev/null
+++ b/ngraph/test/visitors/op/ctc_loss.cpp
@@ -0,0 +1,40 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+#include "gtest/gtest.h"
+#include "ngraph/ngraph.hpp"
+#include "ngraph/op/util/attr_types.hpp"
+#include "ngraph/opsets/opset1.hpp"
+#include "ngraph/opsets/opset3.hpp"
+#include "ngraph/opsets/opset4.hpp"
+#include "ngraph/opsets/opset5.hpp"
+#include "util/visitor.hpp"
+
+using namespace std;
+using namespace ngraph;
+using ngraph::test::NodeBuilder;
+using ngraph::test::ValueMap;
+
+TEST(attributes, ctc_loss) {
+    NodeBuilder::get_ops().register_factory<opset4::CTCLoss>();
+
+    auto logits = make_shared<op::Parameter>(element::f32, Shape{10, 120, 28});
+    auto logit_length = make_shared<op::Parameter>(element::i32, Shape{10});
+    auto labels = make_shared<op::Parameter>(element::i32, Shape{10, 120});
+    auto label_length = make_shared<op::Parameter>(element::i32, Shape{10});
+    auto blank_index = make_shared<op::Parameter>(element::i32, Shape{});
+
+    auto ctc_loss = make_shared<opset4::CTCLoss>(logits, logit_length, labels, label_length, blank_index);
+    NodeBuilder builder(ctc_loss);
+    auto g_ctc_loss = as_type_ptr<opset4::CTCLoss>(builder.create());
+
+    // attribute count
+    const auto expected_attr_count = 3;
+    EXPECT_EQ(builder.get_value_map_size(), expected_attr_count);
+
+    // CTC Loss attributes
+    EXPECT_EQ(g_ctc_loss->get_preprocess_collapse_repeated(), ctc_loss->get_preprocess_collapse_repeated());
+    EXPECT_EQ(g_ctc_loss->get_ctc_merge_repeated(), ctc_loss->get_ctc_merge_repeated());
+    EXPECT_EQ(g_ctc_loss->get_unique(), ctc_loss->get_unique());
+}

From df17cba68fab87c5482e7591e954efcb521c9b8d Mon Sep 17 00:00:00 2001
From: Bartosz Lesniewski
Date: Fri, 20 Aug 2021 05:54:12 +0200
Subject: [PATCH 3/5] Enable PriorBoxClustered tests (#7078)

---
 .../tests/functional/inference_engine/skip_tests_config.cpp | 3 ---
 1 file changed, 3 deletions(-)
diff --git a/inference-engine/tests/functional/inference_engine/skip_tests_config.cpp b/inference-engine/tests/functional/inference_engine/skip_tests_config.cpp
index aff04cee6e5..124d02ba9d0 100644
--- a/inference-engine/tests/functional/inference_engine/skip_tests_config.cpp
+++ b/inference-engine/tests/functional/inference_engine/skip_tests_config.cpp
@@ -9,9 +9,6 @@
 std::vector<std::string> disabledTestPatterns() {
     return {
-        // TODO: FIX BUG 33375
-        // Disabled due to rare sporadic failures.
-        ".*TransformationTests\\.ConstFoldingPriorBoxClustered.*",
         // TODO: task 32568, enable after supporting constants outputs in plugins
         ".*TransformationTests\\.ConstFoldingPriorBox.*",
         // azure is failing after #6199

From 7aeec6ffe4d47088c135b863c184db47cc9eb890 Mon Sep 17 00:00:00 2001
From: Katarzyna Mitrus
Date: Fri, 20 Aug 2021 05:55:13 +0200
Subject: [PATCH 4/5] CumSum spec revision (#6966)

* Update detailed description
* Update exclusive attribute description
* Update Inputs/Output description
* Update types
* Update descriptions
* Update data input rank info

---
 docs/ops/arithmetic/CumSum_3.md | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/docs/ops/arithmetic/CumSum_3.md b/docs/ops/arithmetic/CumSum_3.md
index 66897acdba8..4ae6f8bde56 100644
--- a/docs/ops/arithmetic/CumSum_3.md
+++ b/docs/ops/arithmetic/CumSum_3.md
@@ -6,13 +6,15 @@
 
 **Short description**: *CumSum* performs cumulative summation of the input elements along the given axis.
 
-**Detailed description**: By default, it will do the sum inclusively meaning the first element is copied as is. Through an "exclusive" attribute, this behavior can change to exclude the first element. It can also perform summation in the opposite direction of the axis. For that, set reverse attribute to `true`.
+**Detailed description**: *CumSum* performs cumulative summation of the input elements along the `axis` specified by the second input. By default, the `j-th` output element is the inclusive sum of the first `j` elements in the given sequence, and the first element in the sequence is copied to the output as is.
+In the `exclusive` mode, the `j-th` output element is the sum of the first `j-1` elements, and the first element of the output sequence is `0`.
+To perform the summation in the opposite direction of the axis, set the `reverse` attribute to `true`.
 
 **Attributes**:
 
 * *exclusive*
 
-  * **Description**: If the attribute is set to `true` then an exclusive sum in which the top element is not included is returned. In other terms, if set to `true`, the `j-th` output element would be the sum of the first `(j-1)` elements. Otherwise, it would be the sum of the first `j` elements.
+  * **Description**: If the attribute is set to `true`, then exclusive sums are returned: the `j-th` element is not included in the `j-th` sum. Otherwise, the inclusive sum of the first `j` elements is calculated for the `j-th` element.
   * **Range of values**:
     * `false` - include the top element
     * `true` - do not include the top element
@@ -32,19 +34,19 @@
 
 **Inputs**
 
-* **1**: An tensor of type *T*. **Required.**
+* **1**: A tensor of type *T* and rank greater than or equal to 1. **Required.**
 
-* **2**: Scalar axis of type *T_AXIS*. Negative value means counting dimensions from the back. Default value is 0. **Optional.**
+* **2**: Axis index along which the cumulative sum is performed. A scalar of type *T_AXIS*. A negative value means counting dimensions from the back. Default value is `0`. **Optional.**
 
 **Outputs**
 
-* **1**: Output tensor with cumulative sums of the input's elements. A tensor of type *T* of the same shape as 1st input.
+* **1**: Output tensor with cumulative sums of the input elements. A tensor of type *T* of the same shape as the first input.
 
 **Types**
 
 * *T*: any numeric type.
 
-* *T_AXIS*: any integer number.
+* *T_AXIS*: `int64` or `int32`.
 
 **Examples**

From 2fefe1164d694cfa36cba7a74f734d2d4bedd237 Mon Sep 17 00:00:00 2001
From: Anton Pankratv
Date: Fri, 20 Aug 2021 07:17:56 +0300
Subject: [PATCH 5/5] Added common.hpp file with aliases (#7158)

---
 .../include/openvino/runtime/common.hpp |  23 ++++++
 .../include/openvino/runtime/core.hpp   |  54 +++++++-------
 .../src/inference_engine/src/ie_core.cpp |  73 +++++++++----------
 3 files changed, 83 insertions(+), 67 deletions(-)
 create mode 100644 inference-engine/src/inference_engine/include/openvino/runtime/common.hpp

diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/common.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/common.hpp
new file mode 100644
index 00000000000..9c0c2e93192
--- /dev/null
+++ b/inference-engine/src/inference_engine/include/openvino/runtime/common.hpp
@@ -0,0 +1,23 @@
+// Copyright (C) 2018-2021 Intel Corporation
+// SPDX-License-Identifier: Apache-2.0
+//
+
+/**
+ * @brief This is a header file for the OpenVINO Runtime common aliases that depend only on the external API
+ *
+ * @file openvino/runtime/common.hpp
+ */
+#pragma once
+
+#include <map>
+#include <string>
+
+namespace ov {
+namespace ie = InferenceEngine;
+namespace runtime {
+/**
+ * @brief This type of map is commonly used to pass a set of parameters
+ */
+using ConfigMap = std::map<std::string, std::string>;
+}  // namespace runtime
+}  // namespace ov
\ No newline at end of file
diff --git a/inference-engine/src/inference_engine/include/openvino/runtime/core.hpp b/inference-engine/src/inference_engine/include/openvino/runtime/core.hpp
index 0ececc87aa5..e54babcc3f3 100644
--- a/inference-engine/src/inference_engine/include/openvino/runtime/core.hpp
+++ b/inference-engine/src/inference_engine/include/openvino/runtime/core.hpp
@@ -15,6 +15,7 @@
 #include <string>
 #include <vector>
 
+#include "common.hpp"
 #include "cpp/ie_executable_network.hpp"
 #include "ie_plugin_config.hpp"
 #include "ie_version.hpp"
@@ -57,7 +58,7 @@ public:
      * @param deviceName Device name to identify plugin
      * @return A vector of versions
      */
-    std::map<std::string, InferenceEngine::Version> get_versions(const std::string& deviceName) const;
+    std::map<std::string, ie::Version> get_versions(const std::string& deviceName) const;
 
 #ifdef ENABLE_UNICODE_PATH_SUPPORT
     /**
@@ -101,7 +102,7 @@ public:
      * @return Function
      */
     std::shared_ptr<ngraph::Function> read_model(const std::string& model,
-                                                 const std::shared_ptr<const InferenceEngine::Blob>& weights) const;
+                                                 const std::shared_ptr<const ie::Blob>& weights) const;
 
     /**
      * @brief Creates an executable network from a network object.
@@ -115,9 +116,9 @@ public:
      * operation
      * @return An executable network reference
      */
-    InferenceEngine::ExecutableNetwork compile_model(const std::shared_ptr<const ngraph::Function>& network,
-                                                     const std::string& deviceName,
-                                                     const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork compile_model(const std::shared_ptr<const ngraph::Function>& network,
+                                        const std::string& deviceName,
+                                        const ConfigMap& config = {});
 
     /**
      * @brief Reads model and creates an executable network from IR or ONNX file
@@ -132,9 +133,9 @@ public:
      *
      * @return An executable network reference
      */
-    InferenceEngine::ExecutableNetwork compile_model(const std::string& modelPath,
-                                                     const std::string& deviceName,
-                                                     const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork compile_model(const std::string& modelPath,
+                                        const std::string& deviceName,
+                                        const ConfigMap& config = {});
 
     /**
      * @brief Creates an executable network from a network object within a specified remote context.
@@ -144,15 +145,15 @@ public:
      * operation
      * @return An executable network object
      */
-    InferenceEngine::ExecutableNetwork compile_model(const std::shared_ptr<const ngraph::Function>& network,
-                                                     const std::shared_ptr<InferenceEngine::RemoteContext>& context,
-                                                     const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork compile_model(const std::shared_ptr<const ngraph::Function>& network,
+                                        const std::shared_ptr<ie::RemoteContext>& context,
+                                        const ConfigMap& config = {});
 
     /**
      * @brief Registers extension
      * @param extension Pointer to already loaded extension
      */
-    void add_extension(const std::shared_ptr<InferenceEngine::IExtension>& extension);
+    void add_extension(const std::shared_ptr<ie::IExtension>& extension);
 
     /**
      * @brief Creates an executable network from a previously exported network
@@ -162,9 +163,9 @@ public:
      * operation*
      * @return An executable network reference
      */
-    InferenceEngine::ExecutableNetwork import_model(std::istream& networkModel,
-                                                    const std::string& deviceName,
-                                                    const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork import_model(std::istream& networkModel,
+                                       const std::string& deviceName,
+                                       const ConfigMap& config = {});
 
     /**
      * @brief Creates an executable network from a previously exported network within a specified
@@ -176,9 +177,9 @@ public:
      * operation
      * @return An executable network reference
      */
-    InferenceEngine::ExecutableNetwork import_model(std::istream& networkModel,
-                                                    const std::shared_ptr<InferenceEngine::RemoteContext>& context,
-                                                    const std::map<std::string, std::string>& config = {});
+    ie::ExecutableNetwork import_model(std::istream& networkModel,
+                                       const std::shared_ptr<ie::RemoteContext>& context,
+                                       const ConfigMap& config = {});
 
     /**
      * @brief Query device if it supports specified network with specified configuration
@@ -188,9 +189,9 @@ public:
      * @param config Optional map of pairs: (config parameter name, config parameter value)
      * @return An object containing a map of pairs a layer name -> a device name supporting this layer.
      */
-    InferenceEngine::QueryNetworkResult query_model(const std::shared_ptr<const ngraph::Function>& network,
-                                                    const std::string& deviceName,
-                                                    const std::map<std::string, std::string>& config = {}) const;
+    ie::QueryNetworkResult query_model(const std::shared_ptr<const ngraph::Function>& network,
+                                       const std::string& deviceName,
+                                       const ConfigMap& config = {}) const;
 
     /**
      * @brief Sets configuration for device, acceptable keys can be found in ie_plugin_config.hpp
@@ -200,7 +201,7 @@ public:
      *
      * @param config Map of pairs: (config parameter name, config parameter value)
      */
-    void set_config(const std::map<std::string, std::string>& config, const std::string& deviceName = {});
+    void set_config(const ConfigMap& config, const std::string& deviceName = {});
 
     /**
      * @brief Gets configuration dedicated to device behaviour.
@@ -211,7 +212,7 @@ public:
      * @param name - config key.
      * @return Value of config corresponding to config key.
      */
-    InferenceEngine::Parameter get_config(const std::string& deviceName, const std::string& name) const;
+    ie::Parameter get_config(const std::string& deviceName, const std::string& name) const;
 
     /**
      * @brief Gets general runtime metric for dedicated hardware.
@@ -223,7 +224,7 @@ public:
      * @param name - metric name to request.
      * @return Metric value corresponding to metric key.
      */
-    InferenceEngine::Parameter get_metric(const std::string& deviceName, const std::string& name) const;
+    ie::Parameter get_metric(const std::string& deviceName, const std::string& name) const;
 
     /**
      * @brief Returns devices available for neural networks inference
@@ -290,15 +291,14 @@ public:
      * @param params Map of device-specific shared context parameters.
      * @return A shared pointer to a created remote context.
      */
-    std::shared_ptr<InferenceEngine::RemoteContext> create_context(const std::string& deviceName,
-                                                                   const InferenceEngine::ParamMap& params);
+    std::shared_ptr<ie::RemoteContext> create_context(const std::string& deviceName, const ie::ParamMap& params);
 
     /**
      * @brief Get a pointer to default(plugin-supplied) shared context object for specified accelerator device.
      * @param deviceName - A name of a device to get create shared context from.
      * @return A shared pointer to a default remote context.
      */
-    std::shared_ptr<InferenceEngine::RemoteContext> get_default_context(const std::string& deviceName);
+    std::shared_ptr<ie::RemoteContext> get_default_context(const std::string& deviceName);
 };
 }  // namespace runtime
 }  // namespace ov
diff --git a/inference-engine/src/inference_engine/src/ie_core.cpp b/inference-engine/src/inference_engine/src/ie_core.cpp
index 5792e6388e0..0c63b75b2bf 100644
--- a/inference-engine/src/inference_engine/src/ie_core.cpp
+++ b/inference-engine/src/inference_engine/src/ie_core.cpp
@@ -1218,7 +1218,7 @@ Core::Core(const std::string& xmlConfigFile) {
     register_plugins(core_detail::parseXmlConfig(xmlConfigFile));
 }
 
-std::map<std::string, InferenceEngine::Version> Core::get_versions(const std::string& deviceName) const {
+std::map<std::string, ie::Version> Core::get_versions(const std::string& deviceName) const {
     return _impl->GetVersions(deviceName);
 }
 
@@ -1232,49 +1232,45 @@ std::shared_ptr<ngraph::Function> Core::read_model(const std::wstring& modelPath
 std::shared_ptr<ngraph::Function> Core::read_model(const std::string& modelPath, const std::string& binPath) const {
     return _impl->ReadNetwork(modelPath, binPath).getFunction();
 }
-std::shared_ptr<ngraph::Function> Core::read_model(const std::string& model,
-                                                   const InferenceEngine::Blob::CPtr& weights) const {
+std::shared_ptr<ngraph::Function> Core::read_model(const std::string& model, const ie::Blob::CPtr& weights) const {
     return _impl->ReadNetwork(model, weights).getFunction();
 }
-InferenceEngine::ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
-                                                       const std::string& deviceName,
-                                                       const std::map<std::string, std::string>& config) {
-    auto exec = _impl->LoadNetwork(InferenceEngine::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)),
-                                   deviceName,
-                                   config);
+ie::ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
+                                          const std::string& deviceName,
+                                          const ConfigMap& config) {
+    auto exec =
+        _impl->LoadNetwork(ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)), deviceName, config);
     return {exec, exec};
 }
-InferenceEngine::ExecutableNetwork Core::compile_model(const std::string& modelPath,
-                                                       const std::string& deviceName,
-                                                       const std::map<std::string, std::string>& config) {
+ie::ExecutableNetwork Core::compile_model(const std::string& modelPath,
+                                          const std::string& deviceName,
+                                          const ConfigMap& config) {
     auto exec = _impl->LoadNetwork(modelPath, deviceName, config);
     return {exec, exec};
 }
-InferenceEngine::ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
-                                                       const InferenceEngine::RemoteContext::Ptr& context,
-                                                       const std::map<std::string, std::string>& config) {
-    auto exec = _impl->LoadNetwork(InferenceEngine::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)),
-                                   context,
-                                   config);
+ie::ExecutableNetwork Core::compile_model(const std::shared_ptr<const ngraph::Function>& network,
+                                          const ie::RemoteContext::Ptr& context,
+                                          const ConfigMap& config) {
+    auto exec = _impl->LoadNetwork(ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)), context, config);
     return {exec, exec};
 }
-void Core::add_extension(const InferenceEngine::IExtensionPtr& extension) {
+void Core::add_extension(const ie::IExtensionPtr& extension) {
     _impl->AddExtension(extension);
 }
-InferenceEngine::ExecutableNetwork Core::import_model(std::istream& networkModel,
-                                                      const std::string& deviceName,
-                                                      const std::map<std::string, std::string>& config) {
+ie::ExecutableNetwork Core::import_model(std::istream& networkModel,
+                                         const std::string& deviceName,
+                                         const ConfigMap& config) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
     auto exec = _impl->ImportNetwork(networkModel, deviceName, config);
     return {exec, exec};
 }
 
-InferenceEngine::ExecutableNetwork Core::import_model(std::istream& networkModel,
-                                                      const InferenceEngine::RemoteContext::Ptr& context,
-                                                      const std::map<std::string, std::string>& config) {
+ie::ExecutableNetwork Core::import_model(std::istream& networkModel,
+                                         const ie::RemoteContext::Ptr& context,
+                                         const ConfigMap& config) {
     OV_ITT_SCOPED_TASK(ov::itt::domains::IE, "Core::import_model");
 
     using ExportMagic = std::array<char, 4>;
@@ -1296,14 +1292,12 @@ InferenceEngine::ExecutableNetwork Core::import_model(std::istream& networkModel
     return {exec, exec};
 }
 
-InferenceEngine::QueryNetworkResult Core::query_model(const std::shared_ptr<const ngraph::Function>& network,
-                                                      const std::string& deviceName,
-                                                      const std::map<std::string, std::string>& config) const {
-    return _impl->QueryNetwork(InferenceEngine::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)),
-                               deviceName,
-                               config);
+ie::QueryNetworkResult Core::query_model(const std::shared_ptr<const ngraph::Function>& network,
+                                         const std::string& deviceName,
+                                         const ConfigMap& config) const {
+    return _impl->QueryNetwork(ie::CNNNetwork(std::const_pointer_cast<ngraph::Function>(network)), deviceName, config);
 }
-void Core::set_config(const std::map<std::string, std::string>& config, const std::string& deviceName) {
+void Core::set_config(const ConfigMap& config, const std::string& deviceName) {
     // HETERO case
     if (deviceName.find("HETERO:") == 0) {
         IE_THROW() << "SetConfig is supported only for HETERO itself (without devices).
" @@ -1337,7 +1331,7 @@ void Core::set_config(const std::map& config, const st } } -InferenceEngine::Parameter Core::get_config(const std::string& deviceName, const std::string& name) const { +ie::Parameter Core::get_config(const std::string& deviceName, const std::string& name) const { // HETERO case { if (deviceName.find("HETERO:") == 0) { @@ -1363,13 +1357,13 @@ InferenceEngine::Parameter Core::get_config(const std::string& deviceName, const auto parsed = core_detail::parseDeviceNameIntoConfig(deviceName); // we need to return a copy of Parameter object which is created on Core side, - // not in InferenceEngine plugin side, which can be unloaded from Core in a parallel thread + // not in ie plugin side, which can be unloaded from Core in a parallel thread // TODO: remove this WA after *-31417 is resolved return core_detail::copyParameterValue( _impl->GetCPPPluginByName(parsed._deviceName).GetConfig(name, parsed._config)); } -InferenceEngine::Parameter Core::get_metric(const std::string& deviceName, const std::string& name) const { +ie::Parameter Core::get_metric(const std::string& deviceName, const std::string& name) const { return _impl->GetMetric(deviceName, name); } @@ -1382,7 +1376,7 @@ void Core::register_plugin(const std::string& pluginName, const std::string& dev } void Core::unload_plugin(const std::string& deviceName) { - InferenceEngine::DeviceIDParser parser(deviceName); + ie::DeviceIDParser parser(deviceName); std::string devName = parser.getDeviceName(); _impl->UnloadPluginByName(devName); @@ -1392,8 +1386,7 @@ void Core::register_plugins(const std::string& xmlConfigFile) { _impl->RegisterPluginsInRegistry(xmlConfigFile); } -InferenceEngine::RemoteContext::Ptr Core::create_context(const std::string& deviceName, - const InferenceEngine::ParamMap& params) { +ie::RemoteContext::Ptr Core::create_context(const std::string& deviceName, const ie::ParamMap& params) { if (deviceName.find("HETERO") == 0) { IE_THROW() << "HETERO device does not support remote context"; } @@ -1408,7 +1401,7 @@ InferenceEngine::RemoteContext::Ptr Core::create_context(const std::string& devi return _impl->GetCPPPluginByName(parsed._deviceName).CreateContext(parsed._config); } -InferenceEngine::RemoteContext::Ptr Core::get_default_context(const std::string& deviceName) { +ie::RemoteContext::Ptr Core::get_default_context(const std::string& deviceName) { if (deviceName.find("HETERO") == 0) { IE_THROW() << "HETERO device does not support remote context"; } @@ -1419,7 +1412,7 @@ InferenceEngine::RemoteContext::Ptr Core::get_default_context(const std::string& IE_THROW() << "AUTO device does not support remote context"; } - auto parsed = core_detail::parseDeviceNameIntoConfig(deviceName, InferenceEngine::ParamMap()); + auto parsed = core_detail::parseDeviceNameIntoConfig(deviceName, ie::ParamMap()); return _impl->GetCPPPluginByName(parsed._deviceName).GetDefaultContext(parsed._config); }