diff --git a/src/frontends/pytorch/CMakeLists.txt b/src/frontends/pytorch/CMakeLists.txt index 77e43b81ddd..8efe7e3e892 100644 --- a/src/frontends/pytorch/CMakeLists.txt +++ b/src/frontends/pytorch/CMakeLists.txt @@ -2,10 +2,4 @@ # SPDX-License-Identifier: Apache-2.0 # -if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") - # 'argument': conversion from 'size_t' to 'int', possible loss of data - ie_add_compiler_flags(/wd4267) - ie_add_compiler_flags(/wd4244) -endif() - add_subdirectory(src) diff --git a/src/frontends/pytorch/src/op/eye.cpp b/src/frontends/pytorch/src/op/eye.cpp index d538c09f1df..20a8fd2a80b 100644 --- a/src/frontends/pytorch/src/op/eye.cpp +++ b/src/frontends/pytorch/src/op/eye.cpp @@ -22,7 +22,7 @@ OutputVector translate_eye(NodeContext& context) { // num rows and cols should be integer, but at the moment conversion their data type can be unknown yet x = context.mark_node(std::make_shared<v0::Convert>(x, element::i64)); Output<Node> y; - size_t dtype_id; + int dtype_id; auto dtype = element::f32; // aten::eye support only main diagonal auto diagonal = context.mark_node(v0::Constant::create(element::i64, Shape{}, {0})); diff --git a/src/frontends/pytorch/src/op/repeat_interleave.cpp b/src/frontends/pytorch/src/op/repeat_interleave.cpp index f98ad760a0d..06d8333e04c 100644 --- a/src/frontends/pytorch/src/op/repeat_interleave.cpp +++ b/src/frontends/pytorch/src/op/repeat_interleave.cpp @@ -26,7 +26,7 @@ OutputVector generate_indices_from_repeats_tensor(const NodeContext& context, co OutputVector all_indices; for (size_t i = 0; i < repeats.size(); i++) { Shape indices_shape{static_cast<size_t>(repeats.at(i))}; - std::vector<int32_t> indices_vec(repeats.at(i), i); + std::vector<int32_t> indices_vec(repeats.at(i), static_cast<int32_t>(i)); auto indices = context.mark_node(v0::Constant::create(element::i32, indices_shape, indices_vec)); all_indices.push_back(indices); } diff --git a/src/frontends/pytorch/src/pt_framework_node.hpp b/src/frontends/pytorch/src/pt_framework_node.hpp index 55bc9609852..05db41c190a 100644 
--- a/src/frontends/pytorch/src/pt_framework_node.hpp +++ b/src/frontends/pytorch/src/pt_framework_node.hpp @@ -47,7 +47,7 @@ public: auto op = std::make_shared<PtFrameworkNode>(m_decoder, inputs, get_output_size()); for (size_t body_index = 0; body_index < m_bodies.size(); ++body_index) { - op->set_function(body_index, get_function(body_index)->clone()); + op->set_function(static_cast<int>(body_index), get_function(static_cast<int>(body_index))->clone()); for (const auto& m_input_descr : m_input_descriptions[body_index]) { op->m_input_descriptions[body_index].push_back(m_input_descr->copy()); } diff --git a/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.cpp b/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.cpp index 275eac6fbb9..dba6bef1825 100644 --- a/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/append_list_unpack_replacer.cpp @@ -62,11 +62,12 @@ AppendListUnpackReplacer::AppendListUnpackReplacer() { auto getitem_index_ptr = getitem_node->input_value(1).get_node_shared_ptr(); auto getitem_index_const = std::dynamic_pointer_cast<ov::op::v0::Constant>(getitem_index_ptr); auto index_val = getitem_index_const->cast_vector<int64_t>(); - auto index = 0; - if (index_val[0] >= 0) { - index = index_val[0]; - } else { - index = inputs.size() + index_val[0]; + if (index_val.size() != 1) { + return false; + } + auto index = index_val[0]; + if (index_val[0] < 0) { + index = inputs.size() + index; } auto axis_0 = ov::op::v0::Constant::create(element::i64, Shape{}, {0}); auto split = std::make_shared<ov::op::v1::Split>(inputs[index], axis_0, list_unpack->get_output_size()); diff --git a/src/frontends/pytorch/src/transforms/aten_getitem_replacer.cpp b/src/frontends/pytorch/src/transforms/aten_getitem_replacer.cpp index c198131b124..68e5837b436 100644 --- a/src/frontends/pytorch/src/transforms/aten_getitem_replacer.cpp +++ b/src/frontends/pytorch/src/transforms/aten_getitem_replacer.cpp @@ -93,15 +93,16 @@ AtenGetItemReplacer::AtenGetItemReplacer() { } 
else { auto getitem_index_ptr = getitem->input_value(1).get_node_shared_ptr(); auto getitem_index_const = std::dynamic_pointer_cast<v0::Constant>(getitem_index_ptr); - auto index_val = getitem_index_const->cast_vector<int64_t>(); auto split = std::make_shared<v1::VariadicSplit>(torch_split->get_input_source_output(0), torch_split->get_input_source_output(2), torch_split->get_input_source_output(1)); - auto index = 0; - if (index_val[0] >= 0) { - index = index_val[0]; - } else { - index = split->outputs().size() + index_val[0]; + auto index_val = getitem_index_const->cast_vector<int64_t>(); + if (index_val.size() != 1) { + return false; + } + auto index = index_val[0]; + if (index < 0) { + index = split->outputs().size() + index; } OutputVector res{split->outputs()[index]}; copy_runtime_info({getitem, input_node}, split); diff --git a/src/frontends/pytorch/src/utils.cpp b/src/frontends/pytorch/src/utils.cpp index 57afeaf32b7..98246fb750a 100644 --- a/src/frontends/pytorch/src/utils.cpp +++ b/src/frontends/pytorch/src/utils.cpp @@ -24,7 +24,7 @@ void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_ Output<Node> make_optional_bias(const Output<Node>& base_op, const NodeContext& context, - size_t bias_input_idx, + int bias_input_idx, const std::vector<int>& unsqueeze_dims) { using std::make_shared; @@ -105,7 +105,7 @@ Output<Node> reshape_kernel_for_group(const NodeContext& context, const Output< return std::make_shared<v1::Reshape>(kernel, new_kernel_shape, false); } -std::shared_ptr<Node> get_axes_range(const NodeContext& context, size_t input_id) { +std::shared_ptr<Node> get_axes_range(const NodeContext& context, int input_id) { auto x = context.get_input(input_id); auto start = std::make_shared<v0::Constant>(element::i32, Shape{}, 0); auto step = std::make_shared<v0::Constant>(element::i32, Shape{}, 1); @@ -122,15 +122,15 @@ std::shared_ptr<Node> numel(const NodeContext& context, const Output<Node>& x) { }; namespace { -const std::unordered_map<size_t, element::Type> TORCH_TO_OV_TYPE{{0, element::u8}, - {1, element::i8}, - {2, element::i16}, - {3, element::i32}, - {4, element::i64}, - {5, element::f16}, - {6, 
element::f32}, - {7, element::f64}, - {11, element::boolean}}; +const std::unordered_map<int, element::Type> TORCH_TO_OV_TYPE{{0, element::u8}, + {1, element::i8}, + {2, element::i16}, + {3, element::i32}, + {4, element::i64}, + {5, element::f16}, + {6, element::f32}, + {7, element::f64}, + {11, element::boolean}}; const std::unordered_map<std::string, ov::op::PadType> TORCH_AUTO_PAD_TO_OV{{"valid", ov::op::PadType::VALID}, {"same", ov::op::PadType::SAME_UPPER}}; @@ -222,7 +222,7 @@ OutputVector make_framework_node(NodeContext* context) { } // Number of body outputs can be higher then number of pt node outputs, e.g. in case of loop first body output is // condition, we have to skip such outputs. - int num_skip_body_outputs = + auto num_skip_body_outputs = num_body_outs > context->get_output_size() ? num_body_outs - context->get_output_size() : 0; // We need to reduce number of outputs, because some outputs are outputs from body @@ -232,7 +232,7 @@ OutputVector make_framework_node(NodeContext* context) { context->get_output_size() - num_body_outs + num_skip_body_outputs); fw_node->set_friendly_name(context->get_op_type()); for (size_t i = 0; i < bodies.size(); ++i) { - fw_node->set_function(i, bodies[i]); + fw_node->set_function(static_cast<int>(i), bodies[i]); } // Connect inputs with external context diff --git a/src/frontends/pytorch/src/utils.hpp b/src/frontends/pytorch/src/utils.hpp index df3c96a78d6..545daada1f6 100644 --- a/src/frontends/pytorch/src/utils.hpp +++ b/src/frontends/pytorch/src/utils.hpp @@ -22,7 +22,7 @@ void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_ Output<Node> make_optional_bias(const Output<Node>& base_op, const NodeContext& context, - size_t bias_input_idx, + int bias_input_idx, const std::vector<int>& unsqueeze_dims = {}); Output<Node> reshape_channelwise(const NodeContext& context, @@ -36,7 +36,7 @@ std::tuple<Output<Node>, Output<Node>> get_shape_rank(const NodeContext& context Output<Node> reshape_kernel_for_group(const NodeContext& context, const Output<Node>& kernel, int64_t groups); -std::shared_ptr<Node> 
get_axes_range(const NodeContext& context, size_t input_id); +std::shared_ptr<Node> get_axes_range(const NodeContext& context, int input_id); std::shared_ptr<Node> numel(const NodeContext& context, const Output<Node>& x);