[PT FE] Remove warnings suppression in frontend code (#15676)
* [PT FE] Remove warnings suppression in frontend code
* Fix code style
* Fix linux build
* Fix build
* Fix typo
parent 5da21f5649
commit efe3b27f5b
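The change follows one pattern throughout: drop the MSVC `/wd4267` and `/wd4244` suppression flags and make each `size_t`-to-`int` narrowing explicit at the call site instead. Below is a minimal, self-contained sketch of that pattern; the helper name `set_function_at` is hypothetical and stands in for any API that takes an `int` index, not the actual frontend API.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-in for an API that takes an int index.
void set_function_at(int index) {
    std::cout << "body " << index << " attached\n";
}

int main() {
    std::vector<int> bodies{10, 20, 30};
    for (size_t i = 0; i < bodies.size(); ++i) {
        // Passing `i` directly would be an implicit size_t -> int narrowing,
        // which MSVC reports as C4267; the explicit cast documents the intent
        // and keeps the build clean without /wd4267 or /wd4244.
        set_function_at(static_cast<int>(i));
    }
}
```

With the cast in place the loop compiles warning-free under MSVC `/W3` or GCC/Clang `-Wconversion`, which is what lets the blanket suppression in the CMake hunk below be removed.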
@@ -2,10 +2,4 @@
 # SPDX-License-Identifier: Apache-2.0
 #

-if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
-    # 'argument': conversion from 'size_t' to 'int', possible loss of data
-    ie_add_compiler_flags(/wd4267)
-    ie_add_compiler_flags(/wd4244)
-endif()
-
 add_subdirectory(src)
@@ -22,7 +22,7 @@ OutputVector translate_eye(NodeContext& context) {
     // num rows and cols should be integer, but at the moment conversion their data type can be unknown yet
     x = context.mark_node(std::make_shared<v0::Convert>(x, element::i64));
     Output<Node> y;
-    size_t dtype_id;
+    int dtype_id;
     auto dtype = element::f32;
     // aten::eye support only main diagonal
     auto diagonal = context.mark_node(v0::Constant::create(element::i64, Shape{}, {0}));
@@ -26,7 +26,7 @@ OutputVector generate_indices_from_repeats_tensor(const NodeContext& context, co
     OutputVector all_indices;
     for (size_t i = 0; i < repeats.size(); i++) {
         Shape indices_shape{static_cast<size_t>(repeats.at(i))};
-        std::vector<int32_t> indices_vec(repeats.at(i), i);
+        std::vector<int32_t> indices_vec(repeats.at(i), static_cast<int32_t>(i));
         auto indices = context.mark_node(v0::Constant::create(element::i32, indices_shape, indices_vec));
         all_indices.push_back(indices);
     }
@@ -47,7 +47,7 @@ public:
         auto op = std::make_shared<PtFrameworkNode>(m_decoder, inputs, get_output_size());

         for (size_t body_index = 0; body_index < m_bodies.size(); ++body_index) {
-            op->set_function(body_index, get_function(body_index)->clone());
+            op->set_function(static_cast<int>(body_index), get_function(static_cast<int>(body_index))->clone());
             for (const auto& m_input_descr : m_input_descriptions[body_index]) {
                 op->m_input_descriptions[body_index].push_back(m_input_descr->copy());
             }
@@ -62,11 +62,12 @@ AppendListUnpackReplacer::AppendListUnpackReplacer() {
         auto getitem_index_ptr = getitem_node->input_value(1).get_node_shared_ptr();
         auto getitem_index_const = std::dynamic_pointer_cast<ov::op::v0::Constant>(getitem_index_ptr);
         auto index_val = getitem_index_const->cast_vector<int64_t>();
-        auto index = 0;
-        if (index_val[0] >= 0) {
-            index = index_val[0];
-        } else {
-            index = inputs.size() + index_val[0];
+        if (index_val.size() != 1) {
+            return false;
+        }
+        auto index = index_val[0];
+        if (index_val[0] < 0) {
+            index = inputs.size() + index;
         }
         auto axis_0 = ov::op::v0::Constant::create(element::i64, Shape{}, {0});
         auto split = std::make_shared<ov::op::v1::Split>(inputs[index], axis_0, list_unpack->get_output_size());
@@ -93,15 +93,16 @@ AtenGetItemReplacer::AtenGetItemReplacer() {
        } else {
            auto getitem_index_ptr = getitem->input_value(1).get_node_shared_ptr();
            auto getitem_index_const = std::dynamic_pointer_cast<ov::op::v0::Constant>(getitem_index_ptr);
-           auto index_val = getitem_index_const->cast_vector<int64_t>();
            auto split = std::make_shared<ov::op::v1::VariadicSplit>(torch_split->get_input_source_output(0),
                                                                     torch_split->get_input_source_output(2),
                                                                     torch_split->get_input_source_output(1));
-           auto index = 0;
-           if (index_val[0] >= 0) {
-               index = index_val[0];
-           } else {
-               index = split->outputs().size() + index_val[0];
+           auto index_val = getitem_index_const->cast_vector<int64_t>();
+           if (index_val.size() != 1) {
+               return false;
+           }
+           auto index = index_val[0];
+           if (index < 0) {
+               index = split->outputs().size() + index;
            }
            OutputVector res{split->outputs()[index]};
            copy_runtime_info({getitem, input_node}, split);
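Both replacer hunks above move to the same index handling: reject a `getitem` index constant that does not hold exactly one value, then fold a negative index into the output range. A standalone sketch of that logic follows; the names (`normalize_index` and its parameters) are illustrative, not the frontend's own.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors the replacers' index handling: false means "bail out", as the
// matcher callbacks do when the index constant is malformed.
bool normalize_index(const std::vector<int64_t>& index_val, size_t num_outputs, int64_t& index) {
    if (index_val.size() != 1) {
        return false;  // expect exactly one index value in the constant
    }
    index = index_val[0];
    if (index < 0) {
        index += static_cast<int64_t>(num_outputs);  // map a Python-style negative index
    }
    return true;
}

int main() {
    int64_t index = 0;
    if (normalize_index({-1}, 4, index)) {
        std::cout << index << "\n";  // prints 3: the last of four outputs
    }
}
```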
@@ -24,7 +24,7 @@ void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_

 Output<Node> make_optional_bias(const Output<Node>& base_op,
                                 const NodeContext& context,
-                                size_t bias_input_idx,
+                                int bias_input_idx,
                                 const std::vector<int>& unsqueeze_dims) {
     using std::make_shared;

@@ -105,7 +105,7 @@ Output<Node> reshape_kernel_for_group(const NodeContext& context, const Output<N
     return make_shared<opset10::Reshape>(kernel, new_kernel_shape, false);
 }

-std::shared_ptr<Node> get_axes_range(const NodeContext& context, size_t input_id) {
+std::shared_ptr<Node> get_axes_range(const NodeContext& context, int input_id) {
     auto x = context.get_input(input_id);
     auto start = std::make_shared<opset10::Constant>(element::i32, Shape{}, 0);
     auto step = std::make_shared<opset10::Constant>(element::i32, Shape{}, 1);
@@ -122,15 +122,15 @@ std::shared_ptr<Node> numel(const NodeContext& context, const Output<Node>& x) {
 };

 namespace {
-const std::unordered_map<int, element::Type> TORCH_TO_OV_TYPE{{0, element::u8},
-                                                              {1, element::i8},
-                                                              {2, element::i16},
-                                                              {3, element::i32},
-                                                              {4, element::i64},
-                                                              {5, element::f16},
-                                                              {6, element::f32},
-                                                              {7, element::f64},
-                                                              {11, element::boolean}};
+const std::unordered_map<int64_t, element::Type> TORCH_TO_OV_TYPE{{0, element::u8},
+                                                                  {1, element::i8},
+                                                                  {2, element::i16},
+                                                                  {3, element::i32},
+                                                                  {4, element::i64},
+                                                                  {5, element::f16},
+                                                                  {6, element::f32},
+                                                                  {7, element::f64},
+                                                                  {11, element::boolean}};

 const std::unordered_map<std::string, ov::op::PadType> TORCH_AUTO_PAD_TO_OV{{"valid", ov::op::PadType::VALID},
                                                                             {"same", ov::op::PadType::SAME_UPPER}};
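The `TORCH_TO_OV_TYPE` key switches from `int` to `int64_t`, presumably so that dtype ids obtained through `cast_vector<int64_t>()` can be used as lookup keys without a further narrowing conversion. A simplified stand-in is sketched below, assuming string names in place of `element::Type` so the snippet builds without OpenVINO headers.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_map>

// Simplified stand-in for the TORCH_TO_OV_TYPE table: keying on int64_t lets a
// dtype id decoded as int64_t be used directly at the lookup site.
const std::unordered_map<int64_t, std::string> torch_to_ov_type_name{
    {0, "u8"}, {1, "i8"}, {2, "i16"}, {3, "i32"}, {4, "i64"},
    {5, "f16"}, {6, "f32"}, {7, "f64"}, {11, "boolean"}};

int main() {
    int64_t dtype_id = 6;  // value as it would come out of cast_vector<int64_t>()
    std::cout << torch_to_ov_type_name.at(dtype_id) << "\n";  // prints f32
}
```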
@@ -222,7 +222,7 @@ OutputVector make_framework_node(NodeContext* context) {
     }
     // Number of body outputs can be higher then number of pt node outputs, e.g. in case of loop first body output is
     // condition, we have to skip such outputs.
-    int num_skip_body_outputs =
+    auto num_skip_body_outputs =
         num_body_outs > context->get_output_size() ? num_body_outs - context->get_output_size() : 0;

     // We need to reduce number of outputs, because some outputs are outputs from body
@@ -232,7 +232,7 @@ OutputVector make_framework_node(NodeContext* context) {
                                                     context->get_output_size() - num_body_outs + num_skip_body_outputs);
     fw_node->set_friendly_name(context->get_op_type());
     for (size_t i = 0; i < bodies.size(); ++i) {
-        fw_node->set_function(i, bodies[i]);
+        fw_node->set_function(static_cast<int>(i), bodies[i]);
     }

     // Connect inputs with external context
@@ -22,7 +22,7 @@ void num_inputs_check(const NodeContext& context, size_t min_inputs, size_t max_

 Output<Node> make_optional_bias(const Output<Node>& base_op,
                                 const NodeContext& context,
-                                size_t bias_input_idx,
+                                int bias_input_idx,
                                 const std::vector<int>& unsqueeze_dims = {});

 Output<Node> reshape_channelwise(const NodeContext& context,
@@ -36,7 +36,7 @@ std::tuple<Output<Node>, Output<Node>> get_shape_rank(const NodeContext& context

 Output<Node> reshape_kernel_for_group(const NodeContext& context, const Output<Node>& kernel, int64_t groups);

-std::shared_ptr<Node> get_axes_range(const NodeContext& context, size_t input_id);
+std::shared_ptr<Node> get_axes_range(const NodeContext& context, int input_id);

 std::shared_ptr<Node> numel(const NodeContext& context, const Output<Node>& x);
