Fix sign-compare warnings in PDPD FE (#15635)

* fix warnings in /paddle

* fix bmp_reader.c

* add -Wsign-compare

* fix code style

* fix sum.cpp

* remove paddle sign-compare

* fix return in input_model.cpp

* fix bmp_reader.c
This commit is contained in:
Haiqi Pan 2023-02-14 16:11:59 +08:00 committed by GitHub
parent 3d6474a4a3
commit 67fff4adcc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 19 additions and 22 deletions

View File

@@ -96,7 +96,7 @@ int readBmpImage(const char* fileName, BitMap* image) {
int i;
int image_height = image->height;
for (i = 0; i < image_height; i++) {
unsigned int storeAt = image->infoHeader.height < 0 ? i : (unsigned int)image_height - 1 - i;
int storeAt = image->infoHeader.height < 0 ? i : image_height - 1 - i;
cnt = fread(image->data + row_size * storeAt, row_size, sizeof(unsigned char), input);
if (cnt != sizeof(unsigned char)) {
printf("[BMP] file read error\n");

View File

@@ -150,7 +150,7 @@ size_t read_image_from_file(const char* img_path, unsigned char* img_data, size_
if (fp) {
fseek(fp, 0, SEEK_END);
if (ftell(fp) >= size) {
if ((size_t)ftell(fp) >= size) {
fseek(fp, 0, SEEK_SET);
read_size = fread(img_data, 1, size, fp);
}

View File

@@ -2,9 +2,6 @@
# SPDX-License-Identifier: Apache-2.0
#
if(CMAKE_COMPILER_IS_GNUCXX)
ie_add_compiler_flags(-Wno-sign-compare)
endif()
add_subdirectory(src)

View File

@@ -275,7 +275,7 @@ std::map<int32_t, std::shared_ptr<ov::Model>> FrontEnd::convert_each_node_recurs
// TODO: figure a way to safely handle unused outputs
if (named_outputs.count(port.parameter())) {
const auto& ng_outputs = named_outputs.at(port.parameter());
FRONT_END_OP_CONVERSION_CHECK(ng_outputs.size() == port.arguments_size(),
FRONT_END_OP_CONVERSION_CHECK(ng_outputs.size() == (size_t)port.arguments_size(),
"The number of output tensors must be equal to "
"the number of outputs of the OV node.");
for (size_t idx = 0; idx < ng_outputs.size(); ++idx) {

View File

@@ -163,9 +163,7 @@ bool read_tensor(std::istream& is, char* data, size_t len) {
std::vector<char> dims_struct(dims_len);
is.read(&dims_struct[0], dims_len);
is.read(data, len);
if (is.gcount() != len)
return false;
return true;
return (size_t)is.gcount() == len;
}
template <typename T>
@@ -365,7 +363,7 @@ void InputModel::InputModelImpl::createTempConsts() {
var_place->set_partial_shape(tensor_ps);
Shape shape(tensor_ps.size(), 0);
for (auto i = 0; i < tensor_ps.size(); i++) {
for (size_t i = 0; i < tensor_ps.size(); i++) {
const auto& dim = tensor_ps[i];
if (dim.is_static()) {
shape[i] = dim.get_length();

View File

@@ -48,7 +48,7 @@ std::shared_ptr<Node> op::internal::ConditionalBlock::clone_with_new_inputs(cons
return make_shared<ConditionalBlock>(new_args.at(0), m_is_scalar_condition, m_sub_block_index, m_output_infos);
} else {
OutputVector inputs_args;
for (auto i = 0; i < new_args.size() - 1; i++) {
for (size_t i = 0; i < new_args.size() - 1; i++) {
inputs_args.push_back(new_args[i]);
}
return make_shared<ConditionalBlock>(inputs_args,
@@ -66,7 +66,7 @@ bool op::internal::ConditionalBlock::visit_attributes(AttributeVisitor& visitor)
}
void op::internal::ConditionalBlock::validate_and_infer_types() {
for (auto i = 0; i < m_output_infos.size(); i++) {
for (size_t i = 0; i < m_output_infos.size(); i++) {
set_output_type(i, m_output_infos[i].first, m_output_infos[i].second);
}
}

View File

@@ -32,7 +32,7 @@ bool op::internal::While::visit_attributes(AttributeVisitor& visitor) {
}
void op::internal::While::validate_and_infer_types() {
for (auto i = 0; i < m_output_infos.size(); i++) {
for (size_t i = 0; i < m_output_infos.size(); i++) {
set_output_type(i, m_output_infos[i].first, m_output_infos[i].second);
}
}

View File

@@ -50,7 +50,7 @@ ov::frontend::paddle::pass::TransformIf::TransformIf(std::vector<std::shared_ptr
// openvino If requires both then and else branch at the same time.
ParameterVector params;
ResultVector results;
for (auto i = 0; i < then_branch->get_output_size(); i++) {
for (size_t i = 0; i < then_branch->get_output_size(); i++) {
const auto param = std::make_shared<Parameter>(then_branch->get_output_element_type(i),
then_branch->get_output_partial_shape(i));
param->set_friendly_name(then_branch->get_output_op(i)->get_output_tensor(0).get_any_name());
@@ -96,7 +96,7 @@ ov::frontend::paddle::pass::TransformIf::TransformIf(std::vector<std::shared_ptr
auto else_results = else_branch->get_results();
auto then_results = then_branch->get_results();
for (auto i = 0; i < else_results.size(); i++) {
for (size_t i = 0; i < else_results.size(); i++) {
if_node->set_output(then_results[i], else_results[i]);
}
replace_node(conditional_block, if_node);

View File

@@ -22,7 +22,7 @@ NamedOutputs meshgrid(const NodeContext& node) {
}
const auto out_shape = std::make_shared<default_opset::Concat>(dims, 0);
OutputVector outs;
for (auto i = 0; i < inputs.size(); i++) {
for (size_t i = 0; i < inputs.size(); i++) {
const auto& input = inputs[i];
const auto out =
std::make_shared<default_opset::Broadcast>(input,

View File

@@ -103,7 +103,7 @@ NamedOutputs slice_op(const NodeContext& node, const bool& stride_input) {
const auto decreased_node = std::make_shared<default_opset::Squeeze>(stride_slice_node, squeeze_index_node);
const auto input_rank = input_shape.rank().get_length();
if (input_rank == decrease_axis.size()) {
if ((size_t)input_rank == decrease_axis.size()) {
auto restore_node = std::make_shared<default_opset::Reshape>(
decreased_node,
std::make_shared<default_opset::Constant>(element::i64, Shape{1}, 1),

View File

@@ -12,7 +12,7 @@ namespace op {
NamedOutputs sum(const NodeContext& node) {
auto data = node.get_ng_inputs("X");
auto sum = data[0].get_node_shared_ptr();
for (int i = 1; i < data.size(); i++) {
for (size_t i = 1; i < data.size(); i++) {
sum = std::make_shared<default_opset::Add>(sum, data[i]);
}
return node.default_single_output_mapping({sum}, {"Out"});

View File

@@ -47,13 +47,14 @@ const std::map<std::string, std::vector<std::shared_ptr<InPortPlace>>>& OpPlace:
std::shared_ptr<OutPortPlace> OpPlace::get_output_port_paddle(const std::string& outputName,
int outputPortIndex) const {
FRONT_END_GENERAL_CHECK(outputPortIndex <= m_output_ports.at(outputName).size(),
FRONT_END_GENERAL_CHECK((size_t)outputPortIndex <= m_output_ports.at(outputName).size(),
"outputPortIndex is out of bounds.");
return m_output_ports.at(outputName)[outputPortIndex];
}
std::shared_ptr<InPortPlace> OpPlace::get_input_port_paddle(const std::string& inputName, int inputPortIndex) const {
FRONT_END_GENERAL_CHECK(inputPortIndex <= m_input_ports.at(inputName).size(), "inputPortIndex is out of bounds.");
FRONT_END_GENERAL_CHECK((size_t)inputPortIndex <= m_input_ports.at(inputName).size(),
"inputPortIndex is out of bounds.");
return m_input_ports.at(inputName)[inputPortIndex];
}
@@ -145,13 +146,14 @@ std::vector<Place::Ptr> OpPlace::get_consuming_ports() const {
}
Place::Ptr OpPlace::get_output_port(const std::string& outputName, int outputPortIndex) const {
FRONT_END_GENERAL_CHECK(outputPortIndex <= m_output_ports.at(outputName).size(),
FRONT_END_GENERAL_CHECK((size_t)outputPortIndex <= m_output_ports.at(outputName).size(),
"outputPortIndex is Out of bounds.");
return m_output_ports.at(outputName)[outputPortIndex];
}
Place::Ptr OpPlace::get_input_port(const std::string& inputName, int inputPortIndex) const {
FRONT_END_GENERAL_CHECK(inputPortIndex <= m_input_ports.at(inputName).size(), "inputPortIndex is out of bounds.");
FRONT_END_GENERAL_CHECK((size_t)inputPortIndex <= m_input_ports.at(inputName).size(),
"inputPortIndex is out of bounds.");
return m_input_ports.at(inputName)[inputPortIndex];
}