Fix for warnings spotted by clang compiler (#11384)

This commit is contained in:
Maksim Derbasov 2022-04-01 16:10:51 +03:00 committed by GitHub
parent 3d92c8c4c7
commit 56df3962e3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 28 additions and 33 deletions

View File

@@ -2963,7 +2963,7 @@ TEST_P(conv_int8_activation_eltwise_quantize_onednn, bsv32_fsv32) {
input_layout("input", get_input_layout(p)),
data("weights", get_mem(get_weights_layout(p), -1, 1)),
data("bias", get_mem(get_bias_layout(p))),
data("eltwise_data", get_mem(eltwise_layout, -0.5, 0.5)),
data("eltwise_data", get_mem(eltwise_layout, -1, 1)),
data("in_lo", get_mem(get_per_channel_layout(p), min_random, 0)),
data("in_hi", get_mem(get_per_channel_layout(p), 1, max_random)),
data("out_lo", get_mem(get_single_element_layout(p), -127)),

View File

@@ -66,7 +66,6 @@ TEST(test_can_fuse_reorder, reorder_for_mixed_type_convolution_fsv32_onednn)
auto& node = node_ptr->as<reorder>();
auto& input = node.input();
for (auto usr : node_ptr->get_users()) {
auto temp = usr->get_output_layout();
EXPECT_EQ(false, lo.can_fuse_reorder(input, *usr, node.input().get_output_layout().format, usr->get_output_layout().format));
}
}
@@ -108,7 +107,6 @@ TEST(test_can_fuse_reorder, reorder_for_mixed_type_convolution_fsv32_cldnn)
auto& node = node_ptr->as<reorder>();
auto& input = node.input();
for (auto usr : node_ptr->get_users()) {
auto temp = usr->get_output_layout();
EXPECT_EQ(true, lo.can_fuse_reorder(input, *usr, node.input().get_output_layout().format, usr->get_output_layout().format));
}
}
@@ -186,7 +184,6 @@ TEST_P(test_fused_reorder_deep_depth, no_removal_for_deep_depth_conv)
auto& node = node_ptr->as<reorder>();
auto& input = node.input();
for (auto usr : node_ptr->get_users()) {
auto temp = usr->get_output_layout();
EXPECT_EQ(p.expected_result, lo.can_fuse_reorder(input, *usr, node.input().get_output_layout().format, usr->get_output_layout().format));
}
}
@@ -237,7 +234,6 @@ TEST_P(test_can_fuse_reorder_cldnn, reorder_for_firstconv_cldnn)
auto& node = node_ptr->as<reorder>();
auto& input = node.input();
for (auto usr : node_ptr->get_users()) {
auto temp = usr->get_output_layout();
EXPECT_EQ(p.expected_result, lo.can_fuse_reorder(input, *usr, node.input().get_output_layout().format, usr->get_output_layout().format));
}
}
@@ -285,7 +281,6 @@ TEST_P(test_can_fuse_reorder_onednn, reorder_for_firstconv_onednn)
auto& node = node_ptr->as<reorder>();
auto& input = node.input();
for (auto usr : node_ptr->get_users()) {
auto temp = usr->get_output_layout();
EXPECT_EQ(p.expected_result, lo.can_fuse_reorder(input, *usr, node.input().get_output_layout().format, usr->get_output_layout().format));
}
}

View File

@@ -652,7 +652,7 @@ TEST_P(OVClassGetMetricTest_OPTIMIZATION_CAPABILITIES, GetMetricAndPrintNoThrow)
TEST_P(OVClassGetMetricTest_MAX_BATCH_SIZE, GetMetricAndPrintNoThrow) {
ov::Core ie;
uint32_t max_batch_size;
uint32_t max_batch_size = 0;
ASSERT_NO_THROW(max_batch_size = ie.get_property(deviceName, ov::max_batch_size));
@@ -680,7 +680,7 @@ TEST_P(OVClassGetMetricTest_DEVICE_TYPE, GetMetricAndPrintNoThrow) {
TEST_P(OVClassGetMetricTest_RANGE_FOR_ASYNC_INFER_REQUESTS, GetMetricAndPrintNoThrow) {
ov::Core ie = createCoreWithTemplate();
unsigned int start, end, step;
unsigned int start{0}, end{0}, step{0};
ASSERT_NO_THROW(std::tie(start, end, step) = ie.get_property(deviceName, ov::range_for_async_infer_requests));

View File

@@ -887,7 +887,7 @@ TEST_F(myriadLayersTests_nightly, SmallConv_CorruptInputBug) {
{
ie_fp16 *dst = input->buffer().as<ie_fp16 *>();
for (int i = 0; i < input->size(); ++i) {
float val = static_cast<float>(std::rand()) / RAND_MAX;
float val = static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX);
dst[i] = PrecisionUtils::f32tof16(val);
}
}

View File

@@ -860,7 +860,7 @@ public:
gen_confidence.resize(NUM_CONF);
for (size_t i = 0; i < NUM_CONF; ++i) {
gen_confidence[i] = static_cast<float>(std::rand()) / RAND_MAX;
gen_confidence[i] = static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX);
}
InferenceEngine::Core ie;

View File

@@ -140,7 +140,7 @@ static const std::map<const char*, kernel> s_kernels = {
};
void genRandomDataPow(Blob::Ptr blob) {
float scale = 2.0f / RAND_MAX;
float scale = 2.0f / float(RAND_MAX);
/* fill by random data in the range (-1, 1)*/
auto * blobRawDataFp16 = blob->buffer().as<ie_fp16 *>();
size_t count = blob->size();
@@ -160,7 +160,7 @@ void genRandomDataLogic(Blob::Ptr blob) {
size_t count = blob->size();
const auto TrueVal = PrecisionUtils::f32tof16(1.f);
const auto FalseVal = PrecisionUtils::f32tof16(0.f);
float scale = 1.0f / RAND_MAX;
float scale = 1.0f / float(RAND_MAX);
for (size_t indx = 0; indx < count; ++indx) {
float val = rand() * scale;
blobRawDataFp16[indx] = val <.5f ? FalseVal : TrueVal;
@@ -315,7 +315,7 @@ protected:
std::vector<float> coeff;
for (int i = 0; i < count; i++)
coeff.push_back(withCoefs ? ((float)rand() / RAND_MAX) * 2.0f : 1.0f);
coeff.push_back(withCoefs ? (float(rand()) / float(RAND_MAX)) * 2.0f : 1.0f);
if (withCoefs) {
_params["coeff"] = std::to_string(coeff[0]);
for (int i = 1; i < count; i++)

View File

@@ -39,8 +39,8 @@ static void generateData(Blob::Ptr inputBoxesBlob,
// boxes generator
auto genXY = [](int min, int max, int minSize, int maxSize)
{
int a = min + maxSize * (float(std::rand()) / RAND_MAX);
int b = min + maxSize * (float(std::rand()) / RAND_MAX);
int a = min + maxSize * (float(std::rand()) / float(RAND_MAX));
int b = min + maxSize * (float(std::rand()) / float(RAND_MAX));
if (b < a)
std::swap(a, b);
if (b - a < minSize)
@@ -81,14 +81,14 @@ static void generateData(Blob::Ptr inputBoxesBlob,
{
for (int class_idx = 0; class_idx < numClasses; ++class_idx)
{
float dx = 0.5*layerParams.deltas_weights[0] + layerParams.deltas_weights[0] * (float(std::rand()) / RAND_MAX);
float dy = 0.5*layerParams.deltas_weights[1] + layerParams.deltas_weights[1] * (float(std::rand()) / RAND_MAX);
float dx = 0.5*layerParams.deltas_weights[0] + layerParams.deltas_weights[0] * (float(std::rand()) / float(RAND_MAX));
float dy = 0.5*layerParams.deltas_weights[1] + layerParams.deltas_weights[1] * (float(std::rand()) / float(RAND_MAX));
const float minD = 0.95;
const float maxD = 1.10;
float d_log_w = std::log(layerParams.deltas_weights[2] * (minD + (maxD - minD) * (float(std::rand()) / RAND_MAX)));
float d_log_h = std::log(layerParams.deltas_weights[3] * (minD + (maxD - minD) * (float(std::rand()) / RAND_MAX)));
float d_log_w = std::log(layerParams.deltas_weights[2] * (minD + (maxD - minD) * (float(std::rand()) / float(RAND_MAX))));
float d_log_h = std::log(layerParams.deltas_weights[3] * (minD + (maxD - minD) * (float(std::rand()) / float(RAND_MAX))));
ie_fp16* ideltas = &inputDeltas[(roi_idx * numClasses + class_idx) * 4];

View File

@@ -49,8 +49,8 @@ static void genInputs(InferenceEngine::BlobMap inputMap,
// boxes generator
auto genXY = [](int min, int max, int maxSize) {
int a = min + maxSize * (static_cast<float>(rand()) / RAND_MAX);
int b = a + maxSize * (static_cast<float>(rand()) / RAND_MAX) + 1;
int a = min + maxSize * (static_cast<float>(rand()) / static_cast<float>(RAND_MAX));
int b = a + maxSize * (static_cast<float>(rand()) / static_cast<float>(RAND_MAX)) + 1;
if (b > max) {
const int d = b - max;
@@ -84,13 +84,13 @@ static void genInputs(InferenceEngine::BlobMap inputMap,
for (int h = 0; h < iScoresDims[1]; ++h) {
for (int w = 0; w < iScoresDims[0]; ++w) {
const float maxDelta = 16.0f;
float dx = maxDelta * (static_cast<float>(std::rand()) / RAND_MAX);
float dy = maxDelta * (static_cast<float>(std::rand()) / RAND_MAX);
float dx = maxDelta * (static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX));
float dy = maxDelta * (static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX));
const float maxlogDelta = 1000.f / 128;
const float minlogDelta = 0.65;
float d_log_w = std::log(minlogDelta + (maxlogDelta - minlogDelta) * (static_cast<float>(std::rand()) / RAND_MAX));
float d_log_h = std::log(minlogDelta + (maxlogDelta - minlogDelta) * (static_cast<float>(std::rand()) / RAND_MAX));
float d_log_w = std::log(minlogDelta + (maxlogDelta - minlogDelta) * (static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX)));
float d_log_h = std::log(minlogDelta + (maxlogDelta - minlogDelta) * (static_cast<float>(std::rand()) / static_cast<float>(RAND_MAX)));
ie_fp16* ideltas = &inputDeltas[idx * step_hw * 4];

View File

@@ -35,8 +35,8 @@ static void genInputs(InferenceEngine::BlobMap inputMap) {
// boxes generator
auto genXY = [](int min, int max, int maxSize) {
int a = min + maxSize * (float(rand()) / RAND_MAX);
int b = a + maxSize * (float(rand()) / RAND_MAX) + 1;
int a = min + maxSize * (float(rand()) / float(RAND_MAX));
int b = a + maxSize * (float(rand()) / float(RAND_MAX)) + 1;
if (b > max) {
const int d = b - max;
@@ -50,8 +50,8 @@ static void genInputs(InferenceEngine::BlobMap inputMap) {
{
const int minS = 200;
const int maxS = 880;
const int W = minS + maxS * (float(rand()) / RAND_MAX);
const int H = minS + maxS * (float(rand()) / RAND_MAX);
const int W = minS + maxS * (float(rand()) / float(RAND_MAX));
const int H = minS + maxS * (float(rand()) / float(RAND_MAX));
const int X0 = 0, X1 = W, SX = (X1 - X0 + 1) * 3 / 5;
const int Y0 = 0, Y1 = H, SY = (Y1 - Y0 + 1) * 3 / 5;

View File

@@ -22,7 +22,7 @@ protected:
const auto getRandomValue = [&generator]() {
// Each third value will be a zero for test NonZero functionality
return generator() % 3 ? float(generator()) / generator.max() * 255.f : 0.f;
return generator() % 3 ? float(generator()) / float(generator.max()) * 255.f : 0.f;
};
size_t count = blob->size();

View File

@@ -195,7 +195,7 @@ void zeroWeightsRange(uint16_t* ptr, size_t weightsSize) {
void defaultWeightsRange(uint16_t* ptr, size_t weightsSize) {
ASSERT_NE(ptr, nullptr);
float scale = 2.0f / RAND_MAX;
float scale = 2.0f / float(RAND_MAX);
for (size_t count = 0 ; count < weightsSize; ++count) {
float val = rand();
val = val * scale - 1.0f;
@@ -205,7 +205,7 @@ void defaultWeightsRange(uint16_t* ptr, size_t weightsSize) {
void smallWeightsRange(uint16_t* ptr, size_t weightsSize) {
ASSERT_NE(ptr, nullptr);
float scale = 2.0f / RAND_MAX;
float scale = 2.0f / float(RAND_MAX);
for (size_t count = 0 ; count < weightsSize; ++count) {
float val = rand();
val = (val * scale - 1.0f) / 512;
@@ -265,7 +265,7 @@ bool fromBinaryFile(std::string input_binary, InferenceEngine::Blob::Ptr blob) {
WeightsBlob* GenWeights(size_t sz, float min_val, float max_val) {
// TODO: pass seed as parameter
float scale = (max_val - min_val) / RAND_MAX;
float scale = (max_val - min_val) / float(RAND_MAX);
WeightsBlob *weights = new WeightsBlob({InferenceEngine::Precision::U8, {(sz) * sizeof(uint16_t)}, InferenceEngine::C});
weights->allocate();
uint16_t *inputBlobRawDataFp16 = weights->data().as<uint16_t *>();