[GPU] Fix warnings (#16196)

* fix 1

* fix 2-10

* fixed code style

* fixed win plugin

* fixed linux plugin

* fixed a part of tests

* fixed test for linux

* fixed pooling_gpu_test for linux

* fixed pooling_gpu_test for linux

* fix after review and enable wd4267 in makefile

* fix after review

* fixed unit test errors
Andrei Gorbachev 2023-03-16 05:29:16 +00:00 committed by GitHub
parent 05866f05ea
commit 2f3ae4518e
91 changed files with 328 additions and 272 deletions
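Nearly every hunk below applies the same class of fix: an implicit narrowing conversion from size_t (or another wide integer) to a smaller type such as int or uint32_t, which MSVC reports as warning C4267 ("conversion from 'size_t' to 'int', possible loss of data"). The recurring remedies are to widen the destination type (size_t or auto) or to make the narrowing explicit with static_cast. A minimal standalone sketch of both styles, using hypothetical names that are not taken from this commit:

#include <cstddef>
#include <vector>

// Before: implicit size_t -> int narrowing triggers C4267 on MSVC.
int count_before(const std::vector<int>& v) {
    int n = v.size();  // warning C4267: size_t narrowed to int
    return n;
}

// Fix 1: keep the narrow type but make the conversion explicit,
// as most call sites in this commit do.
int count_explicit(const std::vector<int>& v) {
    return static_cast<int>(v.size());
}

// Fix 2: widen the destination so no conversion happens at all,
// as done for locals and return types elsewhere in the diff.
std::size_t count_widened(const std::vector<int>& v) {
    return v.size();
}

Which style a given hunk uses generally depends on whether the narrow type is imposed by an existing API (static_cast) or is free to change (widening).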

View File

@@ -29,7 +29,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance(
const float memThresholdAssumeLimited = MemBandwidthPressure::LIMITED) {
int total_convs = 0, mem_limited_convs = 0, compute_convs = 0, total_gemms = 0, mem_limited_gemms = 0,
total_deconvs = 0, compute_deconvs = 0, mem_limited_deconvs = 0;
auto memLimitedFactor = [&](int size_data_moved, int datatype_size = 4) -> float {
auto memLimitedFactor = [&](size_t size_data_moved, int datatype_size = 4) -> float {
return (cache_size / (size_data_moved * datatype_size));
};
auto isLowPrecision = [&](ngraph::element::Type type) -> bool {
@@ -57,7 +57,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance(
const bool isBF16orFP16 = isHalfPrecision(type1);
const int data_type_size = isINT8 ? 1 : isBF16orFP16 ? 2 : 4;
int dataSizeInput = 0, dataSizeOutput = 0;
size_t dataSizeInput = 0, dataSizeOutput = 0;
if (!std::strcmp("MatMul", node_name)) {
const auto input0 = node->input(0);
const auto input1 = node->input(1);
@@ -103,7 +103,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance(
std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies<size_t>());
dataSizeOutput =
std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies<size_t>());
const auto factor = memLimitedFactor(dataSizeInput + dataSizeOutput, data_type_size);
const auto factor = memLimitedFactor(static_cast<int>(dataSizeInput + dataSizeOutput), data_type_size);
mem_limited_convs += factor < memThresholdAssumeLimited;
worst_case = std::min(factor, worst_case);
}
@@ -124,7 +124,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance(
std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies<size_t>());
dataSizeOutput =
std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies<size_t>());
const auto factor = memLimitedFactor(dataSizeInput + dataSizeOutput, data_type_size);
const auto factor = memLimitedFactor(static_cast<int>(dataSizeInput + dataSizeOutput), data_type_size);
mem_limited_deconvs += factor < memThresholdAssumeLimited;
worst_case = std::min(factor, worst_case);
}

View File

@@ -59,7 +59,7 @@ public:
cldnn::engine& get_engine() const { return m_context->get_engine(); }
const ExecutionConfig& get_config() const { return m_config; }
int GetMaxDynamicBatchSize() const { return m_config.get_property(ov::intel_gpu::max_dynamic_batch); }
size_t GetMaxDynamicBatchSize() const { return m_config.get_property(ov::intel_gpu::max_dynamic_batch);}
const std::map<std::string, cldnn::layout>& GetInputLayouts() const { return m_program->GetInputLayouts(); }
const InferenceEngine::InputsDataMap GetNetworkInputs() const { return m_program->GetNetworkInputs(); }
const InferenceEngine::OutputsDataMap GetNetworkOutputs() const { return m_program->GetNetworkOutputs(); }

View File

@@ -103,7 +103,9 @@ struct arg_max_min : public primitive_base<arg_max_min> {
values_first == rhs_casted.values_first;
}
uint32_t get_output_nums() const { return (input_size() == 3 ? 2 : output_size()); }
size_t get_output_nums() const {
return (input_size() == 3 ? 2 : output_size());
}
bool has_second_output() const { return get_output_nums() == 2; }
bool use_multiple_outputs() const { return input_size() != 3; }

View File

@@ -88,7 +88,7 @@ std::vector<layout> broadcast_inst::calc_output_layouts(broadcast_node const& /*
if (input1.is_static()) {
output_rank = input1.get_dim(0); // target shape rank is set as second input.
}
output_shapes[0] = ShapeType::dynamic(std::max(output_rank, static_cast<int>(1)));
output_shapes[0] = ShapeType::dynamic(std::max(output_rank, 1));
}
format output_format = format::adjust_to_rank(input0_layout.format, output_shapes[0].size());

View File

@@ -42,13 +42,13 @@ layout convolution_inst::calc_output_layout(convolution_node const& node, kernel
output_type = data_types::f32;
}
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
auto dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
auto dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
auto dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
// TODO: Consider moving general parameter verification to arguments constructor.
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
@@ -249,13 +249,13 @@ std::vector<layout> convolution_inst::calc_output_layouts(convolution_node const
output_type = data_types::f32;
}
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
auto dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
auto dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
auto dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
// TODO: Consider moving general parameter verification to arguments constructor.
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,

View File

@@ -80,14 +80,17 @@ layout deconvolution_inst::calc_output_layout(deconvolution_node const& node, ke
3,
"As for now, deconvolutions with more than 3 dimensions are not supported");
int32_t x = off_factor * pad[pad.size() - 1] + (input_layout.spatial(0) - 1) * strd[strd.size() - 1] + weights_layout.spatial(0);
int32_t x = static_cast<int32_t>(
off_factor * pad[pad.size() - 1] + (input_layout.spatial(0) - 1) * strd[strd.size() - 1] + weights_layout.spatial(0));
int32_t y = 1;
if (spatial_dims > 1) {
y = off_factor * pad[pad.size() - 2] + (input_layout.spatial(1) - 1) * strd[strd.size() - 2] + weights_layout.spatial(1);
y = static_cast<int32_t>(
off_factor * pad[pad.size() - 2] + (input_layout.spatial(1) - 1) * strd[strd.size() - 2] + weights_layout.spatial(1));
}
int32_t z = 1;
if (spatial_dims > 2) {
z = off_factor * pad[pad.size() - 3] + (input_layout.spatial(2) - 1) * strd[strd.size() - 3] + weights_layout.spatial(2);
z = static_cast<int32_t>(
off_factor * pad[pad.size() - 3] + (input_layout.spatial(2) - 1) * strd[strd.size() - 3] + weights_layout.spatial(2));
}
tensor output_size(input_layout.batch(),

View File

@@ -107,7 +107,7 @@ layout fully_connected_inst::calc_output_layout(fully_connected_node const& node
auto reshape_to_2d = [](const ov::PartialShape& shape, int64_t feature) {
auto staticShape = shape.to_shape();
size_t total = std::accumulate(staticShape.begin(), staticShape.end(), 1, std::multiplies<size_t>());
size_t total = std::accumulate(staticShape.begin(), staticShape.end(), static_cast<size_t>(1), std::multiplies<size_t>());
std::vector<int64_t> reshapeSize = { static_cast<int64_t>(total) / feature, feature };
return reshapeSize;
};

View File

@@ -17,7 +17,10 @@ layout gather_inst::calc_output_layout(gather_node const& node, kernel_impl_para
auto desc = impl_param.typed_desc<gather>();
auto input_layout = impl_param.get_input_layout();
std::vector<tensor::value_type> dims_converted(desc->output_shape.begin(), desc->output_shape.end());
std::vector<tensor::value_type> dims_converted;
for (auto dim : desc->output_shape) {
dims_converted.push_back(static_cast<tensor::value_type>(dim));
}
// extend shape to 4d
for (size_t i = dims_converted.size(); i < 4; i++)
dims_converted.push_back(1);

View File

@@ -225,7 +225,7 @@ void pre_replace_deconv::run(program& p) {
p.rename(deconv_node, rename_id);
// reshape weights
int pixel_shuffle_size = scale_factor * scale_factor;
auto pixel_shuffle_size = static_cast<tensor::value_type>(scale_factor * scale_factor);
int kernel_size = 5;
tensor target_weights_size = { pixel_shuffle_size, filter_layout.feature(), kernel_size, kernel_size };
auto target_weights_layout = layout{ weights_layout.data_type, weights_layout.format, target_weights_size };
@@ -252,7 +252,7 @@ void pre_replace_deconv::run(program& p) {
static_cast<int>(filter_layout.feature()),
static_cast<int>(filter_layout.spatial(0)),
static_cast<int>(filter_layout.spatial(1)),
scale_factor,
static_cast<int>(scale_factor),
subpixel_weights);
if (weights_data_type == data_types::f16) {

View File

@@ -102,7 +102,7 @@ void prepare_padding::run(program& p) {
// WA for this format. sliding window needs to be fixed --perf degradation for IncepctionV1 type models
tensor size(1);
for (size_t i = 0; i < prim->size.size(); i++) {
size.spatial[i] = prim->size[prim->size.size() - i - 1];
size.spatial[i] = static_cast<tensor::value_type>(prim->size[prim->size.size() - i - 1]);
}
if (node->get_output_layout().format == format::b_fs_yx_fsv16)
@@ -183,13 +183,13 @@ void prepare_padding::run(program& p) {
auto pad = conv->pad;
auto stride = conv->stride;
auto dilation = conv->dilation;
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
tensor::value_type pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0;
tensor::value_type pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0;
@@ -277,9 +277,15 @@ void prepare_padding::run(program& p) {
auto padding_begin_x = std::max<tensor::value_type>(pad_x, 0);
auto padding_begin_y = std::max<tensor::value_type>(pad_y, 0);
auto padding_begin_z = std::max<tensor::value_type>(pad_z, 0);
auto padding_end_x = std::max<tensor::value_type>(input_limit_x - prev_prim_output_layout.spatial(0), 0);
auto padding_end_y = std::max<tensor::value_type>(input_limit_y - prev_prim_output_layout.spatial(1), 0);
auto padding_end_z = std::max<tensor::value_type>(input_limit_z - prev_prim_output_layout.spatial(2), 0);
auto padding_end_x = std::max<tensor::value_type>(
static_cast<tensor::value_type>(input_limit_x) - prev_prim_output_layout.spatial(0),
0);
auto padding_end_y = std::max<tensor::value_type>(
static_cast<tensor::value_type>(input_limit_y) - prev_prim_output_layout.spatial(1),
0);
auto padding_end_z = std::max<tensor::value_type>(
static_cast<tensor::value_type>(input_limit_z) - prev_prim_output_layout.spatial(2),
0);
cldnn::padding needed_padding({0, 0, padding_begin_x, padding_begin_y, padding_begin_z}, {0, 0, padding_end_x, padding_end_y, padding_end_z}, 0);
needed_padding = padding::max(prev_prim_output_layout.data_padding, needed_padding);

View File

@@ -275,7 +275,7 @@ void prepare_primitive_fusing::fuse_bias(program &p) {
for (size_t i = 0; i < const_shape.size(); ++i) {
if (const_shape[i] != 1) {
count_elements_not_one++;
idx_element_not_one = i;
idx_element_not_one = static_cast<int32_t>(i);
}
if (count_elements_not_one > 1)
break;

View File

@@ -66,7 +66,7 @@ public:
const auto& mode = primitive->mode;
const auto& sort_type = primitive->sort;
const auto& values_first = primitive->values_first;
const auto& outputs_num = (primitive->input_size() == 3 ? 2 : primitive->output_size());
const auto& outputs_num = primitive->input_size() == 3 ? 2 : primitive->output_size();
auto argm_params = get_default_params<kernel_selector::arg_max_min_params>(impl_param);
auto argm_optional_params =

View File

@@ -59,14 +59,14 @@ public:
uint32_t pad_x = std::max<std::ptrdiff_t>(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0);
params.padding = {pad_x, pad_y, pad_z};
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
params.stride = {stride_x, stride_y, stride_z};
uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
params.dilation = {dilation_x, dilation_y, dilation_z};
return {params, optional_params};

View File

@@ -13,8 +13,8 @@ namespace ocl {
namespace {
kernel_selector::concat_axis convert_axis(int64_t axis, size_t rank) {
unsigned cldnn_axis = axis >= 0 ? axis : axis + static_cast<int64_t>(rank);
if (cldnn_axis >= rank)
auto cldnn_axis = axis >= 0 ? axis : axis + static_cast<int64_t>(rank);
if (cldnn_axis >= static_cast<int64_t>(rank))
IE_THROW() << "Concatenation axis exceeds number of dimensions";
// Difference in dimension ordering between IE and GPU plugin,

View File

@@ -78,14 +78,14 @@ public:
uint32_t pad_x = std::max<std::ptrdiff_t>(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0);
conv_params.padding = {pad_x, pad_y, pad_z};
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
conv_params.stride = {stride_x, stride_y, stride_z};
uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
conv_params.dilation = {dilation_x, dilation_y, dilation_z};
if ((impl_param.input_layouts[0].data_type == data_types::u8 ||

View File

@@ -45,7 +45,7 @@ public:
auto runtime_offset = convert_data_tensor(impl_param.get_input_layout(), impl_param.input_offsets[0]).GetFirstElementOffset();
kernel_selector::ScalarDescriptor s;
s.t = kernel_selector::ScalarDescriptor::Types::UINT32;
s.v.u32 = runtime_offset;
s.v.u32 = static_cast<uint32_t>(runtime_offset);
OPENVINO_ASSERT(_kernel_data.kernels[0].params.scalars.size() == 1,
"[GPU] Scalar field for runtime offset is not added for crop shape agnostic impl");
_kernel_data.kernels[0].params.scalars[0] = s;

View File

@@ -60,14 +60,14 @@ public:
uint32_t pad_x = std::max<std::ptrdiff_t>(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0);
params.padding = {pad_x, pad_y, pad_z};
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
params.stride = {stride_x, stride_y, stride_z};
uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
params.dilation = {dilation_x, dilation_y, dilation_z};
return {params, optional_params};

View File

@@ -102,14 +102,14 @@ public:
params.padding = {pad_x, pad_y, pad_z};
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
params.stride = {stride_x, stride_y, stride_z};
uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
params.dilation = {dilation_x, dilation_y, dilation_z};
params.kernelSize = { (uint32_t)kernel_size.spatial[0],

View File

@@ -41,7 +41,8 @@ public:
auto reshape_to_2d = [](const ov::PartialShape& shape, const ov::Dimension& feature) {
if (shape.is_static()) {
auto static_shape = shape.to_shape();
size_t total = std::accumulate(static_shape.begin(), static_shape.end(), 1, std::multiplies<size_t>());
size_t total =
std::accumulate(static_shape.begin(), static_shape.end(), size_t(1), std::multiplies<size_t>());
auto dim = feature.is_static() ? feature.get_length() : static_cast<int64_t>(static_shape.back());
return ov::PartialShape{ static_cast<int64_t>(total) / dim, dim };
} else {

View File

@@ -55,7 +55,7 @@ struct gather_nonzero_impl : typed_primitive_impl_ocl<gather_nonzero> {
auto optional_params = get_default_optional_params<kernel_selector::gather_nonzero_optional_params>(impl_param.get_program());
params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1)));
params.ov_input_rank = impl_param.get_input_layout().get_partial_shape().size();
params.ov_input_rank = static_cast<uint32_t>(impl_param.get_input_layout().get_partial_shape().size());
return {params, optional_params};
}

View File

@@ -24,7 +24,7 @@ inline std::vector<uint16_t> convert_permute_order(const std::vector<uint16_t>&
// 1. Switch permute order values for spatial dims
for (auto const& o : ie_order_aligned) {
if (o >= 2)
cldnn_order.push_back(1 + ie_order_aligned.size() - o);
cldnn_order.push_back(1 + static_cast<uint16_t>(ie_order_aligned.size()) - o);
else
cldnn_order.push_back(o);
}

View File

@@ -138,9 +138,9 @@ public:
else
pp.divMode = cldnn_2_kernel_divider_mode(primitive->mode);
uint32_t kernel_z = kernel.size() >= 3 ? kernel[kernel.size() - 3] : 1;
uint32_t kernel_y = kernel.size() >= 2 ? kernel[kernel.size() - 2] : 1;
uint32_t kernel_x = kernel.size() >= 1 ? kernel[kernel.size() - 1] : 1;
uint32_t kernel_z = kernel.size() >= 3 ? static_cast<uint32_t>(kernel[kernel.size() - 3]) : 1;
uint32_t kernel_y = kernel.size() >= 2 ? static_cast<uint32_t>(kernel[kernel.size() - 2]) : 1;
uint32_t kernel_x = kernel.size() >= 1 ? static_cast<uint32_t>(kernel[kernel.size() - 1]) : 1;
pp.poolSize = {kernel_x, kernel_y, kernel_z};
uint32_t pad_z = std::max<std::ptrdiff_t>(pads_begin.size() >= 3 ? pads_begin[pads_begin.size() - 3] : 0, 0);
@@ -148,14 +148,14 @@ public:
uint32_t pad_x = std::max<std::ptrdiff_t>(pads_begin.size() >= 1 ? pads_begin[pads_begin.size() - 1] : 0, 0);
pp.poolPad = {pad_x, pad_y, pad_z};
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
pp.poolStride = {stride_x, stride_y, stride_z};
uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
pp.poolDilation = {dilation_x, dilation_y, dilation_z};
return {params, optional_params};

View File

@@ -71,7 +71,7 @@ struct prior_box_impl : typed_primitive_impl_ocl<prior_box> {
params.widths = primitive->widths;
params.heights = primitive->heights;
const auto output_shape = impl_param.get_output_layout().get_shape();
params.num_priors_4 = output_shape[1] / (params.width * params.height);
params.num_priors_4 = static_cast<uint32_t>(output_shape[1] / (params.width * params.height));
params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1)));
return {params, {}};

View File

@@ -22,7 +22,7 @@ static std::vector<uint16_t> convert_axes(std::vector<int64_t> axes, size_t rank
if (axis < 0)
axis = axis + rank;
converted_axes.push_back(rank + 1 - axis);
converted_axes.push_back(static_cast<uint16_t>(rank + 1 - axis));
}
return converted_axes;

View File

@@ -79,7 +79,9 @@ inline std::vector<int32_t> convert_pads(const std::vector<size_t>& pad, size_t
if (pad.empty()) {
new_pad = std::vector<int32_t>(rank, 0);
} else {
new_pad = std::vector<int32_t>(pad.begin(), pad.end());
for (auto p : pad) {
new_pad.push_back(static_cast<int32_t>(p));
}
if (new_pad.size() > 2)
std::reverse(new_pad.begin() + 2, new_pad.end());
for (size_t i = new_pad.size(); i < rank || i < 4; ++i)

View File

@@ -48,7 +48,11 @@ std::vector<std::int32_t> extractIntegerData(const data_node& node, const stream
std::vector<std::int32_t> extractShape(kernel_selector::Tensor::DataTensor& tensor) {
auto logical_dims = tensor.LogicalDims();
// LogicalDims method returns dims in reversed order
return {logical_dims.rbegin(), logical_dims.rend()};
std::vector<int32_t> reverse_logical_dims;
for (auto it = logical_dims.rbegin(); it != logical_dims.rend(); ++it) {
reverse_logical_dims.push_back(static_cast<int32_t>(*it));
}
return reverse_logical_dims;
}
} // namespace

View File

@@ -34,8 +34,8 @@ protected:
int input_idx = DNNL_ARG_MULTIPLE_SRC;
for (size_t i = 0; i < instance.inputs_memory_count(); i++) {
auto& input = instance.input_memory(i);
auto offset = onednn::get_f_offset(instance.get_input_layout(), _pd.dnnl::primitive_desc_base::src_desc(i));
args.insert({input_idx++, input.get_onednn_memory(_pd.dnnl::primitive_desc_base::src_desc(i), offset)});
auto offset = onednn::get_f_offset(instance.get_input_layout(), _pd.dnnl::primitive_desc_base::src_desc(static_cast<uint8_t>(i)));
args.insert({input_idx++, input.get_onednn_memory(_pd.dnnl::primitive_desc_base::src_desc(static_cast<uint8_t>(i)), offset)});
}
{

View File

@@ -24,7 +24,8 @@ struct fully_connected_onednn : typed_primitive_onednn_impl<fully_connected> {
private:
static std::vector<int64_t> reshape_to_2d(const ov::PartialShape& shape, int64_t feature) {
auto staticShape = shape.to_shape();
size_t total = std::accumulate(staticShape.begin(), staticShape.end(), 1, std::multiplies<size_t>());
size_t total =
std::accumulate(staticShape.begin(), staticShape.end(), static_cast<size_t>(1), std::multiplies<size_t>());
std::vector<int64_t> reshapeSize = { static_cast<int64_t>(total) / feature, feature };
return reshapeSize;
}

View File

@@ -104,9 +104,9 @@ inline tensor calc_sliding_window_output_range<swor_mode::all>(const tensor& inp
auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
tensor::value_type dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast<int32_t>(dilation[dilation.size() - 3]) : 1;
tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast<int32_t>(dilation[dilation.size() - 2]) : 1;
tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast<int32_t>(dilation[dilation.size() - 1]) : 1;
auto pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0;
auto pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0;
@@ -161,9 +161,9 @@ inline tensor calc_sliding_window_output_range<swor_mode::exceed_once>(const ten
int64_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
int64_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
tensor::value_type dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast<int32_t>(dilation[dilation.size() - 3]) : 1;
tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast<int32_t>(dilation[dilation.size() - 2]) : 1;
tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast<int32_t>(dilation[dilation.size() - 1]) : 1;
int64_t pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0;
int64_t pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0;
@@ -347,9 +347,9 @@ inline tensor calc_sliding_window_needed_input_range(const tensor& output_size,
auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
tensor::value_type dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast<int32_t>(dilation[dilation.size() - 3]) : 1;
tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast<int32_t>(dilation[dilation.size() - 2]) : 1;
tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast<int32_t>(dilation[dilation.size() - 1]) : 1;
auto pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0;
auto pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0;

View File

@@ -826,11 +826,11 @@ static bool is_node_for_onednn(deconvolution_node const& node) {
static bool is_node_for_onednn(fully_connected_node const& node) {
auto fc_prim = node.get_primitive();
auto ps = node.get_output_layout().get_partial_shape();
int non_spatial_count = 2 + (fc_prim->input_size == 3 ? 1 : 0);
int rank = ps.size();
size_t non_spatial_count = 2 + (fc_prim->input_size == 3 ? 1 : 0);
size_t rank = ps.size();
// OneDnn doesn't support spatial dimensions for output
for (int i = non_spatial_count; i < rank; i++) {
for (auto i = non_spatial_count; i < rank; i++) {
if (ps[i].is_dynamic() || ps[i] != 1) {
return false;
}

View File

@@ -540,8 +540,7 @@ void network::save(cldnn::BinaryOutputBuffer& ob) {
}
}
int exec_order_size;
exec_order_size = _exec_order.size();
int exec_order_size = _exec_order.size();
ob << exec_order_size;
for (const auto& p_inst : _exec_order) {

View File

@@ -47,13 +47,13 @@ layout pooling_inst::calc_output_layout(parent::typed_node const& node, kernel_i
}
}
uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
uint32_t kernel_z = window_size.size() >= 3 ? window_size[window_size.size() - 3] : 1;
uint32_t kernel_y = window_size.size() >= 2 ? window_size[window_size.size() - 2] : 1;
uint32_t kernel_x = window_size.size() >= 1 ? window_size[window_size.size() - 1] : 1;
auto kernel_z = window_size.size() >= 3 ? window_size[window_size.size() - 3] : 1;
auto kernel_y = window_size.size() >= 2 ? window_size[window_size.size() - 2] : 1;
auto kernel_x = window_size.size() >= 1 ? window_size[window_size.size() - 1] : 1;
// TODO: Consider moving general parameter verification to arguments constructor.
CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
@@ -127,7 +127,7 @@ layout pooling_inst::calc_output_layout(parent::typed_node const& node, kernel_i
// TODO: Check compatibility of output size calculation (with caffe).
tensor size(1);
for (size_t i = 0; i < window_size.size(); i++) {
size.spatial[i] = window_size[window_size.size() - i - 1];
size.spatial[i] = static_cast<tensor::value_type>(window_size[window_size.size() - i - 1]);
}
auto output_range = calc_sliding_window_output_range<swor_mode::exceed_once_data>(input_layout.get_tensor(),
size,

View File

@@ -300,7 +300,7 @@ bool primitive_inst::update_impl() {
size_t offset = 0;
for (size_t i = 0; i < _node->get_dependencies().size(); i++) {
if (_node->get_dependency(i).get_output_layout().is_dynamic()) {
auto input_shape = _node->type()->extend_input_shape_to_6d(params, i);
auto input_shape = _node->type()->extend_input_shape_to_6d(params, static_cast<uint32_t>(i));
for (size_t j = 0; j < input_shape.size(); j++)
lock[offset++] = static_cast<int32_t>(input_shape[j]);
}
@@ -308,7 +308,7 @@ bool primitive_inst::update_impl() {
for (size_t i = 0; i < _node->get_output_layouts().size(); i++) {
if (_node->get_output_layout(i).is_dynamic()) {
auto output_shape = _node->type()->extend_output_shape_to_6d(params, i);
auto output_shape = _node->type()->extend_output_shape_to_6d(params, static_cast<uint32_t>(i));
for (size_t j = 0; j < output_shape.size(); j++)
lock[offset++] = static_cast<int32_t>(output_shape[j]);
}

View File

@@ -364,7 +364,7 @@ bool program::analyze_output_size_handling_need() {
tensor size(1);
for (size_t i = 0; i < prim->size.size(); i++) {
size.spatial[i] = prim->size[prim->size.size() - i - 1];
size.spatial[i] = static_cast<tensor::value_type>(prim->size[prim->size.size() - i - 1]);
}
// TODO: Check compatibility of output size calculation (with caffe).
auto primInputSize = prim_node.input().get_output_layout().get_tensor();

View File

@@ -927,7 +927,7 @@ void program_node::init_onednn_primitive_attributes() {
if (fused_desc->activation_function == cldnn::activation_func::relu_negative_slope
&& !fused_desc->additional_params_input.empty()) {
auto dep_idx = cldnn_post_ops[idx].dep_start_idx;
int oc_dim = desc.output_layout.get_tensor().feature.size();
int oc_dim = static_cast<int>(desc.output_layout.get_tensor().feature.size());
post_ops.append_prelu(1 << oc_dim);
update_onednn_post_op_list(onednn_post_op_type::binary_relu, dep_idx);
} else if (fused_desc->activation_function == cldnn::activation_func::hard_sigmoid) {
@@ -936,7 +936,7 @@
} else if (fused_desc->activation_function == cldnn::activation_func::hsigmoid) {
// hard_sigmoid(x,a,b) = clamp(ax+b, 0, 1)
// hsigmoid(x) = clamp(val+3, 0, 6) / 6 = clamp(val/6+0.5, 0, 1) = hard_sigmoid(val, 1/6, 1/2)
post_ops.append_eltwise(dnnl::algorithm::eltwise_hardsigmoid, 1./6, 1./2);
post_ops.append_eltwise(dnnl::algorithm::eltwise_hardsigmoid, 1.f/6, 1.f/2);
update_onednn_post_op_list(onednn_post_op_type::eltwise_hardsigmoid, empty_mem);
} else if (fused_desc->activation_function == cldnn::activation_func::negative) {
post_ops.append_eltwise(dnnl::algorithm::eltwise_linear, -1, 0);

View File

@@ -25,7 +25,7 @@ static std::vector<uint16_t> convert_axes(std::vector<int64_t> axes, size_t rank
if (axis < 0)
axis = axis + rank;
converted_axes.push_back(rank + 1 - axis);
converted_axes.push_back(static_cast<uint16_t>(rank + 1 - axis));
}
return converted_axes;

View File

@@ -18,7 +18,10 @@ layout strided_slice_inst::calc_output_layout(strided_slice_node const& node, ke
auto input_layout = impl_param.get_input_layout();
auto output_format = format::get_default_format(desc->out_size.size());
auto out_shape = desc->out_size;
std::vector<tensor::value_type> dims_converted(out_shape.begin(), out_shape.end());
std::vector<tensor::value_type> dims_converted;
for (auto dim : out_shape) {
dims_converted.push_back(static_cast<tensor::value_type>(dim));
}
// extend shape to 4d
for (size_t i = dims_converted.size(); i < 4; i++) {
dims_converted.push_back(1);

View File

@@ -89,7 +89,10 @@ KernelsData kernel_selector_base::GetNaiveBestKernel(const KernelList& all_impls
if (!params.is_shape_agnostic) {
for (size_t k = 0; k < kds[0].kernels.size(); ++k) {
auto gws = kds[0].kernels[k].params.workGroups.global;
kernelsData[0].kernels[k].skip_execution = (std::accumulate(gws.begin(), gws.end(), 1, std::multiplies<size_t>()) == 0);
kernelsData[0].kernels[k].skip_execution = (std::accumulate(gws.begin(),
gws.end(),
static_cast<size_t>(1),
std::multiplies<size_t>()) == 0);
}
}
break;

View File

@@ -522,13 +522,13 @@ struct FusedOpsConfiguration {
FusedOpsConfiguration& SetShuffleVarName(std::string val) { shuffle_var_name = val; return *this; }
bool IsPostReorderFused(void) const { return orig_output_layout != DataLayout::DataLayoutCount; }
int GetDimIndexFromOrder(Tensor::DataChannelName val) const {
int dims_num = bfzyx_idx_order.size();
size_t dims_num = bfzyx_idx_order.size();
if (val == Tensor::DataChannelName::BATCH && dims_num >= 1) {
return 0;
} else if (val == Tensor::DataChannelName::FEATURE && dims_num >= 2) {
return 1;
} else if (dims_num >= 3 && dims_num - static_cast<int>(val) - 1 >= 0) {
return bfzyx_idx_order.size() - static_cast<int>(val) - 1;
return static_cast<int>(bfzyx_idx_order.size()) - static_cast<int>(val) - 1;
} else {
return -1;
}

View File

@@ -240,7 +240,7 @@ std::vector<size_t> GetOptimalLocalWorkGroupSizes(std::vector<size_t> gws, const
if (axis_by_gws[layout_order[axis_idx]] != unused_axis) {
bool is_already_exists = false;
if (axis_idx > 0) {
for (int i = axis_idx - 1; i >= 0; i--) {
for (int i = static_cast<int>(axis_idx) - 1; i >= 0; i--) {
if (axis_by_gws[layout_order[axis_idx]] == axis_by_gws[layout_order[i]]) {
is_already_exists = true;
break;

View File

@@ -15,7 +15,7 @@ inline std::string GetInputTypeStr(uint32_t idx) {
JitConstants BorderKernelBase::GetJitConstants(const border_params& params) const {
JitConstants jit = MakeBaseParamsJitConstants(params);
size_t input_offset = 1;
uint32_t input_offset = 1;
if (params.begin_type == base_params::ArgType::Input) {
jit.AddConstant(MakeJitConstant("BEGIN_TYPE", GetInputTypeStr(input_offset)));
input_offset += 1;

View File

@@ -88,7 +88,7 @@ KernelsData ConvertColorKernelBase::GetCommonKernelsData(const Params& params, c
auto jit = CreateJit(kernelName, cldnn_jit, entry_point);
auto& kernel = kd.kernels[0];
size_t number_of_inputs = prim_params.inputs.size();
uint32_t number_of_inputs = static_cast<uint32_t>(prim_params.inputs.size());
FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point,
"", false, false, number_of_inputs);

View File

@@ -44,7 +44,7 @@ KernelsData CTCLossKernelRef::GetKernelsData(const Params& params, const optiona
{},
false,
false,
kernel_params.inputs.size());
static_cast<int>(kernel_params.inputs.size()));
return {kernel_data};
}

View File

@@ -229,7 +229,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const
kd.internalBufferDataType = GetUnitType(detectOutParams);
for (size_t i = 0; i < kKernelsNum; i++) {
DispatchData dispatchData = SetDefault(detectOutParams, i);
DispatchData dispatchData = SetDefault(detectOutParams, static_cast<int>(i));
auto cldnnJit = GetJitConstants(detectOutParams);
auto entryPoint = GetEntryPoint(kernelName, detectOutParams.layerID, params, options, i);
cldnnJit.AddConstant(MakeJitConstant("BUFFER_STRIDE", buffer_stride));
@@ -256,7 +256,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const
cldnnJit.AddConstant(MakeJitConstant("USE_LOCAL_MEMORY_FOR_STACK", true));
cldnnJit.AddConstants({MakeJitConstant("DO_STAGE_" + std::to_string(i) + "_MXNET", "true"),
MakeJitConstant("LOCAL_WORK_NUM", dispatchData.lws[2]),
MakeJitConstant("PARTITION_STEP", GetPartitionStep(dispatchData.lws[2]))});
MakeJitConstant("PARTITION_STEP", GetPartitionStep(static_cast<int>(dispatchData.lws[2])))});
} else {
// Limit local memory usage for two buffers: __range [LWS1 * LWS2 * 2 * 4 (int size) bytes]
// stack [LWS1 * LWS2 * 100 (stack_size) * 4 (int size) bytes]
@@ -267,7 +267,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const
cldnnJit.AddConstants({MakeJitConstant("DO_STAGE_" + std::to_string(i) + "_CAFFE", "true"),
MakeJitConstant("LOCAL_CLASS_NUM", dispatchData.lws[1]),
MakeJitConstant("LOCAL_WORK_NUM", dispatchData.lws[2]),
MakeJitConstant("PARTITION_STEP", GetPartitionStep(dispatchData.lws[2]))});
MakeJitConstant("PARTITION_STEP", GetPartitionStep(static_cast<int>(dispatchData.lws[2])))});
}
} else if (i == 2) {
if (detectOutParams.detectOutParams.decrease_label_id) {

View File

@@ -111,7 +111,7 @@ KernelsData ExperimentalDetectronROIFeatureExtractorRef::GetKernelsData(const Pa
auto jit = CreateJit(kernelName, cldnn_jit, entry_point);
auto& kernel = kd.kernels[0];
FillCLKernelData(kernel, dispatch_data, params.engineInfo, kernelName, jit, entry_point, "", false, false, org_params.number_of_inputs);
FillCLKernelData(kernel, dispatch_data, params.engineInfo, kernelName, jit, entry_point, "", false, false, static_cast<int>(org_params.number_of_inputs));
return {kd};
}

View File

@@ -180,7 +180,7 @@ CommonDispatchData GatherKernelRef::SetDefault(const gather_params& params) cons
auto out_layout = params.outputs[0].GetLayout();
std::vector<std::vector<Tensor::DataChannelName>> dims_by_gws;
int rank = params.outputs[0].Dimentions();
auto rank = params.outputs[0].Dimentions();
if (rank == 4) {
dispatchData.gws = {output.X().v, output.Y().v, output.Feature().v * output.Batch().v};
dims_by_gws = {{Tensor::DataChannelName::X},

View File

@@ -49,8 +49,8 @@ MatrixNmsKernelRef::DispatchData SetDefault(const matrix_nms_params& params, siz
}
std::tuple<int, int> GetMaxBoxes(const matrix_nms_params& params) {
const int classes_num = params.inputs[1].Feature().v;
const int boxes_num = params.inputs[0].Feature().v;
const int classes_num = static_cast<const int>(params.inputs[1].Feature().v);
const int boxes_num = static_cast<const int>(params.inputs[0].Feature().v);
int max_boxes_per_class{boxes_num};
if (params.nms_top_k >= 0)
@@ -79,8 +79,8 @@ KernelsData MatrixNmsKernelRef::GetKernelsData(const Params& params, const optio
constexpr size_t BOX_INFO_SIZE{16};
const int batches_num = new_params.inputs[1].Batch().v;
const int classes_num = new_params.inputs[1].Feature().v;
const int batches_num = static_cast<const int>(new_params.inputs[1].Batch().v);
const int classes_num = static_cast<const int>(new_params.inputs[1].Feature().v);
int max_boxes_per_class, max_boxes_per_batch;
std::tie(max_boxes_per_class, max_boxes_per_batch) = GetMaxBoxes(new_params);

View File

@@ -87,7 +87,7 @@ JitConstants MulticlassNmsKernelRef::GetJitConstants(const multiclass_nms_params
int64_t max_output_boxes_per_class = 0;
if (params.nms_top_k >= 0) {
max_output_boxes_per_class = std::min<int>(num_boxes, params.nms_top_k);
max_output_boxes_per_class = std::min<int>(static_cast<int>(num_boxes), params.nms_top_k);
} else {
max_output_boxes_per_class = num_boxes;
}

View File

@@ -90,7 +90,7 @@ static size_t get_vec_size(const resample_params &params) {
}
static int get_feature_slice_size(const resample_params &params) {
return 16 * get_vec_size(params);
return static_cast<int>(16 * get_vec_size(params));
}
ResampleKernelBase::DispatchData ResampleKernelOpt::SetDefault(const kernel_selector::resample_params &arg) const {

View File

@@ -130,7 +130,7 @@ static std::string GetInputBlockND(const scatter_nd_update_params& params, size_
block_nd_s[rank] = "1";
size_t input_offset = num * 6;
for (int32_t idx = (rank - 1); idx >= 0; --idx) {
for (int32_t idx = rank - 1; idx >= 0; --idx) {
block_nd[idx] = input_dims[idx] * block_nd[idx + 1];
size_t dim_offset = idx < 2 ? idx : idx + 6 - rank;

View File

@@ -592,7 +592,7 @@ void InferRequest::setup_stream_graph() {
auto& streamGraphs = static_cast<CompiledModel*>(_exeNetwork.get())->m_graphs;
if (nullptr != streamExecutor) {
streamID = streamExecutor->GetStreamId();
int numGraphs = streamGraphs.size();
auto numGraphs = streamGraphs.size();
streamID = streamID % numGraphs;
}
m_graph = streamGraphs[streamID];

View File

@@ -127,7 +127,7 @@ void checkInputBlob(const Blob::Ptr &blob,
checkAlloc(nv12_ptr->uv(), str_input_not_allocated);
} else if (auto batched_ptr = blob->as<BatchedBlob>()) {
for (size_t i = 0; i < batched_ptr->size(); i++) {
auto nv12_ptr = getNV12BlobOrException(batched_ptr, i);
auto nv12_ptr = getNV12BlobOrException(batched_ptr, static_cast<int>(i));
checkAlloc(nv12_ptr->y(), str_input_not_allocated);
checkAlloc(nv12_ptr->uv(), str_input_not_allocated);
}
@@ -289,7 +289,7 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data)
auto batched_ptr = data->as<BatchedBlob>();
bool is_batched = batched_ptr != nullptr;
bool is_nv12 = nv12_ptr != nullptr;
int expected_batch = is_batched ? desc.getDims()[0] : 1;
auto expected_batch = is_batched ? static_cast<int>(desc.getDims()[0]) : 1;
if (ColorFormat::NV12 == foundInput->getPreProcess().getColorFormat() &&
m_graph->get_config().get_property(ov::intel_gpu::nv12_two_inputs)) {
// try extracting Y and UV remote blobs from it
@@ -297,12 +297,12 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data)
// that should then go into biplanar NV12 reorder
if (is_nv12 || is_batched) {
int num_blobs = is_batched ? batched_ptr->size() : 1;
auto num_blobs = is_batched ? static_cast<int>(batched_ptr->size()) : 1;
for (auto i = 0; i < expected_batch; i++) {
std::string y_name = name + "_Y" + std::to_string(i);
std::string uv_name = name + "_UV" + std::to_string(i);
if (is_batched) {
int idx = i < num_blobs ? i : num_blobs-1;
int idx = i < num_blobs ? i : static_cast<int>(num_blobs)-1;
nv12_ptr = getNV12BlobOrException(batched_ptr, idx);
}
@@ -352,7 +352,7 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data)
if (m_graph->GetMaxDynamicBatchSize() > 1) {
const auto batch_idx = m_graph->GetInputDynBatchDims()[name].first;
if (batch_idx >= 0)
SetBatch(blobDesc.getDims()[batch_idx]);
SetBatch(static_cast<int>(blobDesc.getDims()[batch_idx]));
}
} else {
size_t blobSize = desc.getLayout() != SCALAR
@@ -531,7 +531,7 @@ void InferRequestLegacy::SetGraph(std::shared_ptr<Graph> graph) {
}
if (m_graph->GetMaxDynamicBatchSize() > 1) {
SetBatch(m_graph->GetMaxDynamicBatchSize());
SetBatch(static_cast<int>(m_graph->GetMaxDynamicBatchSize()));
allocate_inputs_dynamic();
allocate_outputs_dynamic();
} else {
@@ -546,7 +546,7 @@ void InferRequestLegacy::SetBatch(int new_batch) {
if (m_graph->GetMaxDynamicBatchSize() < 0)
IE_THROW() << "Dynamic batch is not enabled.";
if (new_batch < 1 || new_batch > m_graph->GetMaxDynamicBatchSize()) {
if (new_batch < 1 || static_cast<size_t>(new_batch) > m_graph->GetMaxDynamicBatchSize()) {
IE_THROW() << "Invalid dynamic batch size " << new_batch <<
" for this request. Got: " << new_batch << ". Expected value in range [1;" << m_graph->GetMaxDynamicBatchSize() << "]";
}
@@ -735,9 +735,9 @@ void InferRequestLegacy::enqueue() {
bool is_nv12 = nv12_ptr != nullptr;
if (is_nv12 || is_batched) {
int num_blobs = is_batched ? batched_ptr->size() : 1;
int num_blobs = is_batched ? static_cast<int>(batched_ptr->size()) : 1;
int expected_batch = is_batched
? _networkInputs.at(inputName)->getTensorDesc().getDims()[0]
? static_cast<int>(_networkInputs.at(inputName)->getTensorDesc().getDims()[0])
: 1;
for (auto i = 0; i < expected_batch; i++) {
std::string y_name = inputName + "_Y" + std::to_string(i);
@@ -890,7 +890,7 @@ void InferRequestLegacy::setup_stream_graph() {
auto& streamGraphs = static_cast<CompiledModel*>(_exeNetwork.get())->m_graphs;
if (nullptr != streamExecutor) {
streamID = streamExecutor->GetStreamId();
int numGraphs = streamGraphs.size();
auto numGraphs = streamGraphs.size();
streamID = streamID % numGraphs;
}
m_graph = streamGraphs[streamID];
@@ -904,7 +904,7 @@ void InferRequestLegacy::setup_stream_graph() {
// extract new batch size from blob
const auto batch_idx = m_graph->GetInputDynBatchDims()[input.first].first;
if (batch_idx >= 0) {
SetBatch(_inputs[input.first]->getTensorDesc().getDims()[batch_idx]);
SetBatch(static_cast<int>(_inputs[input.first]->getTensorDesc().getDims()[batch_idx]));
break;
}
}

View File

@@ -154,7 +154,7 @@ void createClDnnConstant(Program& p, const ngraph::Shape& constDims, const std::
auto constFormat = cldnn::format::get_default_format(constDims.size());
if (props.needsBatchInterpretation) {
constTensor.batch[0] = constTensor.count();
constTensor.batch[0] = static_cast<cldnn::tensor::value_type>(constTensor.count());
constTensor.feature[0] = 1;
}

View File

@@ -221,7 +221,7 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p
}
}
uint32_t groups = op->get_input_shape(1)[0];
uint32_t groups = static_cast<uint32_t>(op->get_input_shape(1)[0]);
auto weightsName = inputs[1];
auto weights_node = op->get_input_node_shared_ptr(1);

View File

@@ -45,7 +45,7 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngr
}
}
uint32_t blank_index = op->get_input_shape(0).back() - 1;
uint32_t blank_index = static_cast<uint32_t>(op->get_input_shape(0).back() - 1);
if (reordered_inputs.size() == 3) {
auto blank_index_node = std::dynamic_pointer_cast<ngraph::op::v0::Constant>(op->get_input_node_shared_ptr(2));
if (!blank_index_node) {

View File

@@ -199,10 +199,10 @@ void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomL
IE_THROW() << "Invalid input tensor for index: " << iidx;
auto inputDims = op->get_input_shape(iidx);
xDim = inputDims[inputDims.size() - 1];
yDim = dims.size() > 1 ? inputDims[inputDims.size() - 2] : 0;
featureDim = dims.size() > 2 ? inputDims[inputDims.size() - 3] : 0;
batchDim = dims.size() > 3 ? inputDims[inputDims.size() - 4]: 0;
xDim = static_cast<int>(inputDims[inputDims.size() - 1]);
yDim = dims.size() > 1 ? static_cast<int>(inputDims[inputDims.size() - 2]) : 0;
featureDim = dims.size() > 2 ? static_cast<int>(inputDims[inputDims.size() - 3]) : 0;
batchDim = dims.size() > 3 ? static_cast<int>(inputDims[inputDims.size() - 4]) : 0;
}
const std::map<char, int> vars = {
{ 'b', batchDim } , { 'B', batchDim },

View File

@@ -43,8 +43,8 @@ static void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ngraph::op
float eta = 1.0f;
int keep_top_k = attrs.keep_top_k[0];
bool variance_encoded_in_target = attrs.variance_encoded_in_target;
int input_width = attrs.input_width;
int input_height = attrs.input_height;
int input_width = static_cast<int>(attrs.input_width);
int input_height = static_cast<int>(attrs.input_height);
bool normalized = attrs.normalized;
std::string code_type = attrs.code_type;
bool clip_before_nms = attrs.clip_before_nms;

View File

@@ -29,7 +29,7 @@ void createDft(Program& p,
IE_THROW() << "Unsupported parameter nodes type in " << friendly_name << " (" << op->get_type_name() << ")";
}
auto axes = axes_constant->cast_vector<int64_t>();
uint8_t axis_correction = op->get_input_shape(0).size();
uint8_t axis_correction = static_cast<uint8_t>(op->get_input_shape(0).size());
if (direction != cldnn::dft_direction::forward || mode != cldnn::dft_mode::real) {
--axis_correction;
}

View File

@@ -21,7 +21,7 @@ void CreateExperimentalDetectronTopKROIsOp(Program &p,
const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronTopKROIs> &op) {
validate_inputs_count(op, {2});
auto inputs = p.GetInputInfo(op);
auto max_rois = op->get_max_rois();
auto max_rois = static_cast<uint32_t>(op->get_max_rois());
auto layer_name = layer_type_name_ID(op);
auto argmax_layer_name = layer_name + "_topk";
auto top_k_indices = arg_max_min(argmax_layer_name,

View File

@@ -28,9 +28,18 @@ static void CreateExtractImagePatchesOp(Program& p, const std::shared_ptr<ngraph
auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op);
std::vector<uint32_t> sizes = std::vector<uint32_t>(op->get_sizes().begin(), op->get_sizes().end());
std::vector<uint32_t> strides = std::vector<uint32_t>(op->get_strides().begin(), op->get_strides().end());
std::vector<uint32_t> rates = std::vector<uint32_t>(op->get_rates().begin(), op->get_rates().end());
std::vector<uint32_t> sizes;
std::vector<uint32_t> strides;
std::vector<uint32_t> rates;
for (auto size : op->get_sizes()) {
sizes.push_back(static_cast<uint32_t>(size));
}
for (auto stride : op->get_strides()) {
strides.push_back(static_cast<uint32_t>(stride));
}
for (auto rate : op->get_rates()) {
rates.push_back(static_cast<uint32_t>(rate));
}
std::string auto_pad = PadToString(op->get_auto_pad());
auto extractImagePatchesPrim = cldnn::extract_image_patches(layerName,

View File

@@ -18,18 +18,16 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v5::G
auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op);
auto input_rank = op->get_input_partial_shape(0).size();
auto indices_rank = op->get_input_partial_shape(1).size();
auto batch_dims = op->get_batch_dims();
auto input_rank = static_cast<const uint8_t>(op->get_input_partial_shape(0).size());
auto indices_rank = static_cast<const uint8_t>(op->get_input_partial_shape(1).size());
auto batch_dims = static_cast<const uint8_t>(op->get_batch_dims());
auto primitive = cldnn::gather_nd(layerName,
inputs[0],
inputs[1],
input_rank,
indices_rank,
batch_dims,
true);
batch_dims);
p.add_primitive(*op, primitive);
}
@@ -41,10 +39,9 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v8::G
auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op);
auto input_rank = op->get_input_partial_shape(0).size();
auto indices_rank = op->get_input_partial_shape(1).size();
auto batch_dims = op->get_batch_dims();
auto input_rank = static_cast<const uint8_t>(op->get_input_partial_shape(0).size());
auto indices_rank = static_cast<const uint8_t>(op->get_input_partial_shape(1).size());
auto batch_dims = static_cast<const uint8_t>(op->get_batch_dims());
auto primitive = cldnn::gather_nd(layerName,
inputs[0],

View File

@@ -31,7 +31,7 @@ static void CreateLRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::LRN>&
IE_THROW() << "Unsupported axes node type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")";
}
auto axis_value = axis_const->cast_vector<int64_t>();
auto localSize = op->get_nsize();
auto localSize = static_cast<uint32_t>(op->get_nsize());
auto lrnPrim = cldnn::lrn(layerName,
inputs[0],

View File

@@ -41,7 +41,7 @@ static void CreateOneHotOp(Program& p, const std::shared_ptr<ngraph::op::v1::One
if (axis == -1) {
axis = dims.size();
for (int i = dims.size() - 1; i >= 0; i--) {
for (int i = static_cast<int>(dims.size() - 1); i >= 0; i--) {
if (dims[i] == 1)
axis--;
else

View File

@@ -20,15 +20,15 @@ static void CreateProposalOp(Program& p, const std::shared_ptr<ngraph::op::v0::P
auto attrs = op->get_attrs();
float nms_thresh = attrs.nms_thresh;
int min_size = attrs.min_size;
int feature_stride = attrs.feat_stride;
int pre_nms_topn = attrs.pre_nms_topn;
int post_nms_topn = attrs.post_nms_topn;
int min_size = static_cast<int>(attrs.min_size);
int feature_stride = static_cast<int>(attrs.feat_stride);
int pre_nms_topn = static_cast<int>(attrs.pre_nms_topn);
int post_nms_topn = static_cast<int>(attrs.post_nms_topn);
const std::vector<float> ratio = attrs.ratio;
const std::vector<float> scale = attrs.scale;
float box_coordinate_scale = attrs.box_coordinate_scale;
float box_size_scale = attrs.box_size_scale;
int base_size = attrs.base_size;
int base_size = static_cast<int>(attrs.base_size);
std::string framework = attrs.framework;
bool normalize = attrs.normalize;
bool clip_before_nms = attrs.clip_before_nms;

View File

@@ -17,11 +17,11 @@ static void CreateRegionYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0:
auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op);
uint32_t coords = op->get_num_coords();
uint32_t classes = op->get_num_classes();
uint32_t num = op->get_num_regions();
uint32_t coords = static_cast<uint32_t>(op->get_num_coords());
uint32_t classes = static_cast<uint32_t>(op->get_num_classes());
uint32_t num = static_cast<uint32_t>(op->get_num_regions());
bool do_softmax = op->get_do_softmax();
uint32_t mask_size = op->get_mask().size();
uint32_t mask_size = static_cast<uint32_t>(op->get_mask().size());
auto regionPrim = cldnn::region_yolo(layerName,
inputs[0],

View File

@@ -17,7 +17,7 @@ static void CreateReorgYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::
auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op);
uint32_t stride = op->get_strides()[0];
uint32_t stride = static_cast<uint32_t>(op->get_strides()[0]);
auto reorgPrim = cldnn::reorg_yolo(layerName,
inputs[0],

View File

@@ -17,8 +17,8 @@ static void CreateReverseSequenceOp(Program& p, const std::shared_ptr<ngraph::op
auto inputs = p.GetInputInfo(op);
std::string layerName = layer_type_name_ID(op);
size_t batch_axis = op->get_batch_axis();
size_t seq_axis = op->get_sequence_axis();
auto batch_axis = static_cast<uint32_t>(op->get_batch_axis());
auto seq_axis = static_cast<uint32_t>(op->get_sequence_axis());
auto reverseSequencePrim = cldnn::reverse_sequence(layerName,
inputs[0],
inputs[1],

View File

@@ -83,9 +83,9 @@ static void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::L
op->get_input_shape(2).size() != 2)
IE_THROW() << "Wrong input shapes for LSTMCell op " << op->get_friendly_name();
lstm_input_size = in_dims0.back();
lstm_batch_size = in_dims0.at(in_dims0.size()-2);
lstm_hidden_size = out_dims0.back();
lstm_input_size = static_cast<int>(in_dims0.back());
lstm_batch_size = static_cast<int>(in_dims0.at(in_dims0.size()-2));
lstm_hidden_size = static_cast<int>(out_dims0.back());
}
std::vector<cldnn::activation_func> activations;
@ -179,10 +179,10 @@ static void CreateLSTMSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v
op->get_input_shape(2).size() != 3)
IE_THROW() << "Wrong input shapes for LSTMSequence op " << op->get_friendly_name();
lstm_input_size = in_dims0.back();
lstm_sequence_len = in_dims0.at(in_dims0.size() - 2);
lstm_batch_size = in_dims0.at(in_dims0.size() - 3);
lstm_hidden_size = out_dims0.back();
lstm_input_size = static_cast<int>(in_dims0.back());
lstm_sequence_len = static_cast<int>(in_dims0.at(in_dims0.size() - 2));
lstm_batch_size = static_cast<int>(in_dims0.at(in_dims0.size() - 3));
lstm_hidden_size = static_cast<int>(out_dims0.back());
}
std::vector<cldnn::activation_func> activations;

@ -68,8 +68,8 @@ static void CreatePSROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v
std::string layerName = layer_type_name_ID(op);
cldnn::pooling_mode mode = GetPoolingMode(op->get_mode());
int group_size = op->get_group_size();
int output_dim = op->get_output_dim();
int group_size = static_cast<int>(op->get_group_size());
int output_dim = static_cast<int>(op->get_output_dim());
float spatial_scale = op->get_spatial_scale();
int spatial_bins_x = op->get_spatial_bins_x();
int spatial_bins_y = op->get_spatial_bins_y();
@ -96,8 +96,8 @@ static void CreateROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0:
// params
auto out_size = op->get_output_size();
int pooled_height = out_size[0];
int pooled_width = out_size[1];
int pooled_height = static_cast<int>(out_size[0]);
int pooled_width = static_cast<int>(out_size[1]);
float spatial_scale = op->get_spatial_scale();
bool position_sensitive = false;

@ -23,7 +23,7 @@ void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) {
const auto& input_pshape = op->get_input_partial_shape(0);
OPENVINO_ASSERT(input_pshape.is_static(), "Dynamic shapes are not supported for Roll operation yet");
const auto& input_shape = input_pshape.to_shape();
const uint8_t rank = input_shape.size();
const auto rank = static_cast<int>(input_shape.size());
const auto format = cldnn::format::get_default_format(rank);
const auto default_rank = format.dimension();
@ -53,7 +53,7 @@ void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) {
}
// Normalize shift
for (size_t s = 0; s < rank; ++s) {
for (int s = 0; s < rank; ++s) {
auto& sh = shift[s];
const auto dim = static_cast<int32_t>(input_shape[s]);
sh %= dim;
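
Widening rank from uint8_t to int lets the loop compare index and bound in one signed type, which is exactly what -Wsign-compare (MSVC C4018) complains about otherwise. Sketch with an assumed 4-D shape:

#include <vector>

int main() {
    std::vector<int> input_shape{1, 3, 224, 224};
    const auto rank = static_cast<int>(input_shape.size());
    // for (std::size_t s = 0; s < rank; ++s)   // warns: signed/unsigned comparison
    int checksum = 0;
    for (int s = 0; s < rank; ++s)              // index and bound share one signed type
        checksum += input_shape[s];
    return checksum == 452 ? 0 : 1;             // 1 + 3 + 224 + 224
}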

@ -21,14 +21,14 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>&
auto inputs = p.GetInputInfo(op);
if (p.use_new_shape_infer() || op->is_dynamic()) {
cldnn::crop_ngraph_op_mode op_mode = cldnn::crop_ngraph_op_mode::variadic_split;
size_t num_splits = 1;
auto num_splits = static_cast<size_t>(1);
if (ngraph::is_type<ngraph::op::v1::Split>(op)) {
num_splits = ngraph::as_type_ptr<ngraph::op::v1::Split>(op)->get_num_splits();
op_mode = cldnn::crop_ngraph_op_mode::split;
}
for (size_t i = 0; i < op->get_output_size(); i++) {
auto cropPrim = cldnn::crop(get_layer_name(i), inputs, cldnn::tensor(1), cldnn::tensor(0), op_mode, i, num_splits);
auto cropPrim = cldnn::crop(get_layer_name(i), inputs, cldnn::tensor(1), cldnn::tensor(0), op_mode, static_cast<int>(i), num_splits);
p.add_primitive(*op, cropPrim);
}
} else {

@ -91,9 +91,11 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
}
// -1 because it's the position of the ellipsis
unsigned long num_input_axis_after_ellipses = (begin.size() - axis - num_new_axis_after_ellipses - 1);
unsigned long num_of_hidden_dims = input_shape.size() - num_input_axis_after_ellipses
- num_input_axis_before_ellipses;
unsigned long num_input_axis_after_ellipses =
static_cast<unsigned long>(begin.size() - axis - num_new_axis_after_ellipses - 1);
unsigned long num_of_hidden_dims =
static_cast<unsigned long>(input_shape.size() - num_input_axis_after_ellipses
- num_input_axis_before_ellipses);
for (size_t i = 0; i < num_of_hidden_dims; ++i) {
axes.emplace_back(uniq_id);
uniq_id++;
@ -207,7 +209,7 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v
if (axes[i] < 0 || axes[i] > 3) {
IE_THROW() << "Invalid crop axis: " << std::to_string(axes[i]) << " in op " + op->get_friendly_name();
}
offset_tensor[axes[i]] = offset[i];
offset_tensor[axes[i]] = static_cast<uint32_t>(offset[i]);
}
ngraph::Shape crop_shape(reshape_pattern);
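
The casts here only make the size_t-to-unsigned-long narrowing explicit; the adjacent hazard worth knowing is that the subtraction itself runs in unsigned arithmetic, so an intermediate below zero wraps to a huge value instead of going negative. A hedged sketch of that failure mode, with illustrative values:

#include <cstddef>
#include <cstdio>

int main() {
    std::size_t size = 2;                      // e.g. begin.size()
    long axis = 3;
    // std::size_t n = size - axis;            // wraps to a huge positive value
    long n = static_cast<long>(size) - axis;   // stays -1 and can be range-checked
    std::printf("%ld\n", n);
    return n < 0 ? 0 : 1;
}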

@ -22,7 +22,7 @@ static void CreateTopKOp(Program& p, const std::shared_ptr<ngraph::op::v1::TopK>
ov::op::TopKMode mode = op->get_mode();
ov::op::TopKSortType stype = op->get_sort_type();
uint32_t top_k = op->get_k();
uint32_t top_k = static_cast<uint32_t>(op->get_k());
uint64_t chosen_axis = op->get_axis();
if (p.use_new_shape_infer()) {

@ -186,7 +186,7 @@ Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, co
}
int m_bv_sz = GetMaxBatchSizeForSingleProgram();
m_max_batch = m_config.get_property(ov::intel_gpu::max_dynamic_batch);
m_max_batch = static_cast<int>(m_config.get_property(ov::intel_gpu::max_dynamic_batch));
if (dyn_shape_batch_found || m_max_batch > 1) {
// compile log2 networks to serve dynamic batch requests
@ -307,7 +307,7 @@ int Program::GetMaxBatchSizeForSingleProgram() {
auto max_dynamic_batch = m_config.get_property(ov::intel_gpu::max_dynamic_batch);
if (max_dynamic_batch > 1) {
// calculate number of networks necessary based on binary log
unsigned int tmp = max_dynamic_batch;
unsigned int tmp = static_cast<unsigned int>(max_dynamic_batch);
unsigned int mask = 1U << 31;
unsigned int ldigit = 31;
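
The mask and ldigit seeded here drive a scan from bit 31 downward for the highest set bit, i.e. floor(log2(max_dynamic_batch)), which decides how many power-of-two batch programs to compile. A self-contained sketch of that search; the loop body is reconstructed as an assumption, since the hunk cuts off before it:

#include <cstdio>

// floor(log2(v)) by sliding a one-bit mask down to the highest set bit; v must be non-zero.
static unsigned int floor_log2(unsigned int v) {
    unsigned int mask = 1U << 31;
    unsigned int ldigit = 31;
    while (!(v & mask)) {
        mask >>= 1;
        --ldigit;
    }
    return ldigit;
}

int main() {
    std::printf("%u %u %u\n", floor_log2(1), floor_log2(8), floor_log2(10));  // prints: 0 3 3
    return 0;
}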
@ -469,9 +469,10 @@ std::vector<cldnn::input_info> Program::GetInputInfo(const std::shared_ptr<ngrap
if (primitive_ids.find(prevName) == primitive_ids.end()) {
IE_THROW() << "Input " << prevName << " hasn't been found in primitive_ids map";
}
inputInfo.push_back(cldnn::input_info(primitive_ids.at(prevName), is_legacy_multiple_outputs ? 0: op->get_input_source_output(i).get_index()));
inputInfo.push_back(
cldnn::input_info(primitive_ids.at(prevName), is_legacy_multiple_outputs ? 0: static_cast<int>(op->get_input_source_output(i).get_index())));
} else {
inputInfo.push_back(cldnn::input_info(prevName, is_legacy_multiple_outputs ? 0 : op->get_input_source_output(i).get_index()));
inputInfo.push_back(cldnn::input_info(prevName, is_legacy_multiple_outputs ? 0 : static_cast<int>(op->get_input_source_output(i).get_index())));
}
}
return inputInfo;

@ -354,14 +354,14 @@ void TransformationsPipeline::apply(std::shared_ptr<ov::Model> func) {
auto axesVal = axesNode->cast_vector<int>();
auto& mvnShape = mvn->get_output_partial_shape(0);
for (int32_t& axis : axesVal)
axis = axis < 0 ? axis + mvnShape.size() : axis;
axis = axis < 0 ? axis + static_cast<int>(mvnShape.size()) : axis;
std::sort(axesVal.begin(), axesVal.end());
if (mvnShape.size() == 1)
return false;
if (mvnShape.size() > 5 || (mvnShape.size() != axesVal.size() + 1 && mvnShape.size() != axesVal.size() + 2))
return false;
int value = mvnShape.size() - 1;
for (int i = axesVal.size() - 1; i >= 0; i--, value--) {
int value = static_cast<int>(mvnShape.size()) - 1;
for (int i = static_cast<int>(axesVal.size()) - 1; i >= 0; i--, value--) {
if (axesVal[i] != value)
return false;
}
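
The axis normalization above is the usual mapping of Python-style negative axes onto [0, rank): adding the rank turns -1 into the last dimension. Compilable sketch:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const int rank = 4;
    std::vector<int> axes{-1, -2};              // Python-style negative axes
    for (int& axis : axes)
        axis = axis < 0 ? axis + rank : axis;   // -1 -> 3, -2 -> 2
    std::sort(axes.begin(), axes.end());
    for (int a : axes)
        std::printf("%d ", a);                  // prints: 2 3
    return 0;
}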

@ -197,7 +197,10 @@ std::vector<tensor::value_type> layout::get_dims() const {
if (is_dynamic())
throw std::runtime_error("[GPU] get_dims() is called for dynamic shape");
auto shape = size.to_shape();
std::vector<tensor::value_type> res(shape.begin(), shape.end());
std::vector<tensor::value_type> res;
for (auto dim : shape) {
res.push_back(static_cast<tensor::value_type>(dim));
}
if (res.size() < format.dimension())
res.insert(res.end(), format.dimension() - res.size(), 1);
@ -333,7 +336,10 @@ tensor layout::get_tensor() const {
shape = size.to_shape();
}
std::vector<tensor::value_type> dims(shape.begin(), shape.end());
std::vector<tensor::value_type> dims;
for (auto dim : shape) {
dims.push_back(static_cast<tensor::value_type>(dim));
}
auto rank = std::max(format.dimension(), dims.size());
auto default_fmt = format::get_default_format(rank, format::is_weights_format(format), format::is_grouped(format));
@ -513,7 +519,10 @@ ov::PartialShape layout::transform(cldnn::format new_fmt) const {
cldnn::tensor::value_type default_size = -1;
auto shape = size.to_shape();
std::vector<tensor::value_type> dims(shape.begin(), shape.end());
std::vector<tensor::value_type> dims;
for (auto dim : shape) {
dims.push_back(static_cast<tensor::value_type>(dim));
}
const cldnn::format default_fmt = cldnn::format::bfwzyx;
auto old_sizes = convert_dimensions(dims, format.order(), default_fmt.internal_order()); // convert to internal order (bfxyzw)
@ -614,7 +623,7 @@ ov::PartialShape layout::transform(cldnn::format new_fmt) const {
}
auto new_dims = convert_dimensions(new_sizes, default_fmt.internal_order(), new_fmt.order());
for (int idx = (new_dims.size() - 1); idx >= 0; idx--) {
for (int idx = static_cast<int>(new_dims.size() - 1); idx >= 0; idx--) {
if (new_dims[idx] == -1)
new_dims.erase((new_dims.begin() + idx));
else if (new_dims[idx] < 0)
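
All three layout changes trade the iterator-range vector constructor, whose per-element conversion is implicit and warns, for an explicit cast per element. std::transform with a casting lambda is an equivalent spelling; int32_t stands in for tensor::value_type in this sketch:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <vector>

int main() {
    std::vector<std::size_t> shape{1, 3, 224, 224};
    // std::vector<std::int32_t> res(shape.begin(), shape.end());   // implicit narrowing, warns
    std::vector<std::int32_t> res;
    res.reserve(shape.size());
    std::transform(shape.begin(), shape.end(), std::back_inserter(res),
                   [](std::size_t d) { return static_cast<std::int32_t>(d); });
    return res.size() == 4 ? 0 : 1;
}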

@ -47,7 +47,7 @@ GTEST_API_ int main(int argc, char** argv) {
cldnn::device_query::device_id = FLAGS_device_suffix;
//restore cmdline arg for gtest
auto varg=gflags::GetArgvs();
int new_argc=varg.size();
int new_argc = static_cast<int>(varg.size());
char** new_argv=new char*[new_argc];
for(int i=0;i<new_argc;i++)
new_argv[i]=&varg[i][0];

@ -18,7 +18,7 @@ public:
_info = device_info{};
_info.vendor_id = vendor_id;
_info.dev_type = type;
_info.device_id = device_id;
_info.device_id = static_cast<uint32_t>(device_id);
}
device_info get_info() const override { return _info; }

@ -34,8 +34,8 @@ TEST_P(data_layout_test, size_check) {
auto l = layout(p.dt, p.fmt, tensor{default_fmt, p.size});
size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies<size_t>());
size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies<size_t>()) *
size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies<int>());
size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies<int>()) *
data_type_traits::size_of(p.dt);
ASSERT_EQ(l.bytes_count(), expected_bytes_count);
@ -117,8 +117,11 @@ TEST_P(weights_layout_test, size_check) {
auto l = layout(p.dt, p.fmt, tensor{default_fmt, p.size});
size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies<size_t>());
size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies<size_t>()) *
size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies<tensor::value_type>());
size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(),
p.expected_aligned_size.end(),
1,
std::multiplies<tensor::value_type>()) *
data_type_traits::size_of(p.dt);
ASSERT_EQ(l.bytes_count(), expected_bytes_count);
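
Both test fixes hinge on a std::accumulate subtlety: the accumulator's type is deduced from the init argument (the literal 1, an int), not from the functor, so an int init paired with std::multiplies<size_t>() narrows size_t back to int on every step inside accumulate. The commit matches the functor to the int-like accumulator; passing a size_t init is the other consistent spelling, sketched here:

#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

int main() {
    std::vector<std::size_t> dims{2, 3, 4};
    // int init => int accumulator; multiplies<size_t> then narrows on every step:
    // auto n = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<std::size_t>());
    auto n = std::accumulate(dims.begin(), dims.end(), std::size_t{1},
                             std::multiplies<std::size_t>());   // accumulates in size_t
    return n == 24 ? 0 : 1;
}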

@ -61,7 +61,7 @@ TEST_P(crop_si_test, shape_infer) {
for (size_t output_idx = 0; output_idx < p.expected_layouts.size(); output_idx++) {
auto prim_id = "crop.out" + std::to_string(output_idx);
auto crop_prim = std::make_shared<crop>(prim_id, input_prim_ids, p.reference_input_size, p.offsets[output_idx], op_mode, output_idx, p.param_num_splits);
auto crop_prim = std::make_shared<crop>(prim_id, input_prim_ids, p.reference_input_size, p.offsets[output_idx], op_mode, static_cast<int>(output_idx), p.param_num_splits);
auto& crop_node = prog.get_or_create(crop_prim);
for (auto& prim : input_prims) {

@ -182,8 +182,8 @@ public:
return;
const auto block_sizes = format::traits(target_layout).block_sizes;
const auto index_offset = std::accumulate(block_sizes.begin(), block_sizes.end(), 1u,
[](size_t total, const std::pair<size_t, int>& b) {
const auto index_offset = std::accumulate(block_sizes.begin(), block_sizes.end(), 1,
[](int total, const std::pair<size_t, int>& b) {
return total * b.second;
}
);

@ -133,7 +133,10 @@ void start_broadcast_test_dynamic(format input_format,
topology.add(
broadcast("broadcast", input_info("reorder"), input_info("target_shape"), ov::AxisSet(broadcast_axes)));
topology.add(reorder("output", input_info("broadcast"), fmt, input_data_type));
std::vector<int32_t> target_shape_data(output_shape.begin(), output_shape.end());
std::vector<int32_t> target_shape_data;
for (auto out_shape : output_shape) {
target_shape_data.push_back(static_cast<int32_t>(out_shape));
}
set_values<int32_t>(target_shape_mem, target_shape_data);
}

@ -8732,15 +8732,18 @@ public:
auto pad = convolution->pad;
tensor weights_size = generic_params->input_layouts[1].get_tensor();
int kernel_extent_y = dilation[dilation.size() - 2] * (weights_size.spatial[1] - 1) + 1;
int kernel_extent_x = dilation[dilation.size() - 1] * (weights_size.spatial[0] - 1) + 1;
auto kernel_extent_y = dilation[dilation.size() - 2] * (weights_size.spatial[1] - 1) + 1;
auto kernel_extent_x = dilation[dilation.size() - 1] * (weights_size.spatial[0] - 1) + 1;
// Calculate output size
int output_size_y = 1 + (input_size.spatial[1] - kernel_extent_y + 2 * pad[0]) / stride[0];
int output_size_x = 1 + (input_size.spatial[0] - kernel_extent_x + 2 * pad[1]) / stride[1];
int output_features = weights_size.batch[0];
auto output_size_y = 1 + (input_size.spatial[1] - kernel_extent_y + 2 * pad[0]) / stride[0];
auto output_size_x = 1 + (input_size.spatial[0] - kernel_extent_x + 2 * pad[1]) / stride[1];
auto output_features = weights_size.batch[0];
return cldnn::tensor(input_size.batch[0], output_features, output_size_x, output_size_y);
return cldnn::tensor(input_size.batch[0],
static_cast<cldnn::tensor::value_type>(output_features),
static_cast<cldnn::tensor::value_type>(output_size_x),
static_cast<cldnn::tensor::value_type>(output_size_y));
}
void prepare_input_for_test(std::vector<cldnn::memory::ptr>& inputs) override {
@ -8841,19 +8844,19 @@ public:
int output_fi = out_f;
int output_yi = y;
int output_xi = x;
int output_index = (output_bi * output_buffer_size.feature[0] + output_fi) * output_buffer_size.spatial[1] * output_buffer_size.spatial[0];
auto output_index = (output_bi * output_buffer_size.feature[0] + output_fi) * output_buffer_size.spatial[1] * output_buffer_size.spatial[0];
tensor lower_output_padding = convolution->output_paddings[0].lower_size();
output_index += (lower_output_padding.spatial[1] + output_yi) * output_buffer_size.spatial[0] + lower_output_padding.spatial[0] + output_xi;
for (int kernel_y = 0; kernel_y < weights_size.spatial[1]; kernel_y++) {
int input_yi = y * stride[0] - pad[0] + kernel_y * dilation[0];
if ((input_yi < 0) || (input_yi >= input_size.spatial[1])) {
int input_yi = static_cast<int>(y * stride[0] - pad[0] + kernel_y * dilation[0]);
if ((input_yi < 0) || (input_yi >= static_cast<int>(input_size.spatial[1]))) {
continue;
}
for (int kernel_x = 0; kernel_x < weights_size.spatial[0]; kernel_x++) {
int input_xi = x * stride[1] - pad[1] + kernel_x * dilation[1];
if ((input_xi < 0) || (input_xi >= input_size.spatial[0])) {
int input_xi = static_cast<int>(x * stride[1] - pad[1] + kernel_x * dilation[1]);
if ((input_xi < 0) || (input_xi >= static_cast<int>(input_size.spatial[0]))) {
continue;
}
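
input_yi and input_xi have to stay signed: with padding, x * stride - pad is legitimately negative, and the bounds check relies on seeing that negative value, so the fix casts the unsigned attribute math back to int rather than making the index size_t. Sketch with illustrative names; the cast of the wrapped unsigned value yields -2 on two's-complement targets:

#include <cstddef>
#include <cstdio>

int main() {
    const std::size_t stride = 1, pad = 2, dilation = 1;   // unsigned attribute types
    const int x = 0, kernel_x = 0, input_extent = 10;
    // The unsigned expression wraps below zero; the cast recovers the signed index.
    int input_xi = static_cast<int>(x * stride - pad + kernel_x * dilation);   // -2
    if (input_xi < 0 || input_xi >= input_extent)
        std::printf("padding region, skip\n");             // as the reference loop intends
    return 0;
}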

@ -1460,7 +1460,7 @@ TEST(crop_gpu, static_split_batch) {
topology.add(crop("crop3", { input_info("input") }, tensor(1, 4, 1, 1), { tensor(2, 0, 0, 0) }, op_mode, 2));
std::vector<int32_t> input_vec(12);
for (size_t i = 0; i < 12; i++) {
for (int32_t i = 0; i < 12; i++) {
input_vec[i] = i;
}

@ -2408,7 +2408,7 @@ struct deconvolution_random_test_params {
static std::string print_params(const testing::TestParamInfo<deconvolution_random_test_params>& param_info) {
auto& param = param_info.param;
auto to_string_neg = [](int v) {
auto to_string_neg = [](int64_t v) {
if (v >= 0) {
return std::to_string(v);
} else {

@ -2924,10 +2924,10 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast)
return;
}
size_t input_b = 2;
size_t input_f = 72;
size_t input1_y = 10, input1_x = 10;
size_t input2_y = 1, input2_x = 1;
tensor::value_type input_b = 2;
tensor::value_type input_f = 72;
tensor::value_type input1_y = 10, input1_x = 10;
tensor::value_type input2_y = 1, input2_x = 1;
tensor input1_tensor(input_b, input_f, input1_x, input1_y);
tensor input2_tensor(input_b, input_f, input2_x, input2_y);
@ -2989,9 +2989,9 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast_bfyx)
return;
}
size_t input_b = 2;
size_t input_f = 72;
size_t input1_y = 10, input1_x = 10;
tensor::value_type input_b = 2;
tensor::value_type input_f = 72;
tensor::value_type input1_y = 10, input1_x = 10;
tensor input1_tensor(input_b, input_f, input1_x, input1_y);
tensor input2_tensor(1, input_f, 1, 1);
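
The eltwise tests take the other route available for these warnings: rather than casting at each call site, declare the dimensions in the constructor's own value_type so no conversion happens at all. Hedged sketch, with int32_t and a bare struct standing in for cldnn::tensor::value_type and cldnn::tensor:

#include <cstdint>

using value_type = std::int32_t;                        // stand-in for tensor::value_type
struct tensor4 { value_type b, f, x, y; };              // stand-in for cldnn::tensor

int main() {
    value_type input_b = 2, input_f = 72;               // declared in the target type up front
    value_type input1_x = 10, input1_y = 10;
    tensor4 t{input_b, input_f, input1_x, input1_y};    // no static_cast at the call site
    return t.b + t.f == 74 ? 0 : 1;
}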

@ -17,7 +17,7 @@ using namespace ::tests;
template <class T>
int get_not_one_dim(const T& a) {
int ret = a.size();
int ret = static_cast<int>(a.size());
while (ret - 1 >= 0 && a[ret - 1] == 1)
ret--;
return ret;

@ -43,11 +43,11 @@ TEST(lru_cache, basic_data_type)
std::vector<std::pair<int, int>> expected_value;
for (size_t i = ca.size(); i > 0; i--) { // 5, 1, 2, 4
int idx = input_values.size() - i;
auto idx = input_values.size() - i;
expected_value.push_back(input_values[idx]);
}
int idx = expected_value.size() - 1;
auto idx = expected_value.size() - 1;
for (auto key : ca.get_all_keys()) {
ASSERT_EQ(key, expected_value[idx--].first);
}
@ -118,7 +118,7 @@ TEST(lru_cache, custom_data_type) {
expected_keys.push_back(inputs[inputs.size() - i]->key);
}
int idx = expected_keys.size() - 1;
auto idx = expected_keys.size() - 1;
for (auto key : ca.get_all_keys()) {
ASSERT_EQ(key, expected_keys[idx--]);
}

@ -1216,7 +1216,7 @@ static void generic_average_wo_padding_test(format fmt, tensor output, tensor in
tensor off(0);
for (size_t i = 0; i < offset.size(); i++) {
off.spatial[i] = offset[offset.size() - i - 1];
off.spatial[i] = static_cast<tensor::value_type>(offset[offset.size() - i - 1]);
}
auto pool_in = "in";
@ -2081,15 +2081,15 @@ public:
for (size_t fi = 0; fi < this->input_features(); ++fi) {
reference[bi][fi] = reference_pooling<InputT, Mode>(
this->_input[bi][fi],
this->pool_x(),
this->pool_y(),
this->pool_z(),
this->stride_x(),
this->stride_y(),
this->stride_z(),
this->offset_x(),
this->offset_y(),
this->offset_z());
static_cast<int>(this->pool_x()),
static_cast<int>(this->pool_y()),
static_cast<int>(this->pool_z()),
static_cast<int>(this->stride_x()),
static_cast<int>(this->stride_y()),
static_cast<int>(this->stride_z()),
static_cast<int>(this->offset_x()),
static_cast<int>(this->offset_y()),
static_cast<int>(this->offset_z()));
}
}
return reference;
@ -3008,14 +3008,14 @@ public:
cldnn::pooling_mode pooling_mode = pooling->mode;
int pad_width = pooling->pads_begin[1];
int pad_height = pooling->pads_begin[0];
int pad_width = static_cast<int>(pooling->pads_begin[1]);
int pad_height = static_cast<int>(pooling->pads_begin[0]);
int kernel_width = pooling->size[1];
int kernel_height = pooling->size[0];
int kernel_width = static_cast<int>(pooling->size[1]);
int kernel_height = static_cast<int>(pooling->size[0]);
int stride_width = pooling->stride[1];
int stride_height = pooling->stride[0];
int stride_width = static_cast<int>(pooling->stride[1]);
int stride_height = static_cast<int>(pooling->stride[0]);
auto output_tensor = get_expected_output_tensor();
@ -3060,9 +3060,9 @@ public:
const size_t output_index = get_linear_index(output->get_layout(), b, f, h, w, output_desc);
for (int y = pad_y_start; y < pad_y_end; y++)
for (auto y = pad_y_start; y < pad_y_end; y++)
{
for (int x = pad_x_start; x < pad_x_end; x++)
for (auto x = pad_x_start; x < pad_x_end; x++)
{
const size_t input_index = get_linear_index(inputs[0]->get_layout(), b, f, y, x, input_desc);
@ -3081,10 +3081,12 @@ public:
case cldnn::pooling_mode::average:
case cldnn::pooling_mode::average_no_padding:
{
int pool_size_w = pooling->size[1];
int pool_size_h = pooling->size[0];
auto dynamic_mode = (((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) > -2 * pad_width + width ||
(((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) > -2 * pad_height + height;
auto pool_size_w = pooling->size[1];
auto pool_size_h = pooling->size[0];
auto dynamic_mode = static_cast<int>(((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) >
-2 * pad_width + width ||
static_cast<int>(((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) >
-2 * pad_height + height;
auto divider = [=](int actual_x, int actual_y) {
auto x = kernel_width;

@ -36,7 +36,7 @@ void test_basic(bool is_caching_test) {
auto input_data = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } });
auto output_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } });
const int inputSize = input_data->get_layout().count();
const auto inputSize = input_data->get_layout().count();
auto inputVals = generateVector(inputSize);
set_values(input_data, inputVals);
@ -80,7 +80,7 @@ TEST(set_output_memory_gpu, basic_const) {
auto output_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } });
auto output_const_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } });
const int inputSize = input_data->get_layout().count();
const int inputSize = static_cast<int>(input_data->get_layout().count());
auto inputVals = generateVector(inputSize);
auto constVals = generateVector(inputSize);
set_values(input_data, inputVals);
@ -129,7 +129,7 @@ TEST(set_output_memory_gpu, basic_mutable) {
auto md = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } });
auto output_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } });
auto output_mutable_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } });
const int inputSize = input_data->get_layout().count();
const auto inputSize = input_data->get_layout().count();
auto inputVals = generateVector(inputSize);
auto mutableVals = generateVector(inputSize);
set_values(input_data, inputVals);

@ -30,7 +30,7 @@ public:
assert(input_shape_.size() == 4 || input_shape_.size() == 5);
format input_format = input_shape_.size() == 4 ? format::bfyx : format::bfzyx;
layout data_layout ( input_type_, input_format, tensor{input_shape_} );
std::vector<T> input_vals = GenInput(data_layout.get_linear_size());
std::vector<T> input_vals = GenInput(static_cast<int>(data_layout.get_linear_size()));
memory::ptr input = engine_.allocate_memory(data_layout);
set_values(input, input_vals);
topology topology;