diff --git a/src/inference/dev_api/performance_heuristics.hpp b/src/inference/dev_api/performance_heuristics.hpp
index e6f374beca0..563d7627393 100644
--- a/src/inference/dev_api/performance_heuristics.hpp
+++ b/src/inference/dev_api/performance_heuristics.hpp
@@ -29,7 +29,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance(
     const float memThresholdAssumeLimited = MemBandwidthPressure::LIMITED) {
     int total_convs = 0, mem_limited_convs = 0, compute_convs = 0, total_gemms = 0, mem_limited_gemms = 0,
         total_deconvs = 0, compute_deconvs = 0, mem_limited_deconvs = 0;
-    auto memLimitedFactor = [&](int size_data_moved, int datatype_size = 4) -> float {
+    auto memLimitedFactor = [&](size_t size_data_moved, int datatype_size = 4) -> float {
         return (cache_size / (size_data_moved * datatype_size));
     };
     auto isLowPrecision = [&](ngraph::element::Type type) -> bool {
@@ -57,7 +57,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance(
         const bool isBF16orFP16 = isHalfPrecision(type1);
         const int data_type_size = isINT8 ? 1 : isBF16orFP16 ? 2 : 4;

-        int dataSizeInput = 0, dataSizeOutput = 0;
+        size_t dataSizeInput = 0, dataSizeOutput = 0;
         if (!std::strcmp("MatMul", node_name)) {
             const auto input0 = node->input(0);
             const auto input1 = node->input(1);
@@ -103,7 +103,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance(
                     std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies<size_t>());
                 dataSizeOutput =
                     std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies<size_t>());
-                const auto factor = memLimitedFactor(dataSizeInput + dataSizeOutput, data_type_size);
+                const auto factor = memLimitedFactor(static_cast<size_t>(dataSizeInput + dataSizeOutput), data_type_size);
                 mem_limited_convs += factor < memThresholdAssumeLimited;
                 worst_case = std::min(factor, worst_case);
             }
@@ -124,7 +124,7 @@ static MemBandwidthPressure MemBandwidthPressureTolerance(
                     std::accumulate(shapeInput.begin(), shapeInput.end(), size_t(1), std::multiplies<size_t>());
                 dataSizeOutput =
                     std::accumulate(shapeOutput.begin(), shapeOutput.end(), size_t(1), std::multiplies<size_t>());
-                const auto factor = memLimitedFactor(dataSizeInput + dataSizeOutput, data_type_size);
+                const auto factor = memLimitedFactor(static_cast<size_t>(dataSizeInput + dataSizeOutput), data_type_size);
                 mem_limited_deconvs += factor < memThresholdAssumeLimited;
                 worst_case = std::min(factor, worst_case);
             }
diff --git a/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp b/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp
index 8e831a43d8b..39a36443a68 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/plugin/graph.hpp
@@ -59,7 +59,7 @@ public:
     cldnn::engine& get_engine() const { return m_context->get_engine(); }
     const ExecutionConfig& get_config() const { return m_config; }

-    int GetMaxDynamicBatchSize() const { return m_config.get_property(ov::intel_gpu::max_dynamic_batch); }
+    size_t GetMaxDynamicBatchSize() const { return m_config.get_property(ov::intel_gpu::max_dynamic_batch); }
     const std::map<std::string, cldnn::layout>& GetInputLayouts() const { return m_program->GetInputLayouts(); }
     const InferenceEngine::InputsDataMap GetNetworkInputs() const { return m_program->GetNetworkInputs(); }
     const InferenceEngine::OutputsDataMap GetNetworkOutputs() const { return m_program->GetNetworkOutputs(); }
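A note on the std::accumulate fixes above (the same pattern recurs in fully_connected.cpp, fully_connected_onednn.cpp and kernel_selector.cpp below): std::accumulate's accumulator type is the type of its init argument, not the container's element type, so a plain `1` folds a size_t product through int and truncates. A minimal standalone sketch of the difference, using only the standard library (values are illustrative):

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    int main() {
        // Shape with 2^33 elements: the product does not fit in 32-bit int.
        std::vector<size_t> shape{1u << 16, 1u << 16, 2};

        // The init argument's type is the accumulator type, so a plain `1`
        // folds through int and truncates on every assignment (MSVC C4244).
        int bad = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<size_t>());

        // A size_t init keeps the whole fold in 64-bit arithmetic.
        size_t good = std::accumulate(shape.begin(), shape.end(), size_t(1), std::multiplies<size_t>());

        return static_cast<size_t>(bad) == good ? 0 : 1;  // returns 1 on 64-bit targets
    }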
diff --git a/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp b/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp
index 6da5ce80d20..cabb7d7be36 100644
--- a/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp
+++ b/src/plugins/intel_gpu/include/intel_gpu/primitives/arg_max_min.hpp
@@ -103,7 +103,9 @@ struct arg_max_min : public primitive_base<arg_max_min> {
                values_first == rhs_casted.values_first;
     }

-    uint32_t get_output_nums() const { return (input_size() == 3 ? 2 : output_size()); }
+    size_t get_output_nums() const {
+        return (input_size() == 3 ? 2 : output_size());
+    }
     bool has_second_output() const { return get_output_nums() == 2; }
     bool use_multiple_outputs() const { return input_size() != 3; }
diff --git a/src/plugins/intel_gpu/src/graph/broadcast.cpp b/src/plugins/intel_gpu/src/graph/broadcast.cpp
index 02a57e35b93..348bac0081c 100644
--- a/src/plugins/intel_gpu/src/graph/broadcast.cpp
+++ b/src/plugins/intel_gpu/src/graph/broadcast.cpp
@@ -88,7 +88,7 @@ std::vector<layout> broadcast_inst::calc_output_layouts(broadcast_node const& /*node*/,
         if (input1.is_static()) {
             output_rank = input1.get_dim(0);   // target shape rank is set as second input.
         }
-        output_shapes[0] = ShapeType::dynamic(std::max(output_rank, static_cast(1)));
+        output_shapes[0] = ShapeType::dynamic(std::max(output_rank, 1));
     }

     format output_format = format::adjust_to_rank(input0_layout.format, output_shapes[0].size());
diff --git a/src/plugins/intel_gpu/src/graph/convolution.cpp b/src/plugins/intel_gpu/src/graph/convolution.cpp
index a50b2f59c0b..8c84a8b2c84 100644
--- a/src/plugins/intel_gpu/src/graph/convolution.cpp
+++ b/src/plugins/intel_gpu/src/graph/convolution.cpp
@@ -42,13 +42,13 @@ layout convolution_inst::calc_output_layout(convolution_node const& node, kernel_impl_params const& impl_param) {
         output_type = data_types::f32;
     }

-    uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-    uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-    uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+    auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
+    auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
+    auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;

-    uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-    uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-    uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+    auto dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
+    auto dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
+    auto dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;

     // TODO: Consider moving general parameter verification to arguments constructor.
     CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
@@ -249,13 +249,13 @@ std::vector<layout> convolution_inst::calc_output_layouts(convolution_node const& node,
         output_type = data_types::f32;
     }

-    uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-    uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-    uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+    auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
+    auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
+    auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;

-    uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-    uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-    uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+    auto dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
+    auto dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
+    auto dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;

     // TODO: Consider moving general parameter verification to arguments constructor.
     CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
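On the stride/dilation changes just above: the containers hold size_t values, so forcing the result into uint32_t narrows, while `auto` deduces the conditional operator's common type and keeps the full width. A small sketch of the deduction rule (the vector type is assumed for illustration):

    #include <cstdint>
    #include <vector>

    int main() {
        std::vector<uint64_t> stride{2, 2};  // e.g. ov::Strides holds size_t values

        // Narrowing: 64-bit -> uint32_t may truncate and triggers C4244/-Wconversion.
        uint32_t y_narrow = stride.size() >= 2 ? stride[stride.size() - 2] : 1;

        // 'auto' deduces the conditional's common type (uint64_t here: the int
        // literal 1 is converted to the vector's element type), so no narrowing.
        auto y_wide = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
        static_assert(sizeof y_wide == sizeof(uint64_t), "common type is uint64_t");

        return (y_narrow == y_wide) ? 0 : 1;
    }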
diff --git a/src/plugins/intel_gpu/src/graph/deconvolution.cpp b/src/plugins/intel_gpu/src/graph/deconvolution.cpp
index 6fb1d8496de..af71244e3b9 100644
--- a/src/plugins/intel_gpu/src/graph/deconvolution.cpp
+++ b/src/plugins/intel_gpu/src/graph/deconvolution.cpp
@@ -80,14 +80,17 @@ layout deconvolution_inst::calc_output_layout(deconvolution_node const& node, kernel_impl_params const& impl_param) {
                           3,
                           "As for now, deconvolutions with more than 3 dimensions are not supported");

-    int32_t x = off_factor * pad[pad.size() - 1] + (input_layout.spatial(0) - 1) * strd[strd.size() - 1] + weights_layout.spatial(0);
+    int32_t x = static_cast<int32_t>(
+        off_factor * pad[pad.size() - 1] + (input_layout.spatial(0) - 1) * strd[strd.size() - 1] + weights_layout.spatial(0));
     int32_t y = 1;
     if (spatial_dims > 1) {
-        y = off_factor * pad[pad.size() - 2] + (input_layout.spatial(1) - 1) * strd[strd.size() - 2] + weights_layout.spatial(1);
+        y = static_cast<int32_t>(
+            off_factor * pad[pad.size() - 2] + (input_layout.spatial(1) - 1) * strd[strd.size() - 2] + weights_layout.spatial(1));
     }
     int32_t z = 1;
     if (spatial_dims > 2) {
-        z = off_factor * pad[pad.size() - 3] + (input_layout.spatial(2) - 1) * strd[strd.size() - 3] + weights_layout.spatial(2);
+        z = static_cast<int32_t>(
+            off_factor * pad[pad.size() - 3] + (input_layout.spatial(2) - 1) * strd[strd.size() - 3] + weights_layout.spatial(2));
     }

     tensor output_size(input_layout.batch(),
diff --git a/src/plugins/intel_gpu/src/graph/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/fully_connected.cpp
index 80a3b97b844..53e1ff3e141 100644
--- a/src/plugins/intel_gpu/src/graph/fully_connected.cpp
+++ b/src/plugins/intel_gpu/src/graph/fully_connected.cpp
@@ -107,7 +107,7 @@ layout fully_connected_inst::calc_output_layout(fully_connected_node const& node,
     auto reshape_to_2d = [](const ov::PartialShape& shape, int64_t feature) {
         auto staticShape = shape.to_shape();
-        size_t total = std::accumulate(staticShape.begin(), staticShape.end(), 1, std::multiplies<size_t>());
+        size_t total = std::accumulate(staticShape.begin(), staticShape.end(), static_cast<size_t>(1), std::multiplies<size_t>());
         std::vector<int64_t> reshapeSize = { static_cast<int64_t>(total) / feature, feature };
         return reshapeSize;
     };
diff --git a/src/plugins/intel_gpu/src/graph/gather.cpp b/src/plugins/intel_gpu/src/graph/gather.cpp
index 7c022b1be2b..702a4989c07 100644
--- a/src/plugins/intel_gpu/src/graph/gather.cpp
+++ b/src/plugins/intel_gpu/src/graph/gather.cpp
@@ -17,7 +17,10 @@ layout gather_inst::calc_output_layout(gather_node const& node, kernel_impl_params const& impl_param) {
     auto desc = impl_param.typed_desc<gather>();
     auto input_layout = impl_param.get_input_layout();

-    std::vector<tensor::value_type> dims_converted(desc->output_shape.begin(), desc->output_shape.end());
+    std::vector<tensor::value_type> dims_converted;
+    for (auto dim : desc->output_shape) {
+        dims_converted.push_back(static_cast<tensor::value_type>(dim));
+    }
     // extend shape to 4d
     for (size_t i = dims_converted.size(); i < 4; i++)
         dims_converted.push_back(1);
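On the dims_converted rewrite in gather.cpp above (strided_slice.cpp and the OCL resample/slice kernels later in this diff get the identical treatment): the iterator-range constructor performs the element conversion implicitly, which is exactly what the compiler flags, while the loop spells the narrowing out. A self-contained sketch, where value_type stands in for cldnn's tensor::value_type:

    #include <cstdint>
    #include <vector>

    using value_type = int32_t;  // stand-in for cldnn's tensor::value_type

    int main() {
        std::vector<int64_t> shape{1, 3, 224, 224};

        // Range constructor: each element is converted implicitly, which is
        // the conversion the compiler warns about (C4244).
        std::vector<value_type> implicit_copy(shape.begin(), shape.end());

        // Explicit loop: the same narrowing, but spelled out with static_cast.
        std::vector<value_type> explicit_copy;
        explicit_copy.reserve(shape.size());
        for (auto dim : shape)
            explicit_copy.push_back(static_cast<value_type>(dim));

        return implicit_copy == explicit_copy ? 0 : 1;
    }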
diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp
index 40229fe358c..e4379d2c959 100644
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp
+++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/pre_replace_deconv.cpp
@@ -225,7 +225,7 @@ void pre_replace_deconv::run(program& p) {
                 p.rename(deconv_node, rename_id);

                 // reshape weights
-                int pixel_shuffle_size = scale_factor * scale_factor;
+                auto pixel_shuffle_size = static_cast<int>(scale_factor * scale_factor);
                 int kernel_size = 5;
                 tensor target_weights_size = { pixel_shuffle_size, filter_layout.feature(), kernel_size, kernel_size };
                 auto target_weights_layout = layout{ weights_layout.data_type, weights_layout.format, target_weights_size };
@@ -252,7 +252,7 @@ void pre_replace_deconv::run(program& p) {
                                        static_cast<int>(filter_layout.feature()),
                                        static_cast<int>(filter_layout.spatial(0)),
                                        static_cast<int>(filter_layout.spatial(1)),
-                                       scale_factor,
+                                       static_cast<int>(scale_factor),
                                        subpixel_weights);

                 if (weights_data_type == data_types::f16) {
diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp
index c7860dc5ac9..9460351beb5 100644
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp
+++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_padding.cpp
@@ -102,7 +102,7 @@ void prepare_padding::run(program& p) {
             // WA for this format. sliding window needs to be fixed --perf degradation for IncepctionV1 type models
             tensor size(1);
             for (size_t i = 0; i < prim->size.size(); i++) {
-                size.spatial[i] = prim->size[prim->size.size() - i - 1];
+                size.spatial[i] = static_cast<tensor::value_type>(prim->size[prim->size.size() - i - 1]);
             }

             if (node->get_output_layout().format == format::b_fs_yx_fsv16)
@@ -183,13 +183,13 @@ void prepare_padding::run(program& p) {
         auto pad = conv->pad;
         auto stride = conv->stride;
         auto dilation = conv->dilation;
-        uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-        uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-        uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+        uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
+        uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
+        uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;

-        uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-        uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-        uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+        uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
+        uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
+        uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;

         tensor::value_type pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0;
         tensor::value_type pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0;
@@ -277,9 +277,15 @@ void prepare_padding::run(program& p) {
         auto padding_begin_x = std::max(pad_x, 0);
         auto padding_begin_y = std::max(pad_y, 0);
         auto padding_begin_z = std::max(pad_z, 0);
-        auto padding_end_x = std::max(input_limit_x - prev_prim_output_layout.spatial(0), 0);
-        auto padding_end_y = std::max(input_limit_y - prev_prim_output_layout.spatial(1), 0);
-        auto padding_end_z = std::max(input_limit_z - prev_prim_output_layout.spatial(2), 0);
+        auto padding_end_x = std::max(
+            static_cast<tensor::value_type>(input_limit_x) - prev_prim_output_layout.spatial(0),
+            0);
+        auto padding_end_y = std::max(
+            static_cast<tensor::value_type>(input_limit_y) - prev_prim_output_layout.spatial(1),
+            0);
+        auto padding_end_z = std::max(
+            static_cast<tensor::value_type>(input_limit_z) - prev_prim_output_layout.spatial(2),
+            0);

         cldnn::padding needed_padding({0, 0, padding_begin_x, padding_begin_y, padding_begin_z},
                                       {0, 0, padding_end_x, padding_end_y, padding_end_z},
                                       0);
         needed_padding = padding::max(prev_prim_output_layout.data_padding, needed_padding);
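The padding_end changes in prepare_padding.cpp above hinge on signedness: input_limit_* comes out of unsigned arithmetic, layout::spatial() is signed, and std::max refuses mixed argument types. A sketch of the failure mode the casts avoid (the variable types are assumed from the surrounding code):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    int main() {
        size_t input_limit_x = 3;   // computed from unsigned sizes
        int32_t spatial_x = 5;      // layout dimensions are signed int32_t

        // Unsigned arithmetic: 3 - 5 wraps to a huge positive value, and
        // std::max(size_t, int) does not even compile without a cast:
        // auto wrong = std::max(input_limit_x - spatial_x, 0);  // ill-formed

        // Casting to the signed type first keeps the subtraction signed, so
        // a negative overhang correctly clamps to zero.
        auto padding_end = std::max(static_cast<int32_t>(input_limit_x) - spatial_x, 0);
        return padding_end;  // 0, not SIZE_MAX - 1
    }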
diff --git a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
index dea6b9c7cf5..938b599e85c 100644
--- a/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
+++ b/src/plugins/intel_gpu/src/graph/graph_optimizer/prepare_primitive_fusing.cpp
@@ -275,7 +275,7 @@ void prepare_primitive_fusing::fuse_bias(program &p) {
         for (size_t i = 0; i < const_shape.size(); ++i) {
             if (const_shape[i] != 1) {
                 count_elements_not_one++;
-                idx_element_not_one = i;
+                idx_element_not_one = static_cast<int>(i);
             }
             if (count_elements_not_one > 1)
                 break;
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp
index 98a6cf168d4..150ae0e6122 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/arg_max_min.cpp
@@ -66,7 +66,7 @@ public:
         const auto& mode = primitive->mode;
         const auto& sort_type = primitive->sort;
         const auto& values_first = primitive->values_first;
-        const auto& outputs_num = (primitive->input_size() == 3 ? 2 : primitive->output_size());
+        const auto& outputs_num = primitive->input_size() == 3 ? 2 : primitive->output_size();

         auto argm_params = get_default_params<kernel_selector::arg_max_min_params>(impl_param);
         auto argm_optional_params =
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp
index 4d91b87fc03..9292a491106 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/binary_convolution.cpp
@@ -59,14 +59,14 @@ public:
         uint32_t pad_x = std::max(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0);
         params.padding = {pad_x, pad_y, pad_z};

-        uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-        uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-        uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+        uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
+        uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
+        uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
         params.stride = {stride_x, stride_y, stride_z};

-        uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-        uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-        uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+        uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
+        uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
+        uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
         params.dilation = {dilation_x, dilation_y, dilation_z};

         return {params, optional_params};
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp
index 80485d4aef2..2280b00c76f 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/concatenation.cpp
@@ -13,8 +13,8 @@ namespace ocl {
 namespace {
 kernel_selector::concat_axis convert_axis(int64_t axis, size_t rank) {
-    unsigned cldnn_axis = axis >= 0 ? axis : axis + static_cast<unsigned>(rank);
-    if (cldnn_axis >= rank)
+    auto cldnn_axis = axis >= 0 ? axis : axis + static_cast<int64_t>(rank);
+    if (cldnn_axis >= static_cast<int64_t>(rank))
         IE_THROW() << "Concatenation axis exceeds number of dimensions";

     // Difference in dimension ordering between IE and GPU plugin,
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp
index ec822fd2a35..f7181441321 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/convolution.cpp
@@ -78,14 +78,14 @@ public:
         uint32_t pad_x = std::max(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0);
         conv_params.padding = {pad_x, pad_y, pad_z};

-        uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-        uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-        uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+        uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
+        uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
+        uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
         conv_params.stride = {stride_x, stride_y, stride_z};

-        uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-        uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-        uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+        uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
+        uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
+        uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
         conv_params.dilation = {dilation_x, dilation_y, dilation_z};

         if ((impl_param.input_layouts[0].data_type == data_types::u8 ||
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/crop.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/crop.cpp
index 760e7d6ba4a..b11ba4f4956 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/crop.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/crop.cpp
@@ -45,7 +45,7 @@ public:
         auto runtime_offset = convert_data_tensor(impl_param.get_input_layout(), impl_param.input_offsets[0]).GetFirstElementOffset();
         kernel_selector::ScalarDescriptor s;
         s.t = kernel_selector::ScalarDescriptor::Types::UINT32;
-        s.v.u32 = runtime_offset;
+        s.v.u32 = static_cast<uint32_t>(runtime_offset);
         OPENVINO_ASSERT(_kernel_data.kernels[0].params.scalars.size() == 1,
                         "[GPU] Scalar field for runtime offset is not added for crop shape agnostic impl");
         _kernel_data.kernels[0].params.scalars[0] = s;
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/deconvolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/deconvolution.cpp
index d25826c63ca..c1cec6e088e 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/deconvolution.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/deconvolution.cpp
@@ -60,14 +60,14 @@ public:
         uint32_t pad_x = std::max(pad.size() >= 1 ? pad[pad.size() - 1] : 0, 0);
         params.padding = {pad_x, pad_y, pad_z};

-        uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-        uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-        uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+        uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
+        uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
+        uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
         params.stride = {stride_x, stride_y, stride_z};

-        uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-        uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-        uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+        uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
+        uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
+        uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
         params.dilation = {dilation_x, dilation_y, dilation_z};

         return {params, optional_params};
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/deformable_convolution.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/deformable_convolution.cpp
index 048a83f3cec..0860f4f3678 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/deformable_convolution.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/deformable_convolution.cpp
@@ -102,14 +102,14 @@ public:
         params.padding = {pad_x, pad_y, pad_z};

-        uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-        uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-        uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+        uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
+        uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
+        uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
         params.stride = {stride_x, stride_y, stride_z};

-        uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-        uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-        uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+        uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
+        uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
+        uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
         params.dilation = {dilation_x, dilation_y, dilation_z};

         params.kernelSize = { (uint32_t)kernel_size.spatial[0],
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp
index c6e703d63ff..64d6ac2197b 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/fully_connected.cpp
@@ -41,7 +41,8 @@ public:
         auto reshape_to_2d = [](const ov::PartialShape& shape, const ov::Dimension& feature) {
             if (shape.is_static()) {
                 auto static_shape = shape.to_shape();
-                size_t total = std::accumulate(static_shape.begin(), static_shape.end(), 1, std::multiplies<size_t>());
+                size_t total =
+                    std::accumulate(static_shape.begin(), static_shape.end(), size_t(1), std::multiplies<size_t>());
                 auto dim = feature.is_static() ? feature.get_length() : static_cast<int64_t>(static_shape.back());
                 return ov::PartialShape{ static_cast<int64_t>(total) / dim, dim };
             } else {
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/non_zero.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/non_zero.cpp
index e796cec93e0..0c0a904847b 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/non_zero.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/non_zero.cpp
@@ -55,7 +55,7 @@ struct gather_nonzero_impl : typed_primitive_impl_ocl<gather_nonzero> {
         auto optional_params = get_default_optional_params<kernel_selector::gather_nonzero_optional_params>(impl_param.get_program());

         params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1)));
-        params.ov_input_rank = impl_param.get_input_layout().get_partial_shape().size();
+        params.ov_input_rank = static_cast<int32_t>(impl_param.get_input_layout().get_partial_shape().size());
         return {params, optional_params};
     }
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/permute.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/permute.cpp
index 3f8842d1e5b..b853d89823d 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/permute.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/permute.cpp
@@ -24,7 +24,7 @@ inline std::vector<uint16_t> convert_permute_order(const std::vector<uint16_t>& ie_order,
     // 1. Switch permute order values for spatial dims
     for (auto const& o : ie_order_aligned) {
         if (o >= 2)
-            cldnn_order.push_back(1 + ie_order_aligned.size() - o);
+            cldnn_order.push_back(1 + static_cast<uint16_t>(ie_order_aligned.size()) - o);
         else
             cldnn_order.push_back(o);
     }
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/pooling.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/pooling.cpp
index de63adf0c27..40b71ceb58b 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/pooling.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/pooling.cpp
@@ -138,9 +138,9 @@ public:
         else
             pp.divMode = cldnn_2_kernel_divider_mode(primitive->mode);

-        uint32_t kernel_z = kernel.size() >= 3 ? kernel[kernel.size() - 3] : 1;
-        uint32_t kernel_y = kernel.size() >= 2 ? kernel[kernel.size() - 2] : 1;
-        uint32_t kernel_x = kernel.size() >= 1 ? kernel[kernel.size() - 1] : 1;
+        uint32_t kernel_z = kernel.size() >= 3 ? static_cast<uint32_t>(kernel[kernel.size() - 3]) : 1;
+        uint32_t kernel_y = kernel.size() >= 2 ? static_cast<uint32_t>(kernel[kernel.size() - 2]) : 1;
+        uint32_t kernel_x = kernel.size() >= 1 ? static_cast<uint32_t>(kernel[kernel.size() - 1]) : 1;
         pp.poolSize = {kernel_x, kernel_y, kernel_z};

         uint32_t pad_z = std::max(pads_begin.size() >= 3 ? pads_begin[pads_begin.size() - 3] : 0, 0);
@@ -148,14 +148,14 @@ public:
         uint32_t pad_x = std::max(pads_begin.size() >= 1 ? pads_begin[pads_begin.size() - 1] : 0, 0);
         pp.poolPad = {pad_x, pad_y, pad_z};

-        uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-        uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-        uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+        uint32_t stride_z = stride.size() >= 3 ? static_cast<uint32_t>(stride[stride.size() - 3]) : 1;
+        uint32_t stride_y = stride.size() >= 2 ? static_cast<uint32_t>(stride[stride.size() - 2]) : 1;
+        uint32_t stride_x = stride.size() >= 1 ? static_cast<uint32_t>(stride[stride.size() - 1]) : 1;
         pp.poolStride = {stride_x, stride_y, stride_z};

-        uint32_t dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-        uint32_t dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-        uint32_t dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+        uint32_t dilation_z = dilation.size() >= 3 ? static_cast<uint32_t>(dilation[dilation.size() - 3]) : 1;
+        uint32_t dilation_y = dilation.size() >= 2 ? static_cast<uint32_t>(dilation[dilation.size() - 2]) : 1;
+        uint32_t dilation_x = dilation.size() >= 1 ? static_cast<uint32_t>(dilation[dilation.size() - 1]) : 1;
         pp.poolDilation = {dilation_x, dilation_y, dilation_z};

         return {params, optional_params};
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/prior_box.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/prior_box.cpp
index aeaf96f98de..fd6f6ca72ba 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/prior_box.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/prior_box.cpp
@@ -71,7 +71,7 @@ struct prior_box_impl : typed_primitive_impl_ocl<prior_box> {
         params.widths = primitive->widths;
         params.heights = primitive->heights;
         const auto output_shape = impl_param.get_output_layout().get_shape();
-        params.num_priors_4 = output_shape[1] / (params.width * params.height);
+        params.num_priors_4 = static_cast<uint32_t>(output_shape[1] / (params.width * params.height));
         params.inputs.push_back(convert_data_tensor(impl_param.get_input_layout(1)));

         return {params, {}};
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/reduce.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/reduce.cpp
index f7d8ba3f96c..7bfda772ac1 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/reduce.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/reduce.cpp
@@ -22,7 +22,7 @@ static std::vector<uint16_t> convert_axes(std::vector<int64_t> axes, size_t rank) {
         if (axis < 0)
             axis = axis + rank;

-        converted_axes.push_back(rank + 1 - axis);
+        converted_axes.push_back(static_cast<uint16_t>(rank + 1 - axis));
     }

     return converted_axes;
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp
index c01d6bc6f2b..bd481afb847 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/resample.cpp
@@ -79,7 +79,9 @@ inline std::vector<int32_t> convert_pads(const std::vector<size_t>& pad, size_t rank) {
     if (pad.empty()) {
         new_pad = std::vector<int32_t>(rank, 0);
     } else {
-        new_pad = std::vector<int32_t>(pad.begin(), pad.end());
+        for (auto p : pad) {
+            new_pad.push_back(static_cast<int32_t>(p));
+        }
         if (new_pad.size() > 2)
             std::reverse(new_pad.begin() + 2, new_pad.end());
         for (size_t i = new_pad.size(); i < rank || i < 4; ++i)
diff --git a/src/plugins/intel_gpu/src/graph/impls/ocl/slice.cpp b/src/plugins/intel_gpu/src/graph/impls/ocl/slice.cpp
index 898de26502a..19dfd79bbcf 100644
--- a/src/plugins/intel_gpu/src/graph/impls/ocl/slice.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/ocl/slice.cpp
@@ -48,7 +48,11 @@ std::vector extractIntegerData(const data_node& node, const stream
 std::vector<std::int32_t> extractShape(kernel_selector::Tensor::DataTensor& tensor) {
     auto logical_dims = tensor.LogicalDims();
     // LogicalDims method returns dims in reversed order
-    return {logical_dims.rbegin(), logical_dims.rend()};
+    std::vector<std::int32_t> reverse_logical_dims;
+    for (auto it = logical_dims.rbegin(); it != logical_dims.rend(); ++it) {
+        reverse_logical_dims.push_back(static_cast<std::int32_t>(*it));
+    }
+    return reverse_logical_dims;
 }

 }  // namespace
diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/concatenation_onednn.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/concatenation_onednn.cpp
index aa798b390f8..d991b891e62 100644
--- a/src/plugins/intel_gpu/src/graph/impls/onednn/concatenation_onednn.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/onednn/concatenation_onednn.cpp
@@ -34,8 +34,8 @@ protected:
         int input_idx = DNNL_ARG_MULTIPLE_SRC;
         for (size_t i = 0; i < instance.inputs_memory_count(); i++) {
             auto& input = instance.input_memory(i);
-            auto offset = onednn::get_f_offset(instance.get_input_layout(), _pd.dnnl::primitive_desc_base::src_desc(i));
-            args.insert({input_idx++, input.get_onednn_memory(_pd.dnnl::primitive_desc_base::src_desc(i), offset)});
+            auto offset = onednn::get_f_offset(instance.get_input_layout(), _pd.dnnl::primitive_desc_base::src_desc(static_cast<int>(i)));
+            args.insert({input_idx++, input.get_onednn_memory(_pd.dnnl::primitive_desc_base::src_desc(static_cast<int>(i)), offset)});
         }

         {
diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/fully_connected_onednn.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/fully_connected_onednn.cpp
index b8b1fb70bd5..7cf62d141e4 100644
--- a/src/plugins/intel_gpu/src/graph/impls/onednn/fully_connected_onednn.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/onednn/fully_connected_onednn.cpp
@@ -24,7 +24,8 @@ struct fully_connected_onednn : typed_primitive_onednn_impl<fully_connected> {
 private:
     static std::vector<int64_t> reshape_to_2d(const ov::PartialShape& shape, int64_t feature) {
         auto staticShape = shape.to_shape();
-        size_t total = std::accumulate(staticShape.begin(), staticShape.end(), 1, std::multiplies<size_t>());
+        size_t total =
+            std::accumulate(staticShape.begin(), staticShape.end(), static_cast<size_t>(1), std::multiplies<size_t>());
         std::vector<int64_t> reshapeSize = { static_cast<int64_t>(total) / feature, feature };
         return reshapeSize;
     }
diff --git a/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp b/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp
index 6899ef9bc3c..fd9af3a1d67 100644
--- a/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp
+++ b/src/plugins/intel_gpu/src/graph/include/sliding_window_utils.hpp
@@ -104,9 +104,9 @@ inline tensor calc_sliding_window_output_range(const tensor& input_size,
     auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
     auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;

-    tensor::value_type dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-    tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-    tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+    tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast<tensor::value_type>(dilation[dilation.size() - 3]) : 1;
+    tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast<tensor::value_type>(dilation[dilation.size() - 2]) : 1;
+    tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast<tensor::value_type>(dilation[dilation.size() - 1]) : 1;

     auto pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0;
     auto pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0;
@@ -161,9 +161,9 @@ inline tensor calc_sliding_window_output_range(const tensor& input_size,
     int64_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
     int64_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;

-    tensor::value_type dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-    tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-    tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+    tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast<tensor::value_type>(dilation[dilation.size() - 3]) : 1;
+    tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast<tensor::value_type>(dilation[dilation.size() - 2]) : 1;
+    tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast<tensor::value_type>(dilation[dilation.size() - 1]) : 1;

     int64_t pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0;
     int64_t pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0;
@@ -347,9 +347,9 @@ inline tensor calc_sliding_window_needed_input_range(const tensor& output_size,
     auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
     auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;

-    tensor::value_type dilation_z = dilation.size() >= 3 ? dilation[dilation.size() - 3] : 1;
-    tensor::value_type dilation_y = dilation.size() >= 2 ? dilation[dilation.size() - 2] : 1;
-    tensor::value_type dilation_x = dilation.size() >= 1 ? dilation[dilation.size() - 1] : 1;
+    tensor::value_type dilation_z = dilation.size() >= 3 ? static_cast<tensor::value_type>(dilation[dilation.size() - 3]) : 1;
+    tensor::value_type dilation_y = dilation.size() >= 2 ? static_cast<tensor::value_type>(dilation[dilation.size() - 2]) : 1;
+    tensor::value_type dilation_x = dilation.size() >= 1 ? static_cast<tensor::value_type>(dilation[dilation.size() - 1]) : 1;

     auto pad_z = pad.size() >= 3 ? pad[pad.size() - 3] : 0;
     auto pad_y = pad.size() >= 2 ? pad[pad.size() - 2] : 0;
diff --git a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
index f49827fb0eb..e2bbf5886a3 100644
--- a/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
+++ b/src/plugins/intel_gpu/src/graph/layout_optimizer.cpp
@@ -826,11 +826,11 @@ static bool is_node_for_onednn(deconvolution_node const& node) {
 static bool is_node_for_onednn(fully_connected_node const& node) {
     auto fc_prim = node.get_primitive();
     auto ps = node.get_output_layout().get_partial_shape();
-    int non_spatial_count = 2 + (fc_prim->input_size == 3 ? 1 : 0);
-    int rank = ps.size();
+    size_t non_spatial_count = 2 + (fc_prim->input_size == 3 ? 1 : 0);
+    size_t rank = ps.size();

     // OneDnn doesn't support spatial dimensions for output
-    for (int i = non_spatial_count; i < rank; i++) {
+    for (auto i = non_spatial_count; i < rank; i++) {
         if (ps[i].is_dynamic() || ps[i] != 1) {
             return false;
         }
     }
diff --git a/src/plugins/intel_gpu/src/graph/network.cpp b/src/plugins/intel_gpu/src/graph/network.cpp
index 40217538874..44d361ac35a 100644
--- a/src/plugins/intel_gpu/src/graph/network.cpp
+++ b/src/plugins/intel_gpu/src/graph/network.cpp
@@ -540,8 +540,7 @@ void network::save(cldnn::BinaryOutputBuffer& ob) {
         }
     }

-    int exec_order_size;
-    exec_order_size = _exec_order.size();
+    int exec_order_size = _exec_order.size();
     ob << exec_order_size;

     for (const auto& p_inst : _exec_order) {
diff --git a/src/plugins/intel_gpu/src/graph/pooling.cpp b/src/plugins/intel_gpu/src/graph/pooling.cpp
index f92e7ca91af..908f1b44384 100644
--- a/src/plugins/intel_gpu/src/graph/pooling.cpp
+++ b/src/plugins/intel_gpu/src/graph/pooling.cpp
@@ -47,13 +47,13 @@ layout pooling_inst::calc_output_layout(parent::typed_node const& node, kernel_impl_params const& impl_param) {
         }
     }

-    uint32_t stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
-    uint32_t stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
-    uint32_t stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;
+    auto stride_z = stride.size() >= 3 ? stride[stride.size() - 3] : 1;
+    auto stride_y = stride.size() >= 2 ? stride[stride.size() - 2] : 1;
+    auto stride_x = stride.size() >= 1 ? stride[stride.size() - 1] : 1;

-    uint32_t kernel_z = window_size.size() >= 3 ? window_size[window_size.size() - 3] : 1;
-    uint32_t kernel_y = window_size.size() >= 2 ? window_size[window_size.size() - 2] : 1;
-    uint32_t kernel_x = window_size.size() >= 1 ? window_size[window_size.size() - 1] : 1;
+    auto kernel_z = window_size.size() >= 3 ? window_size[window_size.size() - 3] : 1;
+    auto kernel_y = window_size.size() >= 2 ? window_size[window_size.size() - 2] : 1;
+    auto kernel_x = window_size.size() >= 1 ? window_size[window_size.size() - 1] : 1;

     // TODO: Consider moving general parameter verification to arguments constructor.
     CLDNN_ERROR_LESS_OR_EQUAL_THAN(desc->id,
@@ -127,7 +127,7 @@ layout pooling_inst::calc_output_layout(parent::typed_node const& node, kernel_impl_params const& impl_param) {
     // TODO: Check compatibility of output size calculation (with caffe).
     tensor size(1);
     for (size_t i = 0; i < window_size.size(); i++) {
-        size.spatial[i] = window_size[window_size.size() - i - 1];
+        size.spatial[i] = static_cast<tensor::value_type>(window_size[window_size.size() - i - 1]);
     }
     auto output_range = calc_sliding_window_output_range(input_layout.get_tensor(),
                                                          size,
diff --git a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
index 29b97f400be..aae9e844a4e 100644
--- a/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
+++ b/src/plugins/intel_gpu/src/graph/primitive_inst.cpp
@@ -300,7 +300,7 @@ bool primitive_inst::update_impl() {
             size_t offset = 0;
             for (size_t i = 0; i < _node->get_dependencies().size(); i++) {
                 if (_node->get_dependency(i).get_output_layout().is_dynamic()) {
-                    auto input_shape = _node->type()->extend_input_shape_to_6d(params, i);
+                    auto input_shape = _node->type()->extend_input_shape_to_6d(params, static_cast<int32_t>(i));
                     for (size_t j = 0; j < input_shape.size(); j++)
                         lock[offset++] = static_cast<int32_t>(input_shape[j]);
                 }
@@ -308,7 +308,7 @@ bool primitive_inst::update_impl() {
             for (size_t i = 0; i < _node->get_output_layouts().size(); i++) {
                 if (_node->get_output_layout(i).is_dynamic()) {
-                    auto output_shape = _node->type()->extend_output_shape_to_6d(params, i);
+                    auto output_shape = _node->type()->extend_output_shape_to_6d(params, static_cast<int32_t>(i));
                     for (size_t j = 0; j < output_shape.size(); j++)
                         lock[offset++] = static_cast<int32_t>(output_shape[j]);
                 }
diff --git a/src/plugins/intel_gpu/src/graph/program.cpp b/src/plugins/intel_gpu/src/graph/program.cpp
index d1abe87a7a2..3e9f1703e36 100644
--- a/src/plugins/intel_gpu/src/graph/program.cpp
+++ b/src/plugins/intel_gpu/src/graph/program.cpp
@@ -364,7 +364,7 @@ bool program::analyze_output_size_handling_need() {
             tensor size(1);
             for (size_t i = 0; i < prim->size.size(); i++) {
-                size.spatial[i] = prim->size[prim->size.size() - i - 1];
+                size.spatial[i] = static_cast<tensor::value_type>(prim->size[prim->size.size() - i - 1]);
             }
             // TODO: Check compatibility of output size calculation (with caffe).
             auto primInputSize = prim_node.input().get_output_layout().get_tensor();
diff --git a/src/plugins/intel_gpu/src/graph/program_node.cpp b/src/plugins/intel_gpu/src/graph/program_node.cpp
index 140562e8d93..6055cd23407 100644
--- a/src/plugins/intel_gpu/src/graph/program_node.cpp
+++ b/src/plugins/intel_gpu/src/graph/program_node.cpp
@@ -927,7 +927,7 @@ void program_node::init_onednn_primitive_attributes() {
             if (fused_desc->activation_function == cldnn::activation_func::relu_negative_slope &&
                 !fused_desc->additional_params_input.empty()) {
                 auto dep_idx = cldnn_post_ops[idx].dep_start_idx;
-                int oc_dim = desc.output_layout.get_tensor().feature.size();
+                int oc_dim = static_cast<int>(desc.output_layout.get_tensor().feature.size());
                 post_ops.append_prelu(1 << oc_dim);
                 update_onednn_post_op_list(onednn_post_op_type::binary_relu, dep_idx);
             } else if (fused_desc->activation_function == cldnn::activation_func::hard_sigmoid) {
@@ -936,7 +936,7 @@ void program_node::init_onednn_primitive_attributes() {
             } else if (fused_desc->activation_function == cldnn::activation_func::hsigmoid) {
                 // hard_sigmoid(x,a,b) = clamp(ax+b, 0, 1)
                 // hsigmoid(x) = clamp(val+3, 0, 6) / 6 = clamp(val/6+0.5, 0, 1) = hard_sigmoid(val, 1/6, 1/2)
-                post_ops.append_eltwise(dnnl::algorithm::eltwise_hardsigmoid, 1./6, 1./2);
+                post_ops.append_eltwise(dnnl::algorithm::eltwise_hardsigmoid, 1.f/6, 1.f/2);
                 update_onednn_post_op_list(onednn_post_op_type::eltwise_hardsigmoid, empty_mem);
             } else if (fused_desc->activation_function == cldnn::activation_func::negative) {
                 post_ops.append_eltwise(dnnl::algorithm::eltwise_linear, -1, 0);
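On the eltwise_hardsigmoid change above: the post-op's alpha/beta parameters are float, so the double literals 1./6 and 1./2 narrowed at the call. A standalone illustration with a stand-in function (hard_sigmoid here is hypothetical, not the dnnl API):

    // Mimics an API that takes float parameters, like the post-op above.
    static float hard_sigmoid(float x, float alpha, float beta) {
        float v = alpha * x + beta;
        return v < 0.f ? 0.f : (v > 1.f ? 1.f : v);
    }

    int main() {
        // 1./6 is a double; passing it to a float parameter converts implicitly
        // (C4244 on MSVC). The 1.f/6 spelling stays in float throughout.
        float a = hard_sigmoid(3.0f, 1.f / 6, 1.f / 2);
        float b = hard_sigmoid(3.0f, 1. / 6, 1. / 2);  // same value here, but narrows double -> float
        return a == b ? 0 : 1;
    }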
diff --git a/src/plugins/intel_gpu/src/graph/reduce.cpp b/src/plugins/intel_gpu/src/graph/reduce.cpp
index cb5d82c8758..5e0ddb723a5 100644
--- a/src/plugins/intel_gpu/src/graph/reduce.cpp
+++ b/src/plugins/intel_gpu/src/graph/reduce.cpp
@@ -25,7 +25,7 @@ static std::vector<uint16_t> convert_axes(std::vector<int64_t> axes, size_t rank) {
         if (axis < 0)
             axis = axis + rank;

-        converted_axes.push_back(rank + 1 - axis);
+        converted_axes.push_back(static_cast<uint16_t>(rank + 1 - axis));
     }

     return converted_axes;
diff --git a/src/plugins/intel_gpu/src/graph/strided_slice.cpp b/src/plugins/intel_gpu/src/graph/strided_slice.cpp
index 7d13b4e5b42..2942e9624de 100644
--- a/src/plugins/intel_gpu/src/graph/strided_slice.cpp
+++ b/src/plugins/intel_gpu/src/graph/strided_slice.cpp
@@ -18,7 +18,10 @@ layout strided_slice_inst::calc_output_layout(strided_slice_node const& node, kernel_impl_params const& impl_param) {
     auto input_layout = impl_param.get_input_layout();
     auto output_format = format::get_default_format(desc->out_size.size());
     auto out_shape = desc->out_size;
-    std::vector<tensor::value_type> dims_converted(out_shape.begin(), out_shape.end());
+    std::vector<tensor::value_type> dims_converted;
+    for (auto dim : out_shape) {
+        dims_converted.push_back(static_cast<tensor::value_type>(dim));
+    }
     // extend shape to 4d
     for (size_t i = dims_converted.size(); i < 4; i++) {
         dims_converted.push_back(1);
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector.cpp
index 8ad5edf9b74..eb6c04765d9 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector.cpp
@@ -89,7 +89,10 @@ KernelsData kernel_selector_base::GetNaiveBestKernel(const KernelList& all_impls,
             if (!params.is_shape_agnostic) {
                 for (size_t k = 0; k < kds[0].kernels.size(); ++k) {
                     auto gws = kds[0].kernels[k].params.workGroups.global;
-                    kernelsData[0].kernels[k].skip_execution = (std::accumulate(gws.begin(), gws.end(), 1, std::multiplies<size_t>()) == 0);
+                    kernelsData[0].kernels[k].skip_execution = (std::accumulate(gws.begin(),
+                                                                                gws.end(),
+                                                                                static_cast<size_t>(1),
+                                                                                std::multiplies<size_t>()) == 0);
                 }
             }
             break;
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h
index 207999c2c47..40cd4165f0c 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_params.h
@@ -522,13 +522,13 @@ struct FusedOpsConfiguration {
     FusedOpsConfiguration& SetShuffleVarName(std::string val) { shuffle_var_name = val; return *this; }
     bool IsPostReorderFused(void) const { return orig_output_layout != DataLayout::DataLayoutCount; }
     int GetDimIndexFromOrder(Tensor::DataChannelName val) const {
-        int dims_num = bfzyx_idx_order.size();
+        size_t dims_num = bfzyx_idx_order.size();
         if (val == Tensor::DataChannelName::BATCH && dims_num >= 1) {
             return 0;
         } else if (val == Tensor::DataChannelName::FEATURE && dims_num >= 2) {
             return 1;
         } else if (dims_num >= 3 && dims_num - static_cast<int>(val) - 1 >= 0) {
-            return bfzyx_idx_order.size() - static_cast<int>(val) - 1;
+            return static_cast<int>(bfzyx_idx_order.size()) - static_cast<int>(val) - 1;
         } else {
             return -1;
         }
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp
index 5b5171df9e5..cab26c337b4 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernel_selector_utils.cpp
@@ -240,7 +240,7 @@ std::vector<size_t> GetOptimalLocalWorkGroupSizes(std::vector<size_t> gws, const
             if (axis_by_gws[layout_order[axis_idx]] != unused_axis) {
                 bool is_already_exists = false;
                 if (axis_idx > 0) {
-                    for (int i = axis_idx - 1; i >= 0; i--) {
+                    for (int i = static_cast<int>(axis_idx) - 1; i >= 0; i--) {
                         if (axis_by_gws[layout_order[axis_idx]] == axis_by_gws[layout_order[i]]) {
                             is_already_exists = true;
                             break;
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/border/border_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/border/border_kernel_base.cpp
index 94310677c75..fe5fd418071 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/border/border_kernel_base.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/border/border_kernel_base.cpp
@@ -15,7 +15,7 @@ inline std::string GetInputTypeStr(uint32_t idx) {
 JitConstants BorderKernelBase::GetJitConstants(const border_params& params) const {
     JitConstants jit = MakeBaseParamsJitConstants(params);

-    size_t input_offset = 1;
+    uint32_t input_offset = 1;
     if (params.begin_type == base_params::ArgType::Input) {
         jit.AddConstant(MakeJitConstant("BEGIN_TYPE", GetInputTypeStr(input_offset)));
         input_offset += 1;
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/convert_color/convert_color_kernel_base.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/convert_color/convert_color_kernel_base.cpp
index a14ae64e8ae..a38c677b24c 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/convert_color/convert_color_kernel_base.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/convert_color/convert_color_kernel_base.cpp
@@ -88,7 +88,7 @@ KernelsData ConvertColorKernelBase::GetCommonKernelsData(const Params& params, c
     auto jit = CreateJit(kernelName, cldnn_jit, entry_point);
     auto& kernel = kd.kernels[0];

-    size_t number_of_inputs = prim_params.inputs.size();
+    uint32_t number_of_inputs = static_cast<uint32_t>(prim_params.inputs.size());
     FillCLKernelData(kernel, dispatchData, params.engineInfo, kernelName, jit, entry_point, "", false, false, number_of_inputs);
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_loss/ctc_loss_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_loss/ctc_loss_kernel_ref.cpp
index 4d315b3c987..a4e3be0477b 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_loss/ctc_loss_kernel_ref.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/ctc_loss/ctc_loss_kernel_ref.cpp
@@ -44,7 +44,7 @@ KernelsData CTCLossKernelRef::GetKernelsData(const Params& params, const optiona
                      {},
                      false,
                      false,
-                     kernel_params.inputs.size());
+                     static_cast<uint32_t>(kernel_params.inputs.size()));

     return {kernel_data};
 }
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/detection_output/detection_output_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/detection_output/detection_output_kernel_ref.cpp
index 8d5931cc3f0..3e82d684d08 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/detection_output/detection_output_kernel_ref.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/detection_output/detection_output_kernel_ref.cpp
@@ -229,7 +229,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const
     kd.internalBufferDataType = GetUnitType(detectOutParams);

     for (size_t i = 0; i < kKernelsNum; i++) {
-        DispatchData dispatchData = SetDefault(detectOutParams, i);
+        DispatchData dispatchData = SetDefault(detectOutParams, static_cast<int>(i));
         auto cldnnJit = GetJitConstants(detectOutParams);
         auto entryPoint = GetEntryPoint(kernelName, detectOutParams.layerID, params, options, i);
         cldnnJit.AddConstant(MakeJitConstant("BUFFER_STRIDE", buffer_stride));
@@ -256,7 +256,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const
                 cldnnJit.AddConstant(MakeJitConstant("USE_LOCAL_MEMORY_FOR_STACK", true));
                 cldnnJit.AddConstants({MakeJitConstant("DO_STAGE_" + std::to_string(i) + "_MXNET", "true"),
                                        MakeJitConstant("LOCAL_WORK_NUM", dispatchData.lws[2]),
-                                       MakeJitConstant("PARTITION_STEP", GetPartitionStep(dispatchData.lws[2]))});
+                                       MakeJitConstant("PARTITION_STEP", GetPartitionStep(static_cast<int>(dispatchData.lws[2])))});
             } else {
                 // Limit local memory usage for two buffers: __range [LWS1 * LWS2 * 2 * 4 (int size) bytes]
                 //                                           stack [LWS1 * LWS2 * 100 (stack_size) * 4 (int size) bytes]
@@ -267,7 +267,7 @@ KernelsData DetectionOutputKernelRef::GetKernelsData(const Params& params, const
                 cldnnJit.AddConstants({MakeJitConstant("DO_STAGE_" + std::to_string(i) + "_CAFFE", "true"),
                                        MakeJitConstant("LOCAL_CLASS_NUM", dispatchData.lws[1]),
                                        MakeJitConstant("LOCAL_WORK_NUM", dispatchData.lws[2]),
-                                       MakeJitConstant("PARTITION_STEP", GetPartitionStep(dispatchData.lws[2]))});
+                                       MakeJitConstant("PARTITION_STEP", GetPartitionStep(static_cast<int>(dispatchData.lws[2])))});
             }
         } else if (i == 2) {
             if (detectOutParams.detectOutParams.decrease_label_id) {
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/ed_rfe/roi_feature_extractor_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/ed_rfe/roi_feature_extractor_kernel_ref.cpp
index 762f2876732..f84bb64fe18 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/ed_rfe/roi_feature_extractor_kernel_ref.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/ed_rfe/roi_feature_extractor_kernel_ref.cpp
@@ -111,7 +111,7 @@ KernelsData ExperimentalDetectronROIFeatureExtractorRef::GetKernelsData(const Pa
     auto jit = CreateJit(kernelName, cldnn_jit, entry_point);
     auto& kernel = kd.kernels[0];

-    FillCLKernelData(kernel, dispatch_data, params.engineInfo, kernelName, jit, entry_point, "", false, false, org_params.number_of_inputs);
+    FillCLKernelData(kernel, dispatch_data, params.engineInfo, kernelName, jit, entry_point, "", false, false, static_cast<uint32_t>(org_params.number_of_inputs));

     return {kd};
 }
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/gather/gather_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/gather/gather_kernel_ref.cpp
index 4e15c2d8fef..25d3517a4ac 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/gather/gather_kernel_ref.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/gather/gather_kernel_ref.cpp
@@ -180,7 +180,7 @@ CommonDispatchData GatherKernelRef::SetDefault(const gather_params& params) const
     auto out_layout = params.outputs[0].GetLayout();
     std::vector<std::vector<Tensor::DataChannelName>> dims_by_gws;

-    int rank = params.outputs[0].Dimentions();
+    auto rank = params.outputs[0].Dimentions();
     if (rank == 4) {
         dispatchData.gws = {output.X().v, output.Y().v, output.Feature().v * output.Batch().v};
         dims_by_gws = {{Tensor::DataChannelName::X},
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/matrix_nms/matrix_nms_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/matrix_nms/matrix_nms_kernel_ref.cpp
index b73e4288413..dc4cdcaee35 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/matrix_nms/matrix_nms_kernel_ref.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/matrix_nms/matrix_nms_kernel_ref.cpp
@@ -49,8 +49,8 @@ MatrixNmsKernelRef::DispatchData SetDefault(const matrix_nms_params& params, siz
 }

 std::tuple<int, int> GetMaxBoxes(const matrix_nms_params& params) {
-    const int classes_num = params.inputs[1].Feature().v;
-    const int boxes_num = params.inputs[0].Feature().v;
+    const int classes_num = static_cast<int>(params.inputs[1].Feature().v);
+    const int boxes_num = static_cast<int>(params.inputs[0].Feature().v);

     int max_boxes_per_class{boxes_num};
     if (params.nms_top_k >= 0)
@@ -79,8 +79,8 @@ KernelsData MatrixNmsKernelRef::GetKernelsData(const Params& params, const optio
     constexpr size_t BOX_INFO_SIZE{16};

-    const int batches_num = new_params.inputs[1].Batch().v;
-    const int classes_num = new_params.inputs[1].Feature().v;
+    const int batches_num = static_cast<int>(new_params.inputs[1].Batch().v);
+    const int classes_num = static_cast<int>(new_params.inputs[1].Feature().v);

     int max_boxes_per_class, max_boxes_per_batch;
     std::tie(max_boxes_per_class, max_boxes_per_batch) = GetMaxBoxes(new_params);
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/multiclass_nms/multiclass_nms_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/multiclass_nms/multiclass_nms_kernel_ref.cpp
index ed356cdc1ce..f44d9b52a71 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/multiclass_nms/multiclass_nms_kernel_ref.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/multiclass_nms/multiclass_nms_kernel_ref.cpp
@@ -87,7 +87,7 @@ JitConstants MulticlassNmsKernelRef::GetJitConstants(const multiclass_nms_params
     int64_t max_output_boxes_per_class = 0;
     if (params.nms_top_k >= 0) {
-        max_output_boxes_per_class = std::min(num_boxes, params.nms_top_k);
+        max_output_boxes_per_class = std::min(static_cast<int64_t>(num_boxes), params.nms_top_k);
     } else {
         max_output_boxes_per_class = num_boxes;
     }
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/resample/resample_kernel_opt.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/resample/resample_kernel_opt.cpp
index 01e8f7758a2..1633c62e172 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/resample/resample_kernel_opt.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/resample/resample_kernel_opt.cpp
@@ -90,7 +90,7 @@ static size_t get_vec_size(const resample_params &params) {
 }

 static int get_feature_slice_size(const resample_params &params) {
-    return 16 * get_vec_size(params);
+    return static_cast<int>(16 * get_vec_size(params));
 }

 ResampleKernelBase::DispatchData ResampleKernelOpt::SetDefault(const kernel_selector::resample_params &arg) const {
diff --git a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp
index d11630c978e..7a4e7f85fe8 100644
--- a/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp
+++ b/src/plugins/intel_gpu/src/kernel_selector/kernels/scatter_update/scatter_nd_update_kernel_ref.cpp
@@ -130,7 +130,7 @@ static std::string GetInputBlockND(const scatter_nd_update_params& params, size_
     block_nd_s[rank] = "1";
     size_t input_offset = num * 6;

-    for (int32_t idx = (rank - 1); idx >= 0; --idx) {
+    for (int32_t idx = rank - 1; idx >= 0; --idx) {
         block_nd[idx] = input_dims[idx] * block_nd[idx + 1];

         size_t dim_offset = idx < 2 ? idx : idx + 6 - rank;
diff --git a/src/plugins/intel_gpu/src/plugin/infer_request.cpp b/src/plugins/intel_gpu/src/plugin/infer_request.cpp
index 1619c5cf195..5f5fc4ad174 100644
--- a/src/plugins/intel_gpu/src/plugin/infer_request.cpp
+++ b/src/plugins/intel_gpu/src/plugin/infer_request.cpp
@@ -592,7 +592,7 @@ void InferRequest::setup_stream_graph() {
     auto& streamGraphs = static_cast(_exeNetwork.get())->m_graphs;
     if (nullptr != streamExecutor) {
         streamID = streamExecutor->GetStreamId();
-        int numGraphs = streamGraphs.size();
+        auto numGraphs = streamGraphs.size();
         streamID = streamID % numGraphs;
     }
     m_graph = streamGraphs[streamID];
static_cast<int>(batched_ptr->size()) : 1; for (auto i = 0; i < expected_batch; i++) { std::string y_name = name + "_Y" + std::to_string(i); std::string uv_name = name + "_UV" + std::to_string(i); if (is_batched) { - int idx = i < num_blobs ? i : num_blobs-1; + int idx = i < num_blobs ? i : static_cast<int>(num_blobs)-1; nv12_ptr = getNV12BlobOrException(batched_ptr, idx); } @@ -352,7 +352,7 @@ void InferRequestLegacy::SetBlob(const std::string& name, const Blob::Ptr& data) if (m_graph->GetMaxDynamicBatchSize() > 1) { const auto batch_idx = m_graph->GetInputDynBatchDims()[name].first; if (batch_idx >= 0) - SetBatch(blobDesc.getDims()[batch_idx]); + SetBatch(static_cast<int>(blobDesc.getDims()[batch_idx])); } } else { size_t blobSize = desc.getLayout() != SCALAR @@ -531,7 +531,7 @@ void InferRequestLegacy::SetGraph(std::shared_ptr<Graph> graph) { } if (m_graph->GetMaxDynamicBatchSize() > 1) { - SetBatch(m_graph->GetMaxDynamicBatchSize()); + SetBatch(static_cast<int>(m_graph->GetMaxDynamicBatchSize())); allocate_inputs_dynamic(); allocate_outputs_dynamic(); } else { @@ -546,7 +546,7 @@ void InferRequestLegacy::SetBatch(int new_batch) { if (m_graph->GetMaxDynamicBatchSize() < 0) IE_THROW() << "Dynamic batch is not enabled."; - if (new_batch < 1 || new_batch > m_graph->GetMaxDynamicBatchSize()) { + if (new_batch < 1 || static_cast<size_t>(new_batch) > m_graph->GetMaxDynamicBatchSize()) { IE_THROW() << "Invalid dynamic batch size " << new_batch << " for this request. Got: " << new_batch << ". Expected value in range [1;" << m_graph->GetMaxDynamicBatchSize() << "]"; } @@ -735,9 +735,9 @@ void InferRequestLegacy::enqueue() { bool is_nv12 = nv12_ptr != nullptr; if (is_nv12 || is_batched) { - int num_blobs = is_batched ? batched_ptr->size() : 1; + int num_blobs = is_batched ? static_cast<int>(batched_ptr->size()) : 1; int expected_batch = is_batched - ? _networkInputs.at(inputName)->getTensorDesc().getDims()[0] + ? 
static_cast<int>(_networkInputs.at(inputName)->getTensorDesc().getDims()[0]) : 1; for (auto i = 0; i < expected_batch; i++) { std::string y_name = inputName + "_Y" + std::to_string(i); @@ -890,7 +890,7 @@ void InferRequestLegacy::setup_stream_graph() { auto& streamGraphs = static_cast<CompiledModel*>(_exeNetwork.get())->m_graphs; if (nullptr != streamExecutor) { streamID = streamExecutor->GetStreamId(); - int numGraphs = streamGraphs.size(); + auto numGraphs = streamGraphs.size(); streamID = streamID % numGraphs; } m_graph = streamGraphs[streamID]; @@ -904,7 +904,7 @@ void InferRequestLegacy::setup_stream_graph() { // extract new batch size from blob const auto batch_idx = m_graph->GetInputDynBatchDims()[input.first].first; if (batch_idx >= 0) { - SetBatch(_inputs[input.first]->getTensorDesc().getDims()[batch_idx]); + SetBatch(static_cast<int>(_inputs[input.first]->getTensorDesc().getDims()[batch_idx])); break; } } diff --git a/src/plugins/intel_gpu/src/plugin/ops/constant.cpp b/src/plugins/intel_gpu/src/plugin/ops/constant.cpp index 3bffd477773..1714e50e5da 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/constant.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/constant.cpp @@ -154,7 +154,7 @@ void createClDnnConstant(Program& p, const ngraph::Shape& constDims, const std:: auto constFormat = cldnn::format::get_default_format(constDims.size()); if (props.needsBatchInterpretation) { - constTensor.batch[0] = constTensor.count(); + constTensor.batch[0] = static_cast<cldnn::tensor::value_type>(constTensor.count()); constTensor.feature[0] = 1; } diff --git a/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp b/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp index 685371c2648..0d1f8443eb0 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/convolution.cpp @@ -221,7 +221,7 @@ static void CreateGroupConvolutionBackpropDataOp(Program& p, const std::shared_p } } - uint32_t groups = op->get_input_shape(1)[0]; + uint32_t groups = static_cast<uint32_t>(op->get_input_shape(1)[0]); auto weightsName = inputs[1]; auto weights_node = op->get_input_node_shared_ptr(1); diff --git a/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp b/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp index 65337876efe..2b4d0c13415 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/ctc_greedy_decoder.cpp @@ -45,7 +45,7 @@ static void CreateCommonCTCGreedyDecoderOp(Program& p, const std::shared_ptr<ngraph::Node>& op) { - uint32_t blank_index = op->get_input_shape(0).back() - 1; + uint32_t blank_index = static_cast<uint32_t>(op->get_input_shape(0).back() - 1); if (reordered_inputs.size() == 3) { auto blank_index_node = std::dynamic_pointer_cast<ngraph::op::Constant>(op->get_input_node_shared_ptr(2)); if (!blank_index_node) { diff --git a/src/plugins/intel_gpu/src/plugin/ops/custom.cpp b/src/plugins/intel_gpu/src/plugin/ops/custom.cpp index 895dc9f5f55..267db4ecb19 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/custom.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/custom.cpp @@ -199,10 +199,10 @@ void CreateCustomOp(Program& p, const std::shared_ptr<ngraph::Node>& op, CustomL IE_THROW() << "Invalid input tensor for index: " << iidx; auto inputDims = op->get_input_shape(iidx); - xDim = inputDims[inputDims.size() - 1]; - yDim = dims.size() > 1 ? inputDims[inputDims.size() - 2] : 0; - featureDim = dims.size() > 2 ? inputDims[inputDims.size() - 3] : 0; - batchDim = dims.size() > 3 ? inputDims[inputDims.size() - 4]: 0; + xDim = static_cast<int>(inputDims[inputDims.size() - 1]); + yDim = dims.size() > 1 ? 
static_cast<int>(inputDims[inputDims.size() - 2]) : 0; + featureDim = dims.size() > 2 ? static_cast<int>(inputDims[inputDims.size() - 3]) : 0; + batchDim = dims.size() > 3 ? static_cast<int>(inputDims[inputDims.size() - 4]) : 0; } const std::map<char, int> vars = { { 'b', batchDim } , { 'B', batchDim }, diff --git a/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp b/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp index 7af50dc7bc1..542ce6e1359 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/detection_output.cpp @@ -43,8 +43,8 @@ static void CreateDetectionOutputOp(Program& p, const std::shared_ptr<ngraph::op::v0::DetectionOutput>& op) { - int input_width = attrs.input_width; - int input_height = attrs.input_height; + int input_width = static_cast<int>(attrs.input_width); + int input_height = static_cast<int>(attrs.input_height); bool normalized = attrs.normalized; std::string code_type = attrs.code_type; bool clip_before_nms = attrs.clip_before_nms; diff --git a/src/plugins/intel_gpu/src/plugin/ops/dft.cpp b/src/plugins/intel_gpu/src/plugin/ops/dft.cpp index a785e7543a3..42e079e8149 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/dft.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/dft.cpp @@ -29,7 +29,7 @@ void createDft(Program& p, IE_THROW() << "Unsupported parameter nodes type in " << friendly_name << " (" << op->get_type_name() << ")"; } auto axes = axes_constant->cast_vector<int64_t>(); - uint8_t axis_correction = op->get_input_shape(0).size(); + uint8_t axis_correction = static_cast<uint8_t>(op->get_input_shape(0).size()); if (direction != cldnn::dft_direction::forward || mode != cldnn::dft_mode::real) { --axis_correction; } diff --git a/src/plugins/intel_gpu/src/plugin/ops/experimental_detectron_topk_rois.cpp b/src/plugins/intel_gpu/src/plugin/ops/experimental_detectron_topk_rois.cpp index f6907e0e4ba..e76d796df47 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/experimental_detectron_topk_rois.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/experimental_detectron_topk_rois.cpp @@ -21,7 +21,7 @@ void CreateExperimentalDetectronTopKROIsOp(Program &p, const std::shared_ptr<ngraph::op::v6::ExperimentalDetectronTopKROIs> &op) { validate_inputs_count(op, {2}); auto inputs = p.GetInputInfo(op); - auto max_rois = op->get_max_rois(); + auto max_rois = static_cast<uint32_t>(op->get_max_rois()); auto layer_name = layer_type_name_ID(op); auto argmax_layer_name = layer_name + "_topk"; auto top_k_indices = arg_max_min(argmax_layer_name, diff --git a/src/plugins/intel_gpu/src/plugin/ops/extract_image_patches.cpp b/src/plugins/intel_gpu/src/plugin/ops/extract_image_patches.cpp index da42fb9d83b..7e9dca2fcaf 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/extract_image_patches.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/extract_image_patches.cpp @@ -28,9 +28,18 @@ static void CreateExtractImagePatchesOp(Program& p, const std::shared_ptr<ngraph::op::v3::ExtractImagePatches>& op) { - std::vector<uint32_t> sizes = std::vector<uint32_t>(op->get_sizes().begin(), op->get_sizes().end()); - std::vector<uint32_t> strides = std::vector<uint32_t>(op->get_strides().begin(), op->get_strides().end()); - std::vector<uint32_t> rates = std::vector<uint32_t>(op->get_rates().begin(), op->get_rates().end()); + std::vector<uint32_t> sizes; + std::vector<uint32_t> strides; + std::vector<uint32_t> rates; + for (auto size : op->get_sizes()) { + sizes.push_back(static_cast<uint32_t>(size)); + } + for (auto stride : op->get_strides()) { + strides.push_back(static_cast<uint32_t>(stride)); + } + for (auto rate : op->get_rates()) { + rates.push_back(static_cast<uint32_t>(rate)); + } std::string auto_pad = PadToString(op->get_auto_pad()); auto extractImagePatchesPrim = cldnn::extract_image_patches(layerName, diff --git a/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp b/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp index 7e3e38e9a07..99861fa36c2 100644 --- 
a/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/gather_nd.cpp @@ -18,18 +18,16 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v5::GatherND>& op) { - auto input_rank = op->get_input_partial_shape(0).size(); - auto indices_rank = op->get_input_partial_shape(1).size(); - - auto batch_dims = op->get_batch_dims(); + auto input_rank = static_cast<uint8_t>(op->get_input_partial_shape(0).size()); + auto indices_rank = static_cast<uint8_t>(op->get_input_partial_shape(1).size()); + auto batch_dims = static_cast<uint8_t>(op->get_batch_dims()); auto primitive = cldnn::gather_nd(layerName, inputs[0], inputs[1], input_rank, indices_rank, - batch_dims, - true); + batch_dims); p.add_primitive(*op, primitive); } @@ -41,10 +39,9 @@ static void CreateGatherNDOp(Program& p, const std::shared_ptr<ngraph::op::v8::GatherND>& op) { - auto input_rank = op->get_input_partial_shape(0).size(); - auto indices_rank = op->get_input_partial_shape(1).size(); - - auto batch_dims = op->get_batch_dims(); + auto input_rank = static_cast<uint8_t>(op->get_input_partial_shape(0).size()); + auto indices_rank = static_cast<uint8_t>(op->get_input_partial_shape(1).size()); + auto batch_dims = static_cast<uint8_t>(op->get_batch_dims()); auto primitive = cldnn::gather_nd(layerName, inputs[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/lrn.cpp b/src/plugins/intel_gpu/src/plugin/ops/lrn.cpp index 2ff91d0b282..71a6ced9da9 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/lrn.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/lrn.cpp @@ -31,7 +31,7 @@ static void CreateLRNOp(Program& p, const std::shared_ptr<ngraph::op::v0::LRN>& op) { IE_THROW() << "Unsupported axes node type in " << op->get_friendly_name() << " (" << op->get_type_name() << ")"; } auto axis_value = axis_const->cast_vector<int64_t>(); - auto localSize = op->get_nsize(); + auto localSize = static_cast<uint32_t>(op->get_nsize()); auto lrnPrim = cldnn::lrn(layerName, inputs[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp b/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp index 092914dd650..a20881612b3 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/one_hot.cpp @@ -41,7 +41,7 @@ static void CreateOneHotOp(Program& p, const std::shared_ptr<ngraph::op::v1::OneHot>& op) { - for (int i = dims.size() - 1; i >= 0; i--) { + for (int i = static_cast<int>(dims.size() - 1); i >= 0; i--) { if (dims[i] == 1) axis--; else diff --git a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp index 414df4c77d3..dbec5aebe7a 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/proposal.cpp @@ -20,15 +20,15 @@ static void CreateProposalOp(Program& p, const std::shared_ptr<ngraph::op::v0::Proposal>& op) { auto attrs = op->get_attrs(); float nms_thresh = attrs.nms_thresh; - int min_size = attrs.min_size; - int feature_stride = attrs.feat_stride; - int pre_nms_topn = attrs.pre_nms_topn; - int post_nms_topn = attrs.post_nms_topn; + int min_size = static_cast<int>(attrs.min_size); + int feature_stride = static_cast<int>(attrs.feat_stride); + int pre_nms_topn = static_cast<int>(attrs.pre_nms_topn); + int post_nms_topn = static_cast<int>(attrs.post_nms_topn); const std::vector<float> ratio = attrs.ratio; const std::vector<float> scale = attrs.scale; float box_coordinate_scale = attrs.box_coordinate_scale; float box_size_scale = attrs.box_size_scale; - int base_size = attrs.base_size; + int base_size = static_cast<int>(attrs.base_size); std::string framework = attrs.framework; bool normalize = attrs.normalize; bool clip_before_nms = attrs.clip_before_nms; diff --git a/src/plugins/intel_gpu/src/plugin/ops/region_yolo.cpp b/src/plugins/intel_gpu/src/plugin/ops/region_yolo.cpp index c61f2e8c427..58ce0285aa3 100644 --- 
a/src/plugins/intel_gpu/src/plugin/ops/region_yolo.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/region_yolo.cpp @@ -17,11 +17,11 @@ static void CreateRegionYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::RegionYolo>& op) { - uint32_t coords = op->get_num_coords(); - uint32_t classes = op->get_num_classes(); - uint32_t num = op->get_num_regions(); + uint32_t coords = static_cast<uint32_t>(op->get_num_coords()); + uint32_t classes = static_cast<uint32_t>(op->get_num_classes()); + uint32_t num = static_cast<uint32_t>(op->get_num_regions()); bool do_softmax = op->get_do_softmax(); - uint32_t mask_size = op->get_mask().size(); + uint32_t mask_size = static_cast<uint32_t>(op->get_mask().size()); auto regionPrim = cldnn::region_yolo(layerName, inputs[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/reorg_yolo.cpp b/src/plugins/intel_gpu/src/plugin/ops/reorg_yolo.cpp index e182dfc122f..9982fa08a9f 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/reorg_yolo.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/reorg_yolo.cpp @@ -17,7 +17,7 @@ static void CreateReorgYoloOp(Program& p, const std::shared_ptr<ngraph::op::v0::ReorgYolo>& op) { - uint32_t stride = op->get_strides()[0]; + uint32_t stride = static_cast<uint32_t>(op->get_strides()[0]); auto reorgPrim = cldnn::reorg_yolo(layerName, inputs[0], diff --git a/src/plugins/intel_gpu/src/plugin/ops/reverse_sequence.cpp b/src/plugins/intel_gpu/src/plugin/ops/reverse_sequence.cpp index 5a6f28667c3..1953a1c47d4 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/reverse_sequence.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/reverse_sequence.cpp @@ -17,8 +17,8 @@ static void CreateReverseSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v0::ReverseSequence>& op) { - size_t batch_axis = op->get_batch_axis(); - size_t seq_axis = op->get_sequence_axis(); + auto batch_axis = static_cast<int32_t>(op->get_batch_axis()); + auto seq_axis = static_cast<int32_t>(op->get_sequence_axis()); auto reverseSequencePrim = cldnn::reverse_sequence(layerName, inputs[0], inputs[1], diff --git a/src/plugins/intel_gpu/src/plugin/ops/rnn.cpp b/src/plugins/intel_gpu/src/plugin/ops/rnn.cpp index 398f0b52e73..bfcbe84da82 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/rnn.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/rnn.cpp @@ -83,9 +83,9 @@ static void CreateLSTMCellOp(Program& p, const std::shared_ptr<ngraph::op::v4::LSTMCell>& op) { if (op->get_input_shape(2).size() != 2) IE_THROW() << "Wrong input shapes for LSTMCell op " << op->get_friendly_name(); - lstm_input_size = in_dims0.back(); - lstm_batch_size = in_dims0.at(in_dims0.size()-2); - lstm_hidden_size = out_dims0.back(); + lstm_input_size = static_cast<int>(in_dims0.back()); + lstm_batch_size = static_cast<int>(in_dims0.at(in_dims0.size()-2)); + lstm_hidden_size = static_cast<int>(out_dims0.back()); } std::vector<cldnn::activation_func> activations; @@ -179,10 +179,10 @@ static void CreateLSTMSequenceOp(Program& p, const std::shared_ptr<ngraph::op::v5::LSTMSequence>& op) { if (op->get_input_shape(2).size() != 3) IE_THROW() << "Wrong input shapes for LSTMSequence op " << op->get_friendly_name(); - lstm_input_size = in_dims0.back(); - lstm_sequence_len = in_dims0.at(in_dims0.size() - 2); - lstm_batch_size = in_dims0.at(in_dims0.size() - 3); - lstm_hidden_size = out_dims0.back(); + lstm_input_size = static_cast<int>(in_dims0.back()); + lstm_sequence_len = static_cast<int>(in_dims0.at(in_dims0.size() - 2)); + lstm_batch_size = static_cast<int>(in_dims0.at(in_dims0.size() - 3)); + lstm_hidden_size = static_cast<int>(out_dims0.back()); } std::vector<cldnn::activation_func> activations; diff --git a/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp b/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp index 9a8a017f904..053819f8cc3 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/roi_pooling.cpp @@ -68,8 +68,8 @@ static void CreatePSROIPoolingOp(Program& p, const 
std::shared_ptr<ngraph::op::v0::PSROIPooling>& op) { cldnn::pooling_mode mode = GetPoolingMode(op->get_mode()); - int group_size = op->get_group_size(); - int output_dim = op->get_output_dim(); + int group_size = static_cast<int>(op->get_group_size()); + int output_dim = static_cast<int>(op->get_output_dim()); float spatial_scale = op->get_spatial_scale(); int spatial_bins_x = op->get_spatial_bins_x(); int spatial_bins_y = op->get_spatial_bins_y(); @@ -96,8 +96,8 @@ static void CreateROIPoolingOp(Program& p, const std::shared_ptr<ngraph::op::v0::ROIPooling>& op) { auto out_size = op->get_output_size(); - int pooled_height = out_size[0]; - int pooled_width = out_size[1]; + int pooled_height = static_cast<int>(out_size[0]); + int pooled_width = static_cast<int>(out_size[1]); float spatial_scale = op->get_spatial_scale(); bool position_sensitive = false; diff --git a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp index 57b838e5e73..a80cb0222cf 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/roll.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/roll.cpp @@ -23,7 +23,7 @@ void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) { const auto& input_pshape = op->get_input_partial_shape(0); OPENVINO_ASSERT(input_pshape.is_static(), "Dynamic shapes are not supported for Roll operation yet"); const auto& input_shape = input_pshape.to_shape(); - const uint8_t rank = input_shape.size(); + const auto rank = static_cast<int>(input_shape.size()); const auto format = cldnn::format::get_default_format(rank); const auto default_rank = format.dimension(); @@ -53,7 +53,7 @@ void CreateRollOp(Program& p, const std::shared_ptr<ngraph::op::v7::Roll>& op) { } // Normalize shift - for (size_t s = 0; s < rank; ++s) { + for (int s = 0; s < rank; ++s) { auto& sh = shift[s]; const auto dim = static_cast<int32_t>(input_shape[s]); sh %= dim; diff --git a/src/plugins/intel_gpu/src/plugin/ops/split.cpp b/src/plugins/intel_gpu/src/plugin/ops/split.cpp index 626b174c51a..f6f493e4598 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/split.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/split.cpp @@ -21,14 +21,14 @@ static void CreateCommonSplitOp(Program& p, const std::shared_ptr<ngraph::Node>& op) { auto inputs = p.GetInputInfo(op); if (p.use_new_shape_infer() || op->is_dynamic()) { cldnn::crop_ngraph_op_mode op_mode = cldnn::crop_ngraph_op_mode::variadic_split; - size_t num_splits = 1; + auto num_splits = static_cast<size_t>(1); if (ngraph::is_type<ngraph::op::v1::Split>(op)) { num_splits = ngraph::as_type_ptr<ngraph::op::v1::Split>(op)->get_num_splits(); op_mode = cldnn::crop_ngraph_op_mode::split; } for (size_t i = 0; i < op->get_output_size(); i++) { - auto cropPrim = cldnn::crop(get_layer_name(i), inputs, cldnn::tensor(1), cldnn::tensor(0), op_mode, i, num_splits); + auto cropPrim = cldnn::crop(get_layer_name(i), inputs, cldnn::tensor(1), cldnn::tensor(0), op_mode, static_cast<int>(i), num_splits); p.add_primitive(*op, cropPrim); } } else { diff --git a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp index 360fa7819a5..4b9ab81efb1 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/strided_slice.cpp @@ -91,9 +91,11 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v1::StridedSlice>& op) { (begin.size() - axis - num_new_axis_after_ellipses - 1); + unsigned long num_of_hidden_dims = + static_cast<unsigned long>(input_shape.size() - num_input_axis_after_ellipses - num_input_axis_before_ellipses); for (size_t i = 0; i < num_of_hidden_dims; ++i) { axes.emplace_back(uniq_id); uniq_id++; } @@ -207,7 +209,7 @@ static void CreateStridedSliceOp(Program& p, const std::shared_ptr<ngraph::op::v1::StridedSlice>& op) { if (axes[i] > 3) { IE_THROW() << "Invalid crop axis: " << std::to_string(axes[i]) << " in 
op " + op->get_friendly_name(); } - offset_tensor[axes[i]] = offset[i]; + offset_tensor[axes[i]] = static_cast(offset[i]); } ngraph::Shape crop_shape(reshape_pattern); diff --git a/src/plugins/intel_gpu/src/plugin/ops/topk.cpp b/src/plugins/intel_gpu/src/plugin/ops/topk.cpp index 13870b5f52a..8830a54915c 100644 --- a/src/plugins/intel_gpu/src/plugin/ops/topk.cpp +++ b/src/plugins/intel_gpu/src/plugin/ops/topk.cpp @@ -22,7 +22,7 @@ static void CreateTopKOp(Program& p, const std::shared_ptr ov::op::TopKMode mode = op->get_mode(); ov::op::TopKSortType stype = op->get_sort_type(); - uint32_t top_k = op->get_k(); + uint32_t top_k = static_cast(op->get_k()); uint64_t chosen_axis = op->get_axis(); if (p.use_new_shape_infer()) { diff --git a/src/plugins/intel_gpu/src/plugin/program.cpp b/src/plugins/intel_gpu/src/plugin/program.cpp index 8c078256de5..cffe8a8c7b1 100644 --- a/src/plugins/intel_gpu/src/plugin/program.cpp +++ b/src/plugins/intel_gpu/src/plugin/program.cpp @@ -186,7 +186,7 @@ Program::Program(InferenceEngine::CNNNetwork& network, cldnn::engine& engine, co } int m_bv_sz = GetMaxBatchSizeForSingleProgram(); - m_max_batch = m_config.get_property(ov::intel_gpu::max_dynamic_batch); + m_max_batch = static_cast(m_config.get_property(ov::intel_gpu::max_dynamic_batch)); if (dyn_shape_batch_found || m_max_batch > 1) { // compile log2 networks to serve dynamic batch requests @@ -307,7 +307,7 @@ int Program::GetMaxBatchSizeForSingleProgram() { auto max_dynamic_batch = m_config.get_property(ov::intel_gpu::max_dynamic_batch); if (max_dynamic_batch > 1) { // calculate number of networks necessary based on binary log - unsigned int tmp = max_dynamic_batch; + unsigned int tmp = static_cast(max_dynamic_batch); unsigned int mask = 1U << 31; unsigned int ldigit = 31; @@ -469,9 +469,10 @@ std::vector Program::GetInputInfo(const std::shared_ptrget_input_source_output(i).get_index())); + inputInfo.push_back( + cldnn::input_info(primitive_ids.at(prevName), is_legacy_multiple_outputs ? 0: static_cast(op->get_input_source_output(i).get_index()))); } else { - inputInfo.push_back(cldnn::input_info(prevName, is_legacy_multiple_outputs ? 0 : op->get_input_source_output(i).get_index())); + inputInfo.push_back(cldnn::input_info(prevName, is_legacy_multiple_outputs ? 0 : static_cast(op->get_input_source_output(i).get_index()))); } } return inputInfo; diff --git a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp index 8e402587853..78ccd48ab34 100644 --- a/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp +++ b/src/plugins/intel_gpu/src/plugin/transformations_pipeline.cpp @@ -354,14 +354,14 @@ void TransformationsPipeline::apply(std::shared_ptr func) { auto axesVal = axesNode->cast_vector(); auto& mvnShape = mvn->get_output_partial_shape(0); for (int32_t& axis : axesVal) - axis = axis < 0 ? axis + mvnShape.size() : axis; + axis = axis < 0 ? 
axis + static_cast<int32_t>(mvnShape.size()) : axis; std::sort(axesVal.begin(), axesVal.end()); if (mvnShape.size() == 1) return false; if (mvnShape.size() > 5 || (mvnShape.size() != axesVal.size() + 1 && mvnShape.size() != axesVal.size() + 2)) return false; - int value = mvnShape.size() - 1; - for (int i = axesVal.size() - 1; i >= 0; i--, value--) { + int value = static_cast<int>(mvnShape.size()) - 1; + for (int i = static_cast<int>(axesVal.size()) - 1; i >= 0; i--, value--) { if (axesVal[i] != value) return false; } diff --git a/src/plugins/intel_gpu/src/runtime/layout.cpp b/src/plugins/intel_gpu/src/runtime/layout.cpp index 544dfea131c..ee825df8ec5 100644 --- a/src/plugins/intel_gpu/src/runtime/layout.cpp +++ b/src/plugins/intel_gpu/src/runtime/layout.cpp @@ -197,7 +197,10 @@ std::vector<tensor::value_type> layout::get_dims() const { if (is_dynamic()) throw std::runtime_error("[GPU] get_dims() is called for dynamic shape"); auto shape = size.to_shape(); - std::vector<tensor::value_type> res(shape.begin(), shape.end()); + std::vector<tensor::value_type> res; + for (auto dim : shape) { + res.push_back(static_cast<tensor::value_type>(dim)); + } if (res.size() < format.dimension()) res.insert(res.end(), format.dimension() - res.size(), 1); @@ -333,7 +336,10 @@ tensor layout::get_tensor() const { shape = size.to_shape(); } - std::vector<tensor::value_type> dims(shape.begin(), shape.end()); + std::vector<tensor::value_type> dims; + for (auto dim : shape) { + dims.push_back(static_cast<tensor::value_type>(dim)); + } auto rank = std::max(format.dimension(), dims.size()); auto default_fmt = format::get_default_format(rank, format::is_weights_format(format), format::is_grouped(format)); @@ -513,7 +519,10 @@ ov::PartialShape layout::transform(cldnn::format new_fmt) const { cldnn::tensor::value_type default_size = -1; auto shape = size.to_shape(); - std::vector<tensor::value_type> dims(shape.begin(), shape.end()); + std::vector<tensor::value_type> dims; + for (auto dim : shape) { + dims.push_back(static_cast<tensor::value_type>(dim)); + } const cldnn::format default_fmt = cldnn::format::bfwzyx; auto old_sizes = convert_dimensions(dims, format.order(), default_fmt.internal_order()); // convert to internal order (bfxyzw) @@ -614,7 +623,7 @@ ov::PartialShape layout::transform(cldnn::format new_fmt) const { } auto new_dims = convert_dimensions(new_sizes, default_fmt.internal_order(), new_fmt.order()); - for (int idx = (new_dims.size() - 1); idx >= 0; idx--) { + for (int idx = static_cast<int>(new_dims.size() - 1); idx >= 0; idx--) { if (new_dims[idx] == -1) new_dims.erase((new_dims.begin() + idx)); else if (new_dims[idx] < 0) diff --git a/src/plugins/intel_gpu/tests/gtest_main_gpu.cpp b/src/plugins/intel_gpu/tests/gtest_main_gpu.cpp index aa299e0dc6f..856c541a607 100644 --- a/src/plugins/intel_gpu/tests/gtest_main_gpu.cpp +++ b/src/plugins/intel_gpu/tests/gtest_main_gpu.cpp @@ -47,7 +47,7 @@ GTEST_API_ int main(int argc, char** argv) { cldnn::device_query::device_id = FLAGS_device_suffix; //restore cmdline arg for gtest auto varg=gflags::GetArgvs(); - int new_argc=varg.size(); + int new_argc = static_cast<int>(varg.size()); char** new_argv=new char*[new_argc]; for(int i=0;i(device_id); } device_info get_info() const override { return _info; } diff --git a/src/plugins/intel_gpu/tests/module_tests/layout_test.cpp b/src/plugins/intel_gpu/tests/module_tests/layout_test.cpp index 4be438acda5..4e110514f11 100644 --- a/src/plugins/intel_gpu/tests/module_tests/layout_test.cpp +++ b/src/plugins/intel_gpu/tests/module_tests/layout_test.cpp @@ -34,8 +34,8 @@ TEST_P(data_layout_test, size_check) { auto l = layout(p.dt, p.fmt, tensor{default_fmt, p.size}); - size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, 
std::multiplies<int>()); - size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies<int>()) * + size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies<size_t>()); + size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies<size_t>()) * data_type_traits::size_of(p.dt); ASSERT_EQ(l.bytes_count(), expected_bytes_count); @@ -117,8 +117,11 @@ TEST_P(weights_layout_test, size_check) { auto l = layout(p.dt, p.fmt, tensor{default_fmt, p.size}); - size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies<int>()); - size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), p.expected_aligned_size.end(), 1, std::multiplies<int>()) * + size_t expected_count = std::accumulate(p.size.begin(), p.size.end(), 1, std::multiplies<size_t>()); + size_t expected_bytes_count = std::accumulate(p.expected_aligned_size.begin(), + p.expected_aligned_size.end(), + 1, + std::multiplies<size_t>()) * data_type_traits::size_of(p.dt); ASSERT_EQ(l.bytes_count(), expected_bytes_count); diff --git a/src/plugins/intel_gpu/tests/shape_infer/crop_si_test.cpp b/src/plugins/intel_gpu/tests/shape_infer/crop_si_test.cpp index 73c84a8ff80..c7db35f5d73 100644 --- a/src/plugins/intel_gpu/tests/shape_infer/crop_si_test.cpp +++ b/src/plugins/intel_gpu/tests/shape_infer/crop_si_test.cpp @@ -61,7 +61,7 @@ TEST_P(crop_si_test, shape_infer) { for (size_t output_idx = 0; output_idx < p.expected_layouts.size(); output_idx++) { auto prim_id = "crop.out" + std::to_string(output_idx); - auto crop_prim = std::make_shared<cldnn::crop>(prim_id, input_prim_ids, p.reference_input_size, p.offsets[output_idx], op_mode, output_idx, p.param_num_splits); + auto crop_prim = std::make_shared<cldnn::crop>(prim_id, input_prim_ids, p.reference_input_size, p.offsets[output_idx], op_mode, static_cast<int>(output_idx), p.param_num_splits); auto& crop_node = prog.get_or_create(crop_prim); for (auto& prim : input_prims) { diff --git a/src/plugins/intel_gpu/tests/test_cases/adaptive_max_pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/adaptive_max_pooling_gpu_test.cpp index 878761b7b6c..2c582dd4b57 100644 --- a/src/plugins/intel_gpu/tests/test_cases/adaptive_max_pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/adaptive_max_pooling_gpu_test.cpp @@ -182,8 +182,8 @@ public: return; const auto block_sizes = format::traits(target_layout).block_sizes; - const auto index_offset = std::accumulate(block_sizes.begin(), block_sizes.end(), 1u, - [](size_t total, const std::pair<size_t, int>& b) { + const auto index_offset = std::accumulate(block_sizes.begin(), block_sizes.end(), 1, + [](int total, const std::pair<size_t, int>& b) { return total * b.second; } ); diff --git a/src/plugins/intel_gpu/tests/test_cases/broadcast_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/broadcast_gpu_test.cpp index 56fb98b79e7..0ed3d621325 100644 --- a/src/plugins/intel_gpu/tests/test_cases/broadcast_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/broadcast_gpu_test.cpp @@ -133,7 +133,10 @@ void start_broadcast_test_dynamic(format input_format, topology.add( broadcast("broadcast", input_info("reorder"), input_info("target_shape"), ov::AxisSet(broadcast_axes))); topology.add(reorder("output", input_info("broadcast"), fmt, input_data_type)); - std::vector<int32_t> target_shape_data(output_shape.begin(), output_shape.end()); + std::vector<int32_t> target_shape_data; + for (auto out_shape : output_shape) { + 
target_shape_data.push_back(static_cast<int32_t>(out_shape)); + } set_values(target_shape_mem, target_shape_data); } diff --git a/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp index fd00fa0b308..c8607728374 100644 --- a/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/convolution_gpu_test.cpp @@ -8732,15 +8732,18 @@ public: auto pad = convolution->pad; tensor weights_size = generic_params->input_layouts[1].get_tensor(); - int kernel_extent_y = dilation[dilation.size() - 2] * (weights_size.spatial[1] - 1) + 1; - int kernel_extent_x = dilation[dilation.size() - 1] * (weights_size.spatial[0] - 1) + 1; + auto kernel_extent_y = dilation[dilation.size() - 2] * (weights_size.spatial[1] - 1) + 1; + auto kernel_extent_x = dilation[dilation.size() - 1] * (weights_size.spatial[0] - 1) + 1; // Calculate output size - int output_size_y = 1 + (input_size.spatial[1] - kernel_extent_y + 2 * pad[0]) / stride[0]; - int output_size_x = 1 + (input_size.spatial[0] - kernel_extent_x + 2 * pad[1]) / stride[1]; - int output_features = weights_size.batch[0]; + auto output_size_y = 1 + (input_size.spatial[1] - kernel_extent_y + 2 * pad[0]) / stride[0]; + auto output_size_x = 1 + (input_size.spatial[0] - kernel_extent_x + 2 * pad[1]) / stride[1]; + auto output_features = weights_size.batch[0]; - return cldnn::tensor(input_size.batch[0], output_features, output_size_x, output_size_y); + return cldnn::tensor(input_size.batch[0], + static_cast<cldnn::tensor::value_type>(output_features), + static_cast<cldnn::tensor::value_type>(output_size_x), + static_cast<cldnn::tensor::value_type>(output_size_y)); } void prepare_input_for_test(std::vector<cldnn::memory::ptr>& inputs) override { @@ -8841,19 +8844,19 @@ public: int output_fi = out_f; int output_yi = y; int output_xi = x; - int output_index = (output_bi * output_buffer_size.feature[0] + output_fi) * output_buffer_size.spatial[1] * output_buffer_size.spatial[0]; + auto output_index = (output_bi * output_buffer_size.feature[0] + output_fi) * output_buffer_size.spatial[1] * output_buffer_size.spatial[0]; tensor lower_output_padding = convolution->output_paddings[0].lower_size(); output_index += (lower_output_padding.spatial[1] + output_yi) * output_buffer_size.spatial[0] + lower_output_padding.spatial[0] + output_xi; for (int kernel_y = 0; kernel_y < weights_size.spatial[1]; kernel_y++) { - int input_yi = y * stride[0] - pad[0] + kernel_y * dilation[0]; - if ((input_yi < 0) || (input_yi >= input_size.spatial[1])) { + int input_yi = static_cast<int>(y * stride[0] - pad[0] + kernel_y * dilation[0]); + if ((input_yi < 0) || (input_yi >= static_cast<int>(input_size.spatial[1]))) { continue; } for (int kernel_x = 0; kernel_x < weights_size.spatial[0]; kernel_x++) { - int input_xi = x * stride[1] - pad[1] + kernel_x * dilation[1]; - if ((input_xi < 0) || (input_xi >= input_size.spatial[0])) { + int input_xi = static_cast<int>(x * stride[1] - pad[1] + kernel_x * dilation[1]); + if ((input_xi < 0) || (input_xi >= static_cast<int>(input_size.spatial[0]))) { continue; } diff --git a/src/plugins/intel_gpu/tests/test_cases/crop_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/crop_gpu_test.cpp index 0c147209c0b..d0492aa55b1 100644 --- a/src/plugins/intel_gpu/tests/test_cases/crop_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/crop_gpu_test.cpp @@ -1460,7 +1460,7 @@ TEST(crop_gpu, static_split_batch) { topology.add(crop("crop3", { input_info("input") }, tensor(1, 4, 1, 1), { tensor(2, 0, 0, 0) }, op_mode, 2)); std::vector<int32_t> input_vec(12); - for (size_t i = 
0; i < 12; i++) { + for (int32_t i = 0; i < 12; i++) { input_vec[i] = i; } diff --git a/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp index 2a41b952054..4c1a6431488 100644 --- a/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/deconvolution_gpu_test.cpp @@ -2408,7 +2408,7 @@ struct deconvolution_random_test_params { static std::string print_params(const testing::TestParamInfo<deconvolution_random_test_params>& param_info) { auto& param = param_info.param; - auto to_string_neg = [](int v) { + auto to_string_neg = [](int64_t v) { if (v >= 0) { return std::to_string(v); } else { diff --git a/src/plugins/intel_gpu/tests/test_cases/eltwise_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/eltwise_gpu_test.cpp index 4a7e3eaa5b7..4bbe8ae9413 100644 --- a/src/plugins/intel_gpu/tests/test_cases/eltwise_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/eltwise_gpu_test.cpp @@ -2924,10 +2924,10 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast) return; } - size_t input_b = 2; - size_t input_f = 72; - size_t input1_y = 10, input1_x = 10; - size_t input2_y = 1, input2_x = 1; + tensor::value_type input_b = 2; + tensor::value_type input_f = 72; + tensor::value_type input1_y = 10, input1_x = 10; + tensor::value_type input2_y = 1, input2_x = 1; tensor input1_tensor(input_b, input_f, input1_x, input1_y); tensor input2_tensor(input_b, input_f, input2_x, input2_y); @@ -2989,9 +2989,9 @@ TEST(eltwise_gpu_f16, fs_b_yx_fsv32_broadcast_bfyx) return; } - size_t input_b = 2; - size_t input_f = 72; - size_t input1_y = 10, input1_x = 10; + tensor::value_type input_b = 2; + tensor::value_type input_f = 72; + tensor::value_type input1_y = 10, input1_x = 10; tensor input1_tensor(input_b, input_f, input1_x, input1_y); tensor input2_tensor(1, input_f, 1, 1); diff --git a/src/plugins/intel_gpu/tests/test_cases/gather_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/gather_gpu_test.cpp index fe3d11630e0..9f421837ca4 100644 --- a/src/plugins/intel_gpu/tests/test_cases/gather_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/gather_gpu_test.cpp @@ -17,7 +17,7 @@ using namespace ::tests; template <typename T> int get_not_one_dim(const T& a) { - int ret = a.size(); + int ret = static_cast<int>(a.size()); while (ret - 1 >= 0 && a[ret - 1] == 1) ret--; return ret; diff --git a/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp index 8016ccddad6..627d7eb499f 100644 --- a/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/lru_caches_gpu_test.cpp @@ -43,11 +43,11 @@ TEST(lru_cache, basic_data_type) std::vector<std::pair<int, int>> expected_value; for (size_t i = ca.size(); i > 0; i--) { // 5, 1, 2, 4 - int idx = input_values.size() - i; + auto idx = input_values.size() - i; expected_value.push_back(input_values[idx]); } - int idx = expected_value.size() - 1; + auto idx = expected_value.size() - 1; for (auto key : ca.get_all_keys()) { ASSERT_EQ(key, expected_value[idx--].first); } @@ -118,7 +118,7 @@ TEST(lru_cache, custom_data_type) { expected_keys.push_back(inputs[inputs.size() - i]->key); } - int idx = expected_keys.size() - 1; + auto idx = expected_keys.size() - 1; for (auto key : ca.get_all_keys()) { ASSERT_EQ(key, expected_keys[idx--]); } diff --git a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp index 
adb69a1b002..42312c5bfd9 100644 --- a/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/pooling_gpu_test.cpp @@ -1216,7 +1216,7 @@ static void generic_average_wo_padding_test(format fmt, tensor output, tensor in tensor off(0); for (size_t i = 0; i < offset.size(); i++) { - off.spatial[i] = offset[offset.size() - i - 1]; + off.spatial[i] = static_cast<tensor::value_type>(offset[offset.size() - i - 1]); } auto pool_in = "in"; @@ -2081,15 +2081,15 @@ public: for (size_t fi = 0; fi < this->input_features(); ++fi) { reference[bi][fi] = reference_pooling( this->_input[bi][fi], - this->pool_x(), - this->pool_y(), - this->pool_z(), - this->stride_x(), - this->stride_y(), - this->stride_z(), - this->offset_x(), - this->offset_y(), - this->offset_z()); + static_cast<int>(this->pool_x()), + static_cast<int>(this->pool_y()), + static_cast<int>(this->pool_z()), + static_cast<int>(this->stride_x()), + static_cast<int>(this->stride_y()), + static_cast<int>(this->stride_z()), + static_cast<int>(this->offset_x()), + static_cast<int>(this->offset_y()), + static_cast<int>(this->offset_z())); } } return reference; @@ -3008,14 +3008,14 @@ public: cldnn::pooling_mode pooling_mode = pooling->mode; - int pad_width = pooling->pads_begin[1]; - int pad_height = pooling->pads_begin[0]; + int pad_width = static_cast<int>(pooling->pads_begin[1]); + int pad_height = static_cast<int>(pooling->pads_begin[0]); - int kernel_width = pooling->size[1]; - int kernel_height = pooling->size[0]; + int kernel_width = static_cast<int>(pooling->size[1]); + int kernel_height = static_cast<int>(pooling->size[0]); - int stride_width = pooling->stride[1]; - int stride_height = pooling->stride[0]; + int stride_width = static_cast<int>(pooling->stride[1]); + int stride_height = static_cast<int>(pooling->stride[0]); auto output_tensor = get_expected_output_tensor(); @@ -3060,9 +3060,9 @@ public: const size_t output_index = get_linear_index(output->get_layout(), b, f, h, w, output_desc); - for (int y = pad_y_start; y < pad_y_end; y++) + for (auto y = pad_y_start; y < pad_y_end; y++) { - for (int x = pad_x_start; x < pad_x_end; x++) + for (auto x = pad_x_start; x < pad_x_end; x++) { const size_t input_index = get_linear_index(inputs[0]->get_layout(), b, f, y, x, input_desc); @@ -3081,10 +3081,12 @@ public: case cldnn::pooling_mode::average: case cldnn::pooling_mode::average_no_padding: { - int pool_size_w = pooling->size[1]; - int pool_size_h = pooling->size[0]; - auto dynamic_mode = (((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) > -2 * pad_width + width || (((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) > -2 * pad_height + height; + auto pool_size_w = pooling->size[1]; + auto pool_size_h = pooling->size[0]; + auto dynamic_mode = static_cast<int>(((output_tensor.spatial[0] - 1) * stride_width) + pool_size_w) > + -2 * pad_width + width || + static_cast<int>(((output_tensor.spatial[1] - 1) * stride_height) + pool_size_h) > + -2 * pad_height + height; auto divider = [=](int actual_x, int actual_y) { auto x = kernel_width; diff --git a/src/plugins/intel_gpu/tests/test_cases/set_output_memory_gpu_test.cpp b/src/plugins/intel_gpu/tests/test_cases/set_output_memory_gpu_test.cpp index 19ad6572b81..2e6c9f55bc7 100644 --- a/src/plugins/intel_gpu/tests/test_cases/set_output_memory_gpu_test.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/set_output_memory_gpu_test.cpp @@ -36,7 +36,7 @@ void test_basic(bool is_caching_test) { auto input_data = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); auto output_mem = engine.allocate_memory({ 
data_types::f32, format::bfyx, { b, f, x, y } }); - const int inputSize = input_data->get_layout().count(); + const auto inputSize = input_data->get_layout().count(); auto inputVals = generateVector(inputSize); set_values(input_data, inputVals); @@ -80,7 +80,7 @@ TEST(set_output_memory_gpu, basic_const) { auto output_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); auto output_const_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); - const int inputSize = input_data->get_layout().count(); + const int inputSize = static_cast<int>(input_data->get_layout().count()); auto inputVals = generateVector(inputSize); auto constVals = generateVector(inputSize); set_values(input_data, inputVals); @@ -129,7 +129,7 @@ TEST(set_output_memory_gpu, basic_mutable) { auto md = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); auto output_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); auto output_mutable_mem = engine.allocate_memory({ data_types::f32, format::bfyx, { b, f, x, y } }); - const int inputSize = input_data->get_layout().count(); + const auto inputSize = input_data->get_layout().count(); auto inputVals = generateVector(inputSize); auto mutableVals = generateVector(inputSize); set_values(input_data, inputVals); diff --git a/src/plugins/intel_gpu/tests/test_cases/slice.cpp b/src/plugins/intel_gpu/tests/test_cases/slice.cpp index ebb6b3f28ea..9e06a840b5e 100644 --- a/src/plugins/intel_gpu/tests/test_cases/slice.cpp +++ b/src/plugins/intel_gpu/tests/test_cases/slice.cpp @@ -30,7 +30,7 @@ public: assert(input_shape_.size() == 4 || input_shape_.size() == 5); format input_format = input_shape_.size() == 4 ? format::bfyx : format::bfzyx; layout data_layout ( input_type_, input_format, tensor{input_shape_} ); - std::vector input_vals = GenInput(data_layout.get_linear_size()); + std::vector input_vals = GenInput(static_cast<int>(data_layout.get_linear_size())); memory::ptr input = engine_.allocate_memory(data_layout); set_values(input, input_vals); topology topology;