From 9cfe909e1ec5e60f023b198e2d54c91f8ef08e0f Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 22 Jan 2021 17:41:15 +0300 Subject: [PATCH] Unused variables (#3963) * Added -Wunused-variable flag * Fixes for clang compiler * Removed wrong -Wno-error from protobuf compilation * More fixes --- .../compile_flags/os_flags.cmake | 1 + docs/CMakeLists.txt | 4 + docs/snippets/CMakeLists.txt | 10 +- docs/snippets/MULTI3.cpp | 2 +- docs/template_plugin/CMakeLists.txt | 4 + ...renceEngineDeveloperPackageConfig.cmake.in | 7 ++ .../object_detection_sample_ssd.h | 4 - inference-engine/samples/CMakeLists.txt | 6 ++ .../src/cldnn_engine/cldnn_config.cpp | 1 + .../src/cldnn_engine/cldnn_graph.cpp | 2 - .../src/cldnn_engine/ops/convolution.cpp | 2 - .../src/cldnn_engine/ops/matmul.cpp | 1 - .../cldnn_engine/ops/non_max_suppression.cpp | 5 - .../src/cldnn_engine/ops/strided_slice.cpp | 1 - .../src/gna_plugin/backend/dnn_types.cpp | 57 +++++++++++ .../src/gna_plugin/backend/dnn_types.h | 52 +--------- .../gna_plugin/frontend/model_quantizer.hpp | 5 - .../gna_plugin/frontend/scale_factor_calc.hpp | 1 - .../src/gna_plugin/gna_graph_compiler.cpp | 14 +-- .../src/gna_plugin/gna_plugin.cpp | 1 - .../gna_plugin/optimizer/gna_pass_manager.cpp | 11 --- .../runtime/gna_float_runtime_op.cpp | 1 - .../src/gna_plugin/runtime/pwl.cpp | 1 - .../cnn_network_ngraph_impl.cpp | 2 - .../legacy_api/src/ie_layer_validators.cpp | 4 - .../mkldnn_plugin/mkldnn_graph_optimizer.cpp | 1 - .../mkldnn_plugin/nodes/common/cpu_memcpy.h | 1 - .../nodes/mkldnn_interpolate_node.cpp | 2 - .../mkldnn_plugin/nodes/mkldnn_mvn_node.cpp | 1 - .../nodes/mkldnn_normalize_node.cpp | 1 - .../nodes/mkldnn_reduce_node.cpp | 2 +- .../nodes/mkldnn_roi_align_node.cpp | 1 - .../nodes/mkldnn_scatter_update_node.cpp | 1 - .../src/readers/ir_reader/ie_ir_parser.cpp | 1 - .../ir_reader_v7/ie_layer_validators.cpp | 4 - .../src/vpu/common/CMakeLists.txt | 1 - .../src/vpu/graph_transformer/CMakeLists.txt | 1 - .../async_infer_request_test.cpp | 2 +- .../inference_engine/blob_copy_test.cpp | 1 - .../cnn_network/matmul_sr_tests.cpp | 1 - .../inference_engine/cnn_network_test.cpp | 4 +- .../inference_engine/executable_network.cpp | 2 +- ...ermediate_with_constant_transformation.cpp | 1 - .../multiply_transformation.cpp | 2 - .../inference_engine/parameter_tests.cpp | 12 +-- .../inference_engine/task_executor_tests.cpp | 1 - .../transformations/convert_nms5_test.cpp | 5 - .../convert_nms_to_nms_ie_internal_test.cpp | 4 - .../convert_nms_to_nms_ie_test.cpp | 4 - .../transformations/fq_reshape_fusion.cpp | 1 - .../cpu/single_layer_tests/normalize.cpp | 1 - .../cpu/single_layer_tests/region_yolo.cpp | 3 - .../behavior/core_threading_tests.cpp | 2 +- .../dynamic_to_static_shape_clamp.cpp | 4 +- .../dynamic_to_static_shape_convert.cpp | 4 +- .../dynamic_to_static_shape_reduce.cpp | 2 - .../dynamic_to_static_shape_transpose.cpp | 4 +- ...amic_to_static_shape_unary_elementwise.cpp | 6 +- .../myriad/subgraph_tests/dsr_reshape.cpp | 12 +-- .../plugin/myriad/subgraph_tests/dsr_topk.cpp | 1 - .../subgraph_tests/nonzero_transpose.cpp | 6 +- .../plugin/shared/include/behavior/config.hpp | 2 +- .../include/behavior/core_integration.hpp | 7 +- .../include/behavior/core_threading_tests.hpp | 1 - .../shared/include/behavior/infer_request.hpp | 5 +- .../shared/include/behavior/preprocessing.hpp | 10 -- .../shared/include/behavior/test_plugin.hpp | 4 +- .../subgraph_tests/concat_quantization.hpp | 2 +- .../plugin/shared/src/hetero/synthetic.cpp | 1 -
...put_layers_handling_in_transformations.cpp | 1 - ...handling_in_transformations_for_concat.cpp | 3 - ...ansformations_for_concat_multi_channel.cpp | 1 - .../src/base/layer_test_utils.cpp | 1 + .../single_layer/extract_image_patches.cpp | 2 +- .../src/single_layer/reorg_yolo.cpp | 1 - .../src/single_layer/space_to_batch.cpp | 2 +- .../src/subgraph/basic_lstm.cpp | 1 - .../src/subgraph/concat_multi_input.cpp | 2 +- .../src/subgraph/constant_result.cpp | 1 - .../memory_eltwise_reshape_concat.cpp | 2 +- .../src/subgraph/parameter_result.cpp | 1 - .../subgraph/reshape_squeeze_reshape_relu.cpp | 1 - .../src/subgraph/trivial_concat.cpp | 3 - .../two_fake_quantize_to_fullyconnected.cpp | 2 +- .../common_test_utils/data_utils.hpp | 20 ++-- .../common_test_utils/unicode_utils.cpp | 25 +++++ .../common_test_utils/unicode_utils.hpp | 28 ++---- .../common_test_utils/w_dirent.h | 1 + .../functional_test_utils/blob_utils.hpp | 94 ++++++++++--------- ...fake_quantize_and_convolution_function.cpp | 1 - .../ngraph_functions/subgraph_builders.hpp | 28 +++--- .../ngraph_functions/utils/data_utils.hpp | 6 +- .../onnx_import/onnx_importer_test.cpp | 1 - .../passes_tests/adjust_data_batch_tests.cpp | 2 + .../tests_deprecated/CMakeLists.txt | 4 + .../vpu/myriad_tests/aot_behavior_tests.cpp | 4 +- .../ie_tests/src/custom_matcher.cpp | 14 +-- .../functional/ie_tests/src/raw_matcher.cpp | 1 - .../ie_tests/src/segmentation_matcher.cpp | 3 - .../single_layer_tests/region_yolo_tests.cpp | 1 - .../def_conv_ref.cpp | 2 - .../single_layer_tests.hpp | 1 - .../shared_tests/lstm/lstm_ir_test.hpp | 16 ++-- .../functional/shared_tests/lstm/rnn_util.cpp | 2 - .../layers/myriad_layers_conv_nd_test.hpp | 1 - .../layers/myriad_layers_convolution1x1.hpp | 1 - .../layers/myriad_layers_convolution3x3.hpp | 7 -- .../layers/myriad_layers_convolution_test.hpp | 3 - .../common/layers/myriad_layers_crop_test.hpp | 2 - .../layers/myriad_layers_custom_test.hpp | 3 - .../myriad_layers_fully_connected_tests.hpp | 2 - .../common/layers/myriad_layers_gemm_test.hpp | 1 - .../common/layers/myriad_layers_lstm_cell.hpp | 4 - .../common/layers/myriad_layers_pad_test.hpp | 8 -- .../layers/myriad_layers_permute_test.hpp | 1 - .../layers/myriad_layers_pool_nd_test.hpp | 1 - .../layers/myriad_layers_pooling_test.hpp | 2 - .../myriad_layers_psroipooling_test.hpp | 1 - ...riad_layers_roi_feature_extractor_test.hpp | 3 - .../layers/myriad_layers_roi_pooling_test.hpp | 3 - .../myriad_layers_scatter_update_test.hpp | 4 - .../layers/myriad_layers_select_test.hpp | 1 - .../common/layers/myriad_layers_tile_test.hpp | 1 - .../vpu/vpu_base/myriad_layers_tests.hpp | 1 - .../vpu/vpu_base/vpu_layers_tests.cpp | 1 - .../functional/vpu/vpu_base/vpu_test_net.cpp | 1 - .../unit/engines/gna/gna_memory_test.cpp | 1 - .../engines/gna/matchers/conv_matcher.hpp | 5 +- .../layers/extensions/normalize_tests.cpp | 1 - .../layers/internal/graph_concat_test.cpp | 3 - .../graph/structure/graph_structure_test.cpp | 1 - .../thirdparty/clDNN/CMakeLists.txt | 2 +- ...on_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp | 1 - ...onvolution_kernel_imad_b_fs_yx_fsv4_dw.cpp | 1 - .../reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp | 1 - .../clDNN/src/gpu/ocl_queue_wrapper.cpp | 26 ----- .../thirdparty/clDNN/src/gpu/ocl_toolkit.cpp | 26 ----- .../src/include/fused_conv_eltwise_inst.h | 2 +- .../clDNN/src/include/program_node.h | 2 +- .../thirdparty/clDNN/tests/CMakeLists.txt | 1 - .../tests/test_cases/convolution_gpu_test.cpp | 6 -- .../test_cases/depth_concatenate_gpu_test.cpp | 12 +-- 
.../clDNN/tests/test_cases/lstm_gpu_test.cpp | 4 +- .../tests/test_cases/reduce_gpu_test.cpp | 12 +-- .../tests/test_cases/reorder_gpu_test.cpp | 4 +- .../tests/test_cases/reshape_gpu_test.cpp | 4 +- .../tests/test_cases/softmax_gpu_test.cpp | 1 - .../clDNN/tests/test_utils/network_test.h | 1 - .../clDNN/tests_core_internal/CMakeLists.txt | 1 - ngraph/cmake/external_protobuf.cmake | 2 - .../builder/src/builder/make_constant.cpp | 1 - .../reference/embedding_segments_sum.hpp | 2 - .../reference/extract_image_patches.hpp | 4 - .../ngraph/runtime/reference/interpolate.hpp | 3 - .../include/ngraph/runtime/reference/lrn.hpp | 1 - .../ngraph/runtime/reference/region_yolo.hpp | 1 - .../src/runtime/reference/interpolate.cpp | 1 - .../runtime/reference/non_max_suppression.cpp | 2 - ngraph/core/src/op/binary_convolution.cpp | 1 - .../src/op/ctc_greedy_decoder_seq_len.cpp | 1 - ngraph/core/src/op/deformable_convolution.cpp | 1 - ngraph/core/src/op/depth_to_space.cpp | 1 - ngraph/core/src/op/gather.cpp | 1 - ngraph/core/src/op/loop.cpp | 1 - ngraph/core/src/op/non_max_suppression.cpp | 2 - ngraph/core/src/op/one_hot.cpp | 1 - ngraph/core/src/op/reshape.cpp | 1 - ngraph/core/src/op/reverse.cpp | 1 - ngraph/core/src/op/shuffle_channels.cpp | 1 - ngraph/core/src/op/space_to_batch.cpp | 1 - ngraph/core/src/op/space_to_depth.cpp | 1 - ngraph/frontend/onnx_import/src/op/loop.cpp | 1 - ngraph/frontend/onnx_import/src/op/lstm.cpp | 2 - ngraph/test/backend/fused_op.in.cpp | 2 - ngraph/test/backend/group_convolution.in.cpp | 1 - ngraph/test/backend/region_yolo.in.cpp | 2 - ngraph/test/backend/roi_pooling.in.cpp | 1 - ngraph/test/onnx/onnx_import.in.cpp | 1 - .../runtime/interpreter/evaluates_map.cpp | 4 - 179 files changed, 303 insertions(+), 523 deletions(-) create mode 100644 inference-engine/src/gna_plugin/backend/dnn_types.cpp create mode 100644 inference-engine/tests/ie_test_utils/common_test_utils/unicode_utils.cpp diff --git a/cmake/developer_package/compile_flags/os_flags.cmake b/cmake/developer_package/compile_flags/os_flags.cmake index 8e3a5606ab7..87359245b54 100644 --- a/cmake/developer_package/compile_flags/os_flags.cmake +++ b/cmake/developer_package/compile_flags/os_flags.cmake @@ -271,6 +271,7 @@ else() ie_add_compiler_flags(-fdiagnostics-show-option) ie_add_compiler_flags(-Wundef) ie_add_compiler_flags(-Wreturn-type) + ie_add_compiler_flags(-Wunused-variable) # Disable noisy warnings diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index a4ee2f62aa5..e34e8fe3ade 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -3,6 +3,10 @@ # if(NOT ENABLE_DOCKER) + if(CMAKE_COMPILER_IS_GNUCXX) + ie_add_compiler_flags(-Wall) + endif() + add_subdirectory(snippets) # Detect nGraph diff --git a/docs/snippets/CMakeLists.txt b/docs/snippets/CMakeLists.txt index 25e7cadf726..d033653e9de 100644 --- a/docs/snippets/CMakeLists.txt +++ b/docs/snippets/CMakeLists.txt @@ -54,4 +54,12 @@ if(NGRAPH_ONNX_IMPORT_ENABLE) target_link_libraries(${TARGET_NAME} PRIVATE onnx_importer) endif() -target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api ngraph inference_engine_transformations) +if(NOT MSVC) + target_compile_options(${TARGET_NAME} PRIVATE -Wno-unused-variable) + if(CMAKE_COMPILER_IS_GNUCXX) + target_compile_options(${TARGET_NAME} PRIVATE -Wno-unused-but-set-variable) + endif() +endif() + +target_link_libraries(${TARGET_NAME} PRIVATE inference_engine_plugin_api + ngraph inference_engine_transformations) diff --git a/docs/snippets/MULTI3.cpp b/docs/snippets/MULTI3.cpp index 
03426a603dd..757ff32fa7f 100644 --- a/docs/snippets/MULTI3.cpp +++ b/docs/snippets/MULTI3.cpp @@ -8,7 +8,7 @@ int main() { auto cnnNetwork = ie.ReadNetwork("sample.xml"); std::string allDevices = "MULTI:"; std::vector myriadDevices = ie.GetMetric("MYRIAD", METRIC_KEY(AVAILABLE_DEVICES)); - for (int i = 0; i < myriadDevices.size(); ++i) { + for (size_t i = 0; i < myriadDevices.size(); ++i) { allDevices += std::string("MYRIAD.") + myriadDevices[i] + std::string(i < (myriadDevices.size() -1) ? "," : ""); diff --git a/docs/template_plugin/CMakeLists.txt b/docs/template_plugin/CMakeLists.txt index f18c1076c68..dcd55e272ed 100644 --- a/docs/template_plugin/CMakeLists.txt +++ b/docs/template_plugin/CMakeLists.txt @@ -11,6 +11,10 @@ set(IE_MAIN_TEMPLATE_PLUGIN_SOURCE_DIR ${InferenceEngineTemplatePlugin_SOURCE_DI find_package(InferenceEngineDeveloperPackage REQUIRED) +if(CMAKE_COMPILER_IS_GNUCXX) + ie_add_compiler_flags(-Wall) +endif() + add_subdirectory(src) if(ENABLE_TESTS) diff --git a/inference-engine/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in b/inference-engine/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in index 31dd6bb422e..092a6af0876 100644 --- a/inference-engine/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in +++ b/inference-engine/cmake/templates/InferenceEngineDeveloperPackageConfig.cmake.in @@ -54,6 +54,13 @@ find_package(IEDevScripts REQUIRED NO_CMAKE_FIND_ROOT_PATH NO_DEFAULT_PATH) +if(NOT MSVC) + ie_add_compiler_flags(-Wno-error=unused-variable) + if(CMAKE_COMPILER_IS_GNUCXX) + ie_add_compiler_flags(-Wno-error=unused-but-set-variable) + endif() +endif() + # Don't threat deprecated API warnings as errors in 3rd party apps ie_deprecated_no_errors() diff --git a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h index 631d2fb4439..47acc4efc47 100644 --- a/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h +++ b/inference-engine/ie_bridges/c/samples/object_detection_sample_ssd/object_detection_sample_ssd.h @@ -15,10 +15,6 @@ static const char* model_message = "Required. Path to an .xml file with a traine /// @brief message for images argument static const char *image_message = "Required. Path to one or more .bmp images."; -/// @brief message for plugin argument -static const char *plugin_message = "Plugin name. For example MKLDNNPlugin. If this parameter is pointed, " \ -"the sample will look for this plugin only"; - /// @brief message for assigning cnn calculation to device static const char *target_device_message = "Optional. Specify the target device to infer on (the list of available devices is shown below). " \ "Default value is CPU. Use \"-d HETERO:\" format to specify HETERO plugin. 
" \ diff --git a/inference-engine/samples/CMakeLists.txt b/inference-engine/samples/CMakeLists.txt index 15a795046ec..dfe9f2303d8 100644 --- a/inference-engine/samples/CMakeLists.txt +++ b/inference-engine/samples/CMakeLists.txt @@ -76,6 +76,8 @@ else() set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror") #treating warnings as errors endif() + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall") if (APPLE) set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unused-command-line-argument") elseif(UNIX) @@ -116,6 +118,10 @@ set (BUILD_TESTING OFF) if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/thirdparty/gflags") function(add_gflags) + if(NOT WIN32) + set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-all") + set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-all") + endif() set(BUILD_SHARED_LIBS OFF) add_subdirectory(thirdparty/gflags EXCLUDE_FROM_ALL) set_target_properties(gflags_nothreads_static PROPERTIES FOLDER thirdparty) diff --git a/inference-engine/src/cldnn_engine/cldnn_config.cpp b/inference-engine/src/cldnn_engine/cldnn_config.cpp index 9d0a422907f..2c474428ef1 100644 --- a/inference-engine/src/cldnn_engine/cldnn_config.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_config.cpp @@ -191,6 +191,7 @@ void Config::UpdateFromMap(const std::map& configMap) // Validate if passed value is postivie number. try { int val_i = std::stoi(val); + (void)val_i; } catch (const std::exception&) { THROW_IE_EXCEPTION << "Wrong value for property key " << PluginConfigParams::KEY_DEVICE_ID << ". DeviceIDs are only represented by positive numbers"; diff --git a/inference-engine/src/cldnn_engine/cldnn_graph.cpp b/inference-engine/src/cldnn_engine/cldnn_graph.cpp index 998a6d1ac3c..bff05e90077 100644 --- a/inference-engine/src/cldnn_engine/cldnn_graph.cpp +++ b/inference-engine/src/cldnn_engine/cldnn_graph.cpp @@ -265,7 +265,6 @@ InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::ve ngraph::OutputVector inputs; auto& deps = prim_info.c_dependencies; - size_t in_size = deps.size(); // Decrease expected dependencies count if there is a const input without original id in the IR for (auto& dep : deps) { @@ -321,7 +320,6 @@ InferenceEngine::CNNNetwork CLDNNGraph::GetExecGraphInfoByPrimitivesInfo(std::ve }; auto create_ngraph_node = [&](const cldnn::primitive_info& prim_info) { - const auto& deps = prim_info.c_dependencies; const auto& user_ids = prim_info.c_users; size_t output_size = user_ids.size(); bool is_output = user_ids.empty(); diff --git a/inference-engine/src/cldnn_engine/ops/convolution.cpp b/inference-engine/src/cldnn_engine/ops/convolution.cpp index f25388e05da..9fda5d94e25 100644 --- a/inference-engine/src/cldnn_engine/ops/convolution.cpp +++ b/inference-engine/src/cldnn_engine/ops/convolution.cpp @@ -234,7 +234,6 @@ void CreateDeformableConvolutionOp(Program& p, const std::shared_ptrget_pads_begin(), op->get_dilations(), op->get_strides(), op->get_group()); auto outDims = op->get_output_shape(0); - auto outPrecision = op->get_output_element_type(0); std::vector weights = {inputs[2]}; if (params.groups > 1) { @@ -302,7 +301,6 @@ void CreateBinaryConvolutionOp(Program& p, const std::shared_ptrget_pads_begin(), op->get_dilations(), op->get_strides(), 1); auto outDims = op->get_output_shape(0); - auto outPrecision = op->get_output_element_type(0); std::vector weights = {inputs[1]}; cldnn::data_types calc_precision = DataTypeFromPrecision(op->get_output_element_type(0)); diff --git a/inference-engine/src/cldnn_engine/ops/matmul.cpp 
b/inference-engine/src/cldnn_engine/ops/matmul.cpp index 8f7753c6c06..6db97d9d9f5 100644 --- a/inference-engine/src/cldnn_engine/ops/matmul.cpp +++ b/inference-engine/src/cldnn_engine/ops/matmul.cpp @@ -71,7 +71,6 @@ void CreateMatMulOp(Program& p, const std::shared_ptr& o THROW_IE_EXCEPTION << "MatMul " << op->get_friendly_name() << " shapes are inconsistent."; } size_t K = *(shape_a_aligned.end() - 1); - size_t O = *(shape_b_aligned.end() - 1); auto inputName = inputPrimitives[0]; auto weightsName = inputPrimitives[1]; diff --git a/inference-engine/src/cldnn_engine/ops/non_max_suppression.cpp b/inference-engine/src/cldnn_engine/ops/non_max_suppression.cpp index 5720c75e0d6..3aeec61e1a0 100644 --- a/inference-engine/src/cldnn_engine/ops/non_max_suppression.cpp +++ b/inference-engine/src/cldnn_engine/ops/non_max_suppression.cpp @@ -59,11 +59,6 @@ void CreateNonMaxSuppressionIEInternalOp(Program& p, const std::shared_ptrget_output_shape(0)[0]; auto boxesShape = op->get_input_shape(0); - int32_t num_batches = boxesShape.at(0); - int32_t num_boxes = boxesShape.at(1); - - auto scoresShape = op->get_input_shape(1); - int32_t num_classes = scoresShape.at(1); std::size_t num_output = op->get_output_size(); diff --git a/inference-engine/src/cldnn_engine/ops/strided_slice.cpp b/inference-engine/src/cldnn_engine/ops/strided_slice.cpp index 38d05dd8499..46e968a386b 100644 --- a/inference-engine/src/cldnn_engine/ops/strided_slice.cpp +++ b/inference-engine/src/cldnn_engine/ops/strided_slice.cpp @@ -226,7 +226,6 @@ void CreateStridedSliceOp(Program& p, const std::shared_ptr::value, "DnnActivation is not trival type"); -static const char *intel_dnn_activation_name[kActNumType] = { - "kActNone", - "kActSigmoid", - "kActTanh", - "kActRelu", - "kActLeakyRelu", - "kActIdentity", - "kActKaldiLstmClipping", - "kActExp", - "kActLog", - "kActSign", - "kActAbs", - "kActNegLog", - "kActNegHalfLog", - "kActCustom", - "kActSoftSign", - "kActPow", - "kActFakeQuantize" -}; +extern const char *intel_dnn_activation_name[kActNumType]; typedef enum DnnSoftmaxType { kSoftmaxNone, @@ -99,12 +81,7 @@ typedef enum DnnSoftmaxType { kSoftmaxNumType } intel_dnn_softmax_type_t; -static const char *intel_dnn_softmax_name[kSoftmaxNumType] = { - "kSoftmaxNone", - "kSoftmaxKaldiSumGroup", - "kSoftmaxKaldiApplyLog", - "kSoftmaxGoogle" -}; +extern const char *intel_dnn_softmax_name[kSoftmaxNumType]; typedef enum { kDnnUnknownOrientation = 100, @@ -128,19 +105,7 @@ typedef enum { kDnnNumOp } intel_dnn_operation_t; -static const char* intel_dnn_operation_name[kDnnNumOp] = { - "kDnnNullOp", - "kDnnAffineOp", - "kDnnDiagonalOp", - "kDnnConvolutional1dOp", - "kDnnConvolutional2dOp", - "kDnnPiecewiselinearOp", - "kDnnMaxPoolOp", - "kDnnRecurrentOp", - "kDnnInterleaveOp", - "kDnnDeinterleaveOp", - "kDnnCopyOp" -}; +extern const char* intel_dnn_operation_name[kDnnNumOp]; typedef enum { kDnnMacroOpNone, @@ -149,11 +114,7 @@ typedef enum { kDnnNumMacroOp } intel_dnn_macro_operation_t; -static const char *intel_dnn_macro_operation_name[kDnnNumMacroOp] = { - "kDnnMacroOpNone", - "kDnnMacroOpLstm", - "kDnnMacroOpBiLstm" -}; +extern const char *intel_dnn_macro_operation_name[kDnnNumMacroOp]; typedef enum { kDnnFloat, @@ -161,10 +122,7 @@ typedef enum { kDnnNumNumberType } intel_dnn_number_type_t; -static const char *intel_dnn_number_type_name[kDnnNumNumberType] = { - "kDnnFloat", - "kDnnInt" -}; +extern const char *intel_dnn_number_type_name[kDnnNumNumberType]; typedef struct { uint32_t num_bytes_per_weight; diff --git 
a/inference-engine/src/gna_plugin/frontend/model_quantizer.hpp b/inference-engine/src/gna_plugin/frontend/model_quantizer.hpp index 56eac819698..42e5cabb222 100644 --- a/inference-engine/src/gna_plugin/frontend/model_quantizer.hpp +++ b/inference-engine/src/gna_plugin/frontend/model_quantizer.hpp @@ -50,11 +50,6 @@ class ModelQuantizer { IE_ASSERT(copiedNet.get() != nullptr); copiedNet = InferenceEngine::CNNNetCopy(*copiedNet, visitor); - // TODO: probably not the best way of using dynamic cast in order to transform Precision - // one of solution is to create not copyNet overloads, that accepts 2 functors, one for layer copy - // and another one for net copy - auto rawNet = dynamic_cast(copiedNet.get()); - // allow client code to access copied topology, to avoid copies if user would like to chain quantisation with // another preprocessing cb(copiedNet, false); diff --git a/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp b/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp index 4e28250536a..c11078021d6 100644 --- a/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp +++ b/inference-engine/src/gna_plugin/frontend/scale_factor_calc.hpp @@ -162,7 +162,6 @@ class ScaleFactorPerLayer { if (CNNNetHasPrevLayer(cnnLayer)) { auto prevLayer = CNNNetPrevLayer(cnnLayer); - auto prevInfo = LayerInfo(prevLayer); auto inputQuant = InferenceEngine::getInjectedData(prevLayer); // locating corresponding memory layers with same ID for (auto&& input : CNNNetGetAllInputLayers(cnnLayer)) { diff --git a/inference-engine/src/gna_plugin/gna_graph_compiler.cpp b/inference-engine/src/gna_plugin/gna_graph_compiler.cpp index ac2ac1b10d4..694f3ecc9ac 100644 --- a/inference-engine/src/gna_plugin/gna_graph_compiler.cpp +++ b/inference-engine/src/gna_plugin/gna_graph_compiler.cpp @@ -572,11 +572,7 @@ void GNAGraphCompiler::finalizeConvolution2DPrimitive(InferenceEngine::CNNLayerP uint32_t num_feature_map_rows = (in_channels * in_height * in_width) / num_feature_map_columns; uint32_t filter_n = convolution._out_depth; - uint32_t num_columns_in = num_inputs; - uint32_t original_num_feature_map_rows = num_feature_map_rows; - uint32_t original_input_padding = num_input_padding; - uint32_t additional_padding = 0; // if kernel padding to multiple of 8 will cause missed outputs, need to pad further if (num_input_padding == 0) { @@ -689,11 +685,10 @@ void GNAGraphCompiler::finalizeConvolution2DPrimitive(InferenceEngine::CNNLayerP transposedWeights.resize(transposedWeights.size() + kernelPad); } - const auto t = convolution._weights->byteSize(); - gnamem->readonly().push_local_ptr(ptr_weights, - transposedWeights.data(), - transposedWeights.size(), - 64); + gnamem->readonly().push_local_ptr(ptr_weights, + transposedWeights.data(), + transposedWeights.size(), + 64); if (convolution._biases) { gnamem->readonly().push_ptr(ptr_biases, @@ -2011,6 +2006,7 @@ void GNAGraphCompiler::CreateLayerPrimitive(CNNLayerPtr layer) { {{"LSTMCell"}, SKIP}, {{"FakeQuantize"}, CREATE(FakeQuantizePrimitive)} // TODO: fakequantize layer should be properly converted to GNA scale factors for integer case }; + (void)layersBuilder; auto it = LayersBuilder::getStorage().find(layer->type); if (it != LayersBuilder::getStorage().end()) { it->second(this, layer); diff --git a/inference-engine/src/gna_plugin/gna_plugin.cpp b/inference-engine/src/gna_plugin/gna_plugin.cpp index da14d28b4fb..6bf456e8f26 100644 --- a/inference-engine/src/gna_plugin/gna_plugin.cpp +++ b/inference-engine/src/gna_plugin/gna_plugin.cpp @@ -1097,7 
+1097,6 @@ uint32_t GNAPlugin::QueueInference(const InferenceEngine::BlobMap &inputs, Infer } if (CNN2DAtInput) { auto dims = input.second->getTensorDesc().getDims(); - auto layout = input.second->getTensorDesc().getLayout(); auto hwDim = dims[2] * dims[3]; auto chanelsDim = dims[1]; RotateFeatures(reinterpret_cast(inputsDesc->getPtrInputsGlobal(input.first)[idx]), diff --git a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp index c5803f950cd..1a8d3b2fa33 100644 --- a/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp +++ b/inference-engine/src/gna_plugin/optimizer/gna_pass_manager.cpp @@ -63,8 +63,6 @@ static const char softSignLayersCounter[] = "numSoftSignLayers"; * @brief helper injections of diagonal layer with certain value */ -static const char diagonalLayerCounterName[] = "diagonalLayerCounter"; - static void insertDiagonalLayerBetween(InferenceEngine::CNNLayerPtr prevLayer, InferenceEngine::CNNLayerPtr nextLayer, std::shared_ptr passmanager, @@ -550,13 +548,6 @@ void ReversePermutationsPass::run() { return prev; }; - auto prevLayerSkipReshape = [&prevLayerSkipCertain](CNNLayerPtr layer) -> CNNLayerPtr { - return prevLayerSkipCertain(layer, [] (CNNLayerPtr l2) { - return LayerInfo(l2).isNonFunctional(); - }); - }; - - std::function nextLayerSkipReshape = [&nextLayerSkipReshape](CNNLayerPtr layer) -> CNNLayerPtr { if (layer->outData.empty()) { return nullptr; @@ -1445,7 +1436,6 @@ void SubstituteScaleShiftBroadCastPass::run() { auto batchSize = dataDims[0]; auto nElements = product(begin(dataDims), end(dataDims)) / batchSize; auto weightsElements = scaleShift->_weights->size(); - auto weightsBytes = scaleShift->_weights->byteSize(); if (!reshape_batch && nElements == weightsElements) { continue; @@ -1941,7 +1931,6 @@ void MoveFakeQuantizeLayerIntoQuantParamsPass :: run() { } float fqLevels = fqLayer.getLevels(); - float scaleInput = (fqLevels - 1) / (inputRange.second[0] - inputRange.first[0]); float scaleOutputs = (fqLevels - 1) / (outputRange.second[0] - outputRange.first[0]); // Before FQ layer is removed, the previous layer has to be updated with its quantization data diff --git a/inference-engine/src/gna_plugin/runtime/gna_float_runtime_op.cpp b/inference-engine/src/gna_plugin/runtime/gna_float_runtime_op.cpp index 4f6e44978f6..f62d02ac1db 100644 --- a/inference-engine/src/gna_plugin/runtime/gna_float_runtime_op.cpp +++ b/inference-engine/src/gna_plugin/runtime/gna_float_runtime_op.cpp @@ -68,7 +68,6 @@ void FP::ApplyDiagonalTransform(intel_dnn_component_t *component) { auto transform = &component->op.affine; int m = component->num_rows_out; int n = component->num_columns_in; - int ldb = component->num_columns_in; int ldc = component->num_columns_out; auto A = reinterpret_cast(transform->ptr_weights); diff --git a/inference-engine/src/gna_plugin/runtime/pwl.cpp b/inference-engine/src/gna_plugin/runtime/pwl.cpp index ce7d6aafdbd..b7d087c7b0e 100644 --- a/inference-engine/src/gna_plugin/runtime/pwl.cpp +++ b/inference-engine/src/gna_plugin/runtime/pwl.cpp @@ -74,7 +74,6 @@ double pivot_search(std::vector& result, double max_epsilon = 0.0; double max_epsilon_prev; double min_epsilon; - double min_epsilon2; double sgn = (negative) ? 
-1.0 : 1.0; int j; diff --git a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp index 49dab1dca22..8e031de5373 100644 --- a/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp +++ b/inference-engine/src/inference_engine/cnn_network_ngraph_impl.cpp @@ -283,8 +283,6 @@ std::shared_ptr CNNNetworkNGraphImpl::cloneFunction(bool const } void CNNNetworkNGraphImpl::reshape() { - ResponseDesc desc; - // Disable reshape for generic nodes ::ngraph::op::GenericIE::DisableReshape noReshape(_ngraph_function); reshape({}); diff --git a/inference-engine/src/legacy_api/src/ie_layer_validators.cpp b/inference-engine/src/legacy_api/src/ie_layer_validators.cpp index b32155c26cb..cd21dc63468 100644 --- a/inference-engine/src/legacy_api/src/ie_layer_validators.cpp +++ b/inference-engine/src/legacy_api/src/ie_layer_validators.cpp @@ -16,10 +16,6 @@ #include #include "ie_layer_validators.hpp" -#ifdef __clang__ -#pragma clang diagnostic ignored "-Wunused-variable" -#endif - namespace InferenceEngine { using namespace details; diff --git a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp index d6017ff5c1f..f94c874d8db 100644 --- a/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp +++ b/inference-engine/src/mkldnn_plugin/mkldnn_graph_optimizer.cpp @@ -1522,7 +1522,6 @@ void MKLDNNGraphOptimizer::FuseInterpolateAndSimpleOperation(MKLDNNGraph &graph) } auto childNode = parentNode->getChildEdgeAt(0)->getChild(); - auto interpolateNode = dynamic_cast(parentNode.get()); if (!isSutableChildNode(parentNode, childNode)) { parent++; continue; diff --git a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_memcpy.h b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_memcpy.h index f4c84369ba9..eae2ac68508 100755 --- a/inference-engine/src/mkldnn_plugin/nodes/common/cpu_memcpy.h +++ b/inference-engine/src/mkldnn_plugin/nodes/common/cpu_memcpy.h @@ -32,7 +32,6 @@ inline void cpu_memcpy(void* dst, const void* src, size_t count) { } inline int cpu_memcpy_s(void* dst, size_t dst_size, const void* src, size_t count) { - size_t i; if (!src || count > dst_size || count > (dst > src ? 
((uintptr_t)dst - (uintptr_t)src) : ((uintptr_t)src - (uintptr_t)dst))) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp index f6f0acee7cb..a8a28385681 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_interpolate_node.cpp @@ -2084,7 +2084,6 @@ void MKLDNNInterpolateNode::buildTblCubic(SizeVector& srcDimPad5d, SizeVector& d } void MKLDNNInterpolateNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) { - int blob_idx = 0; mkldnn::post_ops ops; for (auto &node : fusedWith) { @@ -2643,7 +2642,6 @@ void MKLDNNInterpolateNode::cubicCGathered(const uint8_t *in_ptr_, uint8_t *out_ } void MKLDNNInterpolateNode::cubicPlanar(const uint8_t *in_ptr_, uint8_t *out_ptr_, int B, int C, int IH, int IW, int OH, int OW) { - const int idxNum = 1; int tblAdvance = 0; int *xOrigin = static_cast(&indexTable[tblAdvance]); tblAdvance += OW; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp index 3418ed8c1fe..d6cd9a0f388 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_mvn_node.cpp @@ -638,7 +638,6 @@ void MKLDNNMVNNode::createPrimitive() { } void MKLDNNMVNNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) { - int blob_idx = 0; mkldnn::post_ops ops; for (auto &node : fusedWith) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp index 64aab3faed7..47f42e208ab 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_normalize_node.cpp @@ -858,7 +858,6 @@ void MKLDNNNormalizeNode::initSupportedPrimitiveDescriptors() { } void MKLDNNNormalizeNode::setPostOps(mkldnn::primitive_attr &attr, bool initWeights) { - int blob_idx = 0; mkldnn::post_ops ops; for (auto &node : fusedWith) { diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp index 8c1dcda1298..50d0cc3ea9c 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_reduce_node.cpp @@ -1685,7 +1685,7 @@ void MKLDNNReduceNode::reduce_BLK_concern_padding(const uint8_t *in_ptr, uint8_t size_t ob = ReduceN ? 
0 : ib; GET_PTR_N_BLK; if (!ReduceD && ReduceH && ReduceW) { for (size_t icb = 0; icb < ICB; icb++) { - size_t ocb = 0; GET_PTR_NC_BLK; + size_t ocb = 0;; size_t ic = icb * blk_size; parallel_for(ID, [&](size_t id) { size_t od = id; GET_PTR_NCD_BASE_PTR_N_BLK; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp index ab05d6730fa..65636d7e583 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_roi_align_node.cpp @@ -151,7 +151,6 @@ template void MKLDNNROIAlignNode::executeSpecified() { auto &srcMemory0 = getParentEdgeAt(0)->getMemory(); auto &srcMemory1 = getParentEdgeAt(1)->getMemory(); - auto &srcMemory2 = getParentEdgeAt(2)->getMemory(); auto &dstMemory = getChildEdgeAt(0)->getMemory(); auto srcBlockDesc = srcMemory0.GetDescriptor().data.layout_desc.blocking; diff --git a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp index b83159f2865..711c071746f 100644 --- a/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp +++ b/inference-engine/src/mkldnn_plugin/nodes/mkldnn_scatter_update_node.cpp @@ -438,7 +438,6 @@ void MKLDNNScatterUpdateNode::scatterElementsUpdate(uint8_t *indices, uint8_t *u SizeVector srcDataDim = getParentEdgeAt(DATA_ID)->getDesc().getDims(); SizeVector updateDim = getParentEdgeAt(UPDATE_ID)->getDesc().getDims(); SizeVector indicesDim = getParentEdgeAt(INDICES_ID)->getDesc().getDims(); - size_t srcRank = srcDataDim.size(); size_t updateRank = updateDim.size(); std::vector srcBlockND = getBlockND(srcDataDim); diff --git a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp index 6d9d3d2e7c1..5318adb61ea 100644 --- a/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp +++ b/inference-engine/src/readers/ir_reader/ie_ir_parser.cpp @@ -548,7 +548,6 @@ void V10Parser::parsePreProcess(CNNNetwork& network, const pugi::xml_node& root, if (!meanSegmentPrecision || meanSegmentPrecision == Precision::MIXED) THROW_IE_EXCEPTION << "mean blob defined without specifying precision."; - ResponseDesc resp; InferenceEngine::PreProcessChannel::Ptr preProcessChannel; int lastChanNo = -1; diff --git a/inference-engine/src/readers/ir_reader_v7/ie_layer_validators.cpp b/inference-engine/src/readers/ir_reader_v7/ie_layer_validators.cpp index e96c0592ba0..acd7b825d29 100644 --- a/inference-engine/src/readers/ir_reader_v7/ie_layer_validators.cpp +++ b/inference-engine/src/readers/ir_reader_v7/ie_layer_validators.cpp @@ -19,10 +19,6 @@ #include #include "xml_parse_utils.h" -#ifdef __clang__ -#pragma clang diagnostic ignored "-Wunused-variable" -#endif - namespace InferenceEngine { using namespace details; diff --git a/inference-engine/src/vpu/common/CMakeLists.txt b/inference-engine/src/vpu/common/CMakeLists.txt index ad900610d16..5c31c9a7a35 100644 --- a/inference-engine/src/vpu/common/CMakeLists.txt +++ b/inference-engine/src/vpu/common/CMakeLists.txt @@ -16,7 +16,6 @@ function(add_common_target TARGET_NAME STATIC_IE) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # TODO: enable some day and fix all warnings # target_compile_options(${TARGET_NAME} PRIVATE "-Wall") - target_compile_options(${TARGET_NAME} PRIVATE "-Werror=unused-variable") target_compile_options(${TARGET_NAME} PRIVATE "-Werror=unused-function") target_compile_options(${TARGET_NAME} 
PRIVATE "-Werror=strict-aliasing") endif() diff --git a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt index 8bcdad0d604..97bd4caa976 100644 --- a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt +++ b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt @@ -12,7 +12,6 @@ function(add_graph_transformer_target TARGET_NAME STATIC_IE) if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") # TODO: enable some day and fix all warnings # target_compile_options(${TARGET_NAME} PRIVATE "-Wall") - target_compile_options(${TARGET_NAME} PRIVATE "-Werror=unused-variable") target_compile_options(${TARGET_NAME} PRIVATE "-Werror=unused-function") target_compile_options(${TARGET_NAME} PRIVATE "-Werror=strict-aliasing") endif() diff --git a/inference-engine/tests/functional/inference_engine/async_infer_request_test.cpp b/inference-engine/tests/functional/inference_engine/async_infer_request_test.cpp index 1ce30df9738..5a84d5f0b95 100644 --- a/inference-engine/tests/functional/inference_engine/async_infer_request_test.cpp +++ b/inference-engine/tests/functional/inference_engine/async_infer_request_test.cpp @@ -81,7 +81,7 @@ TEST(InferRequestCPPTests, throwsOnUninitializedSetCompletionCallback) { TEST(InferRequestCPPTests, throwsOnUninitializedCast) { InferRequest req; - ASSERT_THROW(auto &ireq = static_cast(req), InferenceEngine::details::InferenceEngineException); + ASSERT_THROW((void)static_cast(req), InferenceEngine::details::InferenceEngineException); } TEST(InferRequestCPPTests, throwsOnUninitializedQueryState) { diff --git a/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp b/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp index 7505324bb76..baa9b3b3478 100644 --- a/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp +++ b/inference-engine/tests/functional/inference_engine/blob_copy_test.cpp @@ -107,7 +107,6 @@ template void FillBlobRandom(Blob::Ptr& inputBlob) { srand(1); auto inputBlobData = inputBlob->buffer().as(); - unsigned int seed = RAND_MAX; for (size_t i = 0; i < inputBlob->size(); i++) { inputBlobData[i] = (T) (GenerateRandom(RAND_MAX) / static_cast(RAND_MAX) * 100); } diff --git a/inference-engine/tests/functional/inference_engine/cnn_network/matmul_sr_tests.cpp b/inference-engine/tests/functional/inference_engine/cnn_network/matmul_sr_tests.cpp index 4941cc90067..ca572e70c35 100644 --- a/inference-engine/tests/functional/inference_engine/cnn_network/matmul_sr_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/cnn_network/matmul_sr_tests.cpp @@ -35,7 +35,6 @@ struct ReshapeMatMulTestCase { class SmartReshapeMatMulTests : public CommonTestUtils::TestsCommon, public testing::WithParamInterface> { public: void SetUp() override { - const auto& parameters = GetParam(); const auto& test_case = std::get<0>(GetParam()); std::shared_ptr ngraph; diff --git a/inference-engine/tests/functional/inference_engine/cnn_network_test.cpp b/inference-engine/tests/functional/inference_engine/cnn_network_test.cpp index c07a59916e9..726975e9e7f 100644 --- a/inference-engine/tests/functional/inference_engine/cnn_network_test.cpp +++ b/inference-engine/tests/functional/inference_engine/cnn_network_test.cpp @@ -41,12 +41,12 @@ TEST_F(CNNNetworkTests, throwsOnUninitializedGetName) { TEST_F(CNNNetworkTests, throwsOnUninitializedCastToICNNNetwork) { CNNNetwork network; - ASSERT_THROW(auto & net = static_cast(network), InferenceEngine::details::InferenceEngineException); + 
ASSERT_THROW((void)static_cast(network), InferenceEngine::details::InferenceEngineException); } TEST_F(CNNNetworkTests, throwsOnConstUninitializedCastToICNNNetwork) { const CNNNetwork network; - ASSERT_THROW(const auto & net = static_cast(network), InferenceEngine::details::InferenceEngineException); + ASSERT_THROW((void)static_cast(network), InferenceEngine::details::InferenceEngineException); } TEST_F(CNNNetworkTests, throwsOnUninitializedGetFunction) { diff --git a/inference-engine/tests/functional/inference_engine/executable_network.cpp b/inference-engine/tests/functional/inference_engine/executable_network.cpp index 89f3b75b98f..937328b4ffd 100644 --- a/inference-engine/tests/functional/inference_engine/executable_network.cpp +++ b/inference-engine/tests/functional/inference_engine/executable_network.cpp @@ -37,7 +37,7 @@ TEST(ExecutableNetworkTests, throwsOnUninitializedExportStream) { TEST(ExecutableNetworkTests, nothrowsOnUninitializedCast) { ExecutableNetwork exec; - ASSERT_NO_THROW(auto &enet = static_cast(exec)); + ASSERT_NO_THROW((void)static_cast(exec)); } TEST(ExecutableNetworkTests, throwsOnUninitializedGetExecGraphInfo) { diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_with_constant_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_with_constant_transformation.cpp index dc2936bd687..ba21ec876f6 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_with_constant_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/concat_with_intermediate_with_constant_transformation.cpp @@ -116,7 +116,6 @@ public: } static std::string getTestCaseName(testing::TestParamInfo obj) { - const ngraph::element::Type precision = std::get<0>(obj.param); const ngraph::Shape shape = std::get<1>(obj.param); ConcatTransformationTestValues testValues = std::get<2>(obj.param); diff --git a/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_transformation.cpp b/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_transformation.cpp index 095233dbcab..73a9bb09d12 100644 --- a/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_transformation.cpp +++ b/inference-engine/tests/functional/inference_engine/lp_transformations/multiply_transformation.cpp @@ -50,9 +50,7 @@ typedef std::tuple< class MultiplyTransformation : public LayerTransformation, public testing::WithParamInterface { public: void SetUp() override { - const ngraph::element::Type precision = std::get<0>(GetParam()); const ngraph::Shape shape = std::get<1>(GetParam()); - const bool broadcast = std::get<2>(GetParam()); const MultiplyTransformationTestValues testParams = std::get<3>(GetParam()); actualFunction = MultiplyFunction::get(shape, testParams.actual); diff --git a/inference-engine/tests/functional/inference_engine/parameter_tests.cpp b/inference-engine/tests/functional/inference_engine/parameter_tests.cpp index aabd2d2c234..fc7ba4f6d5b 100644 --- a/inference-engine/tests/functional/inference_engine/parameter_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/parameter_tests.cpp @@ -98,8 +98,8 @@ TEST_F(ParameterTests, StringParameterAsInt) { Parameter p = "4"; ASSERT_FALSE(p.is()); ASSERT_TRUE(p.is()); - ASSERT_THROW(int test = p, std::bad_cast); - ASSERT_THROW(int test = p.as(), std::bad_cast); + 
ASSERT_THROW((void)static_cast(p), std::bad_cast); + ASSERT_THROW((void)p.as(), std::bad_cast); } TEST_F(ParameterTests, ParameterAsTensorDesc) { @@ -259,10 +259,10 @@ TEST_F(ParameterTests, CompareParametersWithoutEqualOperator) { Parameter parB = b; Parameter parC = c; - ASSERT_THROW(bool equal = parA == parB, details::InferenceEngineException); - ASSERT_THROW(bool equal = parA != parB, details::InferenceEngineException); - ASSERT_THROW(bool equal = parA == parC, details::InferenceEngineException); - ASSERT_THROW(bool equal = parA != parC, details::InferenceEngineException); + ASSERT_THROW((void)(parA == parB), details::InferenceEngineException); + ASSERT_THROW((void)(parA != parB), details::InferenceEngineException); + ASSERT_THROW((void)(parA == parC), details::InferenceEngineException); + ASSERT_THROW((void)(parA != parC), details::InferenceEngineException); } TEST_F(ParameterTests, ParameterRemovedRealObject) { diff --git a/inference-engine/tests/functional/inference_engine/task_executor_tests.cpp b/inference-engine/tests/functional/inference_engine/task_executor_tests.cpp index 3e5dfb503ae..a263f95ad59 100644 --- a/inference-engine/tests/functional/inference_engine/task_executor_tests.cpp +++ b/inference-engine/tests/functional/inference_engine/task_executor_tests.cpp @@ -194,7 +194,6 @@ static auto Executors = ::testing::Values( streams, threads/streams, IStreamsExecutor::ThreadBindingType::NONE}); }, [] { - auto threads = parallel_get_max_threads(); return std::make_shared(); } ); diff --git a/inference-engine/tests/functional/inference_engine/transformations/convert_nms5_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_nms5_test.cpp index c5f0c2e3c80..ed2ca43e7ea 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/convert_nms5_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/convert_nms5_test.cpp @@ -283,7 +283,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1SixInputs) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto &orig_selected_indices_shape = f->get_output_partial_shape(0); pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -337,7 +336,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1FiveInputs) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto &orig_selected_indices_shape = f->get_output_partial_shape(0); pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -386,7 +384,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1FourInputs) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto &orig_selected_indices_shape = f->get_output_partial_shape(0); pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -434,7 +431,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1ThreeInputs) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto &orig_selected_indices_shape = f->get_output_partial_shape(0); pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -481,7 +477,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEDynamic1TwoInputs) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto &orig_selected_indices_shape = f->get_output_partial_shape(0); pass::Manager manager; manager.register_pass(); manager.register_pass(); diff --git 
a/inference-engine/tests/functional/inference_engine/transformations/convert_nms_to_nms_ie_internal_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_nms_to_nms_ie_internal_test.cpp index 219eb611c91..cfbe65302bc 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/convert_nms_to_nms_ie_internal_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/convert_nms_to_nms_ie_internal_test.cpp @@ -38,7 +38,6 @@ TEST(TransformationTests, ConvertNMS1ToNMSIEInternal) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto & orig_shape = f->get_output_partial_shape(0); ngraph::pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -80,7 +79,6 @@ TEST(TransformationTests, ConvertNMS3ToNMSIEInternal) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto & orig_shape = f->get_output_partial_shape(0); ngraph::pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -121,7 +119,6 @@ TEST(TransformationTests, ConvertNMS4ToNMSIEInternal) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto & orig_shape = f->get_output_partial_shape(0); ngraph::pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -163,7 +160,6 @@ TEST(TransformationTests, ConvertNMS5ToNMSIEInternal) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto & orig_shape = f->get_output_partial_shape(0); ngraph::pass::Manager manager; manager.register_pass(); manager.register_pass(); diff --git a/inference-engine/tests/functional/inference_engine/transformations/convert_nms_to_nms_ie_test.cpp b/inference-engine/tests/functional/inference_engine/transformations/convert_nms_to_nms_ie_test.cpp index 64a8ca7d6ad..35916319b19 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/convert_nms_to_nms_ie_test.cpp +++ b/inference-engine/tests/functional/inference_engine/transformations/convert_nms_to_nms_ie_test.cpp @@ -37,7 +37,6 @@ TEST(TransformationTests, ConvertNMSToNMSIEStatic) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto & orig_shape = f->get_output_partial_shape(0); ngraph::pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -156,7 +155,6 @@ TEST(TransformationTests, ConvertNMST1oNMSIE) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto & orig_shape = f->get_output_partial_shape(0); ngraph::pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -196,7 +194,6 @@ TEST(TransformationTests, ConvertNMST3oNMSIE) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto & orig_shape = f->get_output_partial_shape(0); ngraph::pass::Manager manager; manager.register_pass(); manager.register_pass(); @@ -235,7 +232,6 @@ TEST(TransformationTests, ConvertNMST4oNMSIE) { f = std::make_shared(NodeVector{nms}, ParameterVector{boxes, scores}); - const auto & orig_shape = f->get_output_partial_shape(0); ngraph::pass::Manager manager; manager.register_pass(); manager.register_pass(); diff --git a/inference-engine/tests/functional/inference_engine/transformations/fq_reshape_fusion.cpp b/inference-engine/tests/functional/inference_engine/transformations/fq_reshape_fusion.cpp index 8f166131c1a..a6a15bed5eb 100644 --- a/inference-engine/tests/functional/inference_engine/transformations/fq_reshape_fusion.cpp +++ 
b/inference-engine/tests/functional/inference_engine/transformations/fq_reshape_fusion.cpp @@ -36,7 +36,6 @@ public: std::shared_ptr f, ref_f; void SetUp() override { - const auto& parameters = GetParam(); const auto& test_case = std::get<0>(GetParam()); f = get_initial_function(test_case); if (test_case.is_negative) diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/normalize.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/normalize.cpp index 279559e7962..b13b19e6e22 100755 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/normalize.cpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/normalize.cpp @@ -22,7 +22,6 @@ public: static std::string getTestCaseName(testing::TestParamInfo obj) { LayerTestsDefinitions::NormalizeL2LayerTestParams basicParamsSet; CPUSpecificParams cpuParams; - Precision inputPrecision, outputPrecision; std::tie(basicParamsSet, cpuParams) = obj.param; std::ostringstream result; diff --git a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/region_yolo.cpp b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/region_yolo.cpp index e92e2e432a9..59cf14b865a 100644 --- a/inference-engine/tests/functional/plugin/cpu/single_layer_tests/region_yolo.cpp +++ b/inference-engine/tests/functional/plugin/cpu/single_layer_tests/region_yolo.cpp @@ -117,9 +117,6 @@ const std::vector> masks = { const std::vector do_softmax = {true, false}; const std::vector classes = {80, 20}; const std::vector num_regions = {5, 9}; -const size_t coords = 4; -const int start_axis = 1; -const int end_axis = 3; const regionYoloAttributes yoloV3attr = {80, 4, 9, false, 1, 3}; diff --git a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/core_threading_tests.cpp b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/core_threading_tests.cpp index 745fe67eb50..56e3c38a977 100644 --- a/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/core_threading_tests.cpp +++ b/inference-engine/tests/functional/plugin/gpu/shared_tests_instances/behavior/core_threading_tests.cpp @@ -35,7 +35,7 @@ TEST_P(CoreThreadingTestsWithIterations, smoke_LoadNetwork_RemoteContext) { runParallel([&] () { auto value = counter++; auto remote_context = make_shared_context(ie, CommonTestUtils::DEVICE_GPU, ocl_instance->_context.get()); - (void)ie.LoadNetwork(networks[(counter++) % networks.size()], remote_context); + (void)ie.LoadNetwork(networks[value % networks.size()], remote_context); }, numIterations, numThreads); } diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_clamp.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_clamp.cpp index a210d6a5676..db52408fc6f 100644 --- a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_clamp.cpp +++ b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_clamp.cpp @@ -23,8 +23,8 @@ class DynamicToStaticShapeClamp : public CommonTestUtils::TestsCommon, public: void SetUp() override { const auto& parameters = GetParam(); - const auto& dataType = std::get<0>(GetParam()); - const auto& dataDims = std::get<1>(GetParam()); + const auto& dataType = std::get<0>(parameters); + const auto& dataDims = std::get<1>(parameters); ngraph::helpers::CompareFunctions(*transform(dataType, dataDims), *reference(dataType, dataDims)); } 
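Note on the pattern in the myriad transformation-test hunks above and below: each SetUp() already cached GetParam() in a local named parameters, but then ignored the cache and called GetParam() again for every std::get, leaving the local unused once -Wunused-variable was enabled. The fix unpacks the cached tuple instead. A minimal standalone sketch of the fixed form (hypothetical tuple contents, not the real test fixture):

    #include <string>
    #include <tuple>

    // Stand-in for testing::WithParamInterface<...>::GetParam().
    static std::tuple<int, std::string> GetParam() { return {0, "CPU"}; }

    void SetUp() {
        const auto& parameters = GetParam();             // fetched once...
        const auto& dataType = std::get<0>(parameters);  // ...and unpacked from the cache
        const auto& dataDims = std::get<1>(parameters);  // instead of a second GetParam() call
        (void)dataType;  // the real tests use these to build the graph under test
        (void)dataDims;
    }
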
diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_convert.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_convert.cpp index 1343041d442..e52cacfacc4 100644 --- a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_convert.cpp +++ b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_convert.cpp @@ -23,8 +23,8 @@ class DynamicToStaticShapeConvert : public CommonTestUtils::TestsCommon, public: void SetUp() override { const auto& parameters = GetParam(); - const auto& dataType = std::get<0>(GetParam()); - const auto& dataDims = std::get<1>(GetParam()); + const auto& dataType = std::get<0>(parameters); + const auto& dataDims = std::get<1>(parameters); ngraph::helpers::CompareFunctions(*transform(dataType, dataDims), *reference(dataType, dataDims)); } diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_reduce.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_reduce.cpp index 49afdaaa9e0..9e6e4b2347b 100644 --- a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_reduce.cpp +++ b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_reduce.cpp @@ -134,8 +134,6 @@ protected: logical_reduce->set_keep_dims(reduce_setup.keep_dims); node->validate_and_infer_types(); - const auto data_rank_value = reduce_setup.data_shape.size(); - ngraph::Output output_shape; if (reduce_setup.keep_dims) { output_shape = std::make_shared( diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_transpose.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_transpose.cpp index 4826a1f59e5..3ec4749e28c 100644 --- a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_transpose.cpp +++ b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_transpose.cpp @@ -43,8 +43,8 @@ class DynamicToStaticShapeTranspose : public CommonTestUtils::TestsCommon, publi public: void SetUp() override { const auto& parameters = GetParam(); - const auto& dataType = std::get<0>(GetParam()); - const auto& dataDims = std::get<1>(GetParam()); + const auto& dataType = std::get<0>(parameters); + const auto& dataDims = std::get<1>(parameters); auto permutation = std::vector(dataDims.size()); std::iota(permutation.begin(), permutation.end(), 0); diff --git a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp index 4a13d98cf2d..c8e90680435 100644 --- a/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp +++ b/inference-engine/tests/functional/plugin/myriad/ngraph/transformations/dynamic_to_static_shape_unary_elementwise.cpp @@ -24,9 +24,9 @@ class DynamicToStaticShapeUnaryElementwise : public CommonTestUtils::TestsCommon public: void SetUp() override { const auto& parameters = GetParam(); - const auto& dataType = std::get<0>(GetParam()); - const auto& dataDims = std::get<1>(GetParam()); - const auto& type_info = std::get<2>(GetParam()); + const 
auto& dataType = std::get<0>(parameters); + const auto& dataDims = std::get<1>(parameters); + const auto& type_info = std::get<2>(parameters); ngraph::helpers::CompareFunctions(*transform(dataType, dataDims, type_info), *reference(dataType, dataDims, type_info)); } diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_reshape.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_reshape.cpp index 2ba3df2e5a5..4065d2b531a 100644 --- a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_reshape.cpp +++ b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_reshape.cpp @@ -20,9 +20,9 @@ class DSR_ReshapeWithStaticDescriptor : public testing::WithParamInterface createTestedOp() override { const auto& parameters = GetParam(); - const auto& inDataType = std::get<0>(GetParam()); - const auto& reshapeTestParams = std::get<1>(GetParam()); - targetDevice = std::get<2>(GetParam()); + const auto& inDataType = std::get<0>(parameters); + const auto& reshapeTestParams = std::get<1>(parameters); + targetDevice = std::get<2>(parameters); const auto& inDataShapes = std::get<0>(reshapeTestParams); const auto& specialZero = std::get<1>(reshapeTestParams); @@ -46,9 +46,9 @@ class DSR_ReshapeWithDynamicDescriptor : public testing::WithParamInterface createTestedOp() override { const auto& parameters = GetParam(); - const auto& inDataType = std::get<0>(GetParam()); - const auto& inDataShapes = std::get<0>(std::get<1>(GetParam())); - targetDevice = std::get<2>(GetParam()); + const auto& inDataType = std::get<0>(parameters); + const auto& inDataShapes = std::get<0>(std::get<1>(parameters)); + targetDevice = std::get<2>(parameters); const auto inputSubgraph = createInputSubgraphWithDSR(inDataType, inDataShapes); diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_topk.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_topk.cpp index 0b3dbcac7e1..5083c54466b 100644 --- a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_topk.cpp +++ b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/dsr_topk.cpp @@ -59,7 +59,6 @@ protected: std::shared_ptr createTestedOp() override { const auto& parameters = GetParam(); const auto& dataType = std::get<0>(parameters); - const auto& idxType = std::get<1>(parameters); const auto& topkSetup = std::get<2>(parameters); targetDevice = std::get<3>(parameters); diff --git a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/nonzero_transpose.cpp b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/nonzero_transpose.cpp index 75aba419890..4751dd8236d 100644 --- a/inference-engine/tests/functional/plugin/myriad/subgraph_tests/nonzero_transpose.cpp +++ b/inference-engine/tests/functional/plugin/myriad/subgraph_tests/nonzero_transpose.cpp @@ -23,9 +23,9 @@ protected: SetRefMode(LayerTestsUtils::RefMode::CONSTANT_FOLDING); const auto& parameters = GetParam(); - const auto& dataType = std::get<0>(GetParam()); - const auto& dataDims = std::get<1>(GetParam()); - targetDevice = std::get<2>(GetParam()); + const auto& dataType = std::get<0>(parameters); + const auto& dataDims = std::get<1>(parameters); + targetDevice = std::get<2>(parameters); const auto data = std::make_shared(dataType, dataDims); const auto nonZero = std::make_shared(data); diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp index 
b6110ae7651..c83ec86d628 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/config.hpp @@ -90,7 +90,7 @@ namespace BehaviorTestsDefinitions { } else { try { ie->SetConfig(configuration, targetDevice); - } catch (InferenceEngine::details::InferenceEngineException ex) {} + } catch (InferenceEngine::details::InferenceEngineException &) {} } } diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp index d4745c46a09..caabf060614 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/core_integration.hpp @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -1071,8 +1070,9 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_CONFIG_KEYS, GetMet Parameter deviceConfigValue = deviceExeNetwork.GetConfig(deviceConf); // HETERO returns EXCLUSIVE_ASYNC_REQUESTS as a boolean value - if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf) + if (CONFIG_KEY(EXCLUSIVE_ASYNC_REQUESTS) != deviceConf) { ASSERT_EQ(deviceConfigValue, heteroConfigValue); + } } } @@ -1109,8 +1109,9 @@ TEST_P(IEClassHeteroExecutableNetworkGetMetricTest_SUPPORTED_METRICS, GetMetricN Parameter deviceMetricValue = deviceExeNetwork.GetMetric(deviceMetricName); if (std::find(heteroSpecificMetrics.begin(), heteroSpecificMetrics.end(), deviceMetricName) == - heteroSpecificMetrics.end()) + heteroSpecificMetrics.end()) { ASSERT_TRUE(heteroMetricValue == deviceMetricValue); + } } } diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/core_threading_tests.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/core_threading_tests.hpp index bed550a776a..25148eac87c 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/core_threading_tests.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/core_threading_tests.hpp @@ -89,7 +89,6 @@ public: } static std::string getTestCaseName(testing::TestParamInfo obj) { - unsigned int numThreads, numIterations; std::string deviceName; Config config; std::tie(deviceName, config) = obj.param; diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp index 28d03e1f91b..793e0eb215a 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/infer_request.hpp @@ -422,7 +422,7 @@ TEST_P(InferRequestTests, canStartAsyncInferWithGetInOutWithStatusOnlyWait) { InferenceEngine::StatusCode sts; sts = req.Wait(InferenceEngine::IInferRequest::WaitMode::STATUS_ONLY); ASSERT_TRUE(sts == InferenceEngine::StatusCode::OK || - InferenceEngine::StatusCode::RESULT_NOT_READY); + sts == InferenceEngine::StatusCode::RESULT_NOT_READY); } // Plugin correct infer request with allocating input and result BlobMaps inside plugin @@ -482,8 +482,6 @@ TEST_P(InferRequestTests, canRun3AsyncRequestsConsistentlyWithWait) { auto req1 = execNet.CreateInferRequest(); auto req2 = execNet.CreateInferRequest(); auto req3 = execNet.CreateInferRequest(); - InferenceEngine::ResponseDesc response1, response2, response3; - InferenceEngine::StatusCode 
sts1, sts2, sts3; req1.StartAsync(); ASSERT_NO_THROW(req1.Wait(InferenceEngine::IInferRequest::WaitMode::RESULT_READY)); @@ -644,7 +642,6 @@ TEST_P(InferRequestTestsResultNotReady, ReturnResultNotReadyFromWaitInAsyncModeF // Create InferRequest InferenceEngine::InferRequest req; ASSERT_NO_THROW(req = execNet.CreateInferRequest()); - InferenceEngine::ResponseDesc response; InferenceEngine::StatusCode sts = InferenceEngine::StatusCode::OK; std::promise callbackTimeStamp; auto callbackTimeStampFuture = callbackTimeStamp.get_future(); diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp index ae825d49c5a..32d959d8e16 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/preprocessing.hpp @@ -13,16 +13,6 @@ #include "ie_preprocess.hpp" #include "base/behavior_test_utils.hpp" -namespace { -void setInputNetworkPrecision(InferenceEngine::CNNNetwork &network, InferenceEngine::InputsDataMap &inputs_info, - InferenceEngine::Precision input_precision) { - inputs_info = network.getInputsInfo(); - ASSERT_EQ(1u, inputs_info.size()); - inputs_info.begin()->second->setPrecision(input_precision); -} - -} - namespace BehaviorTestsDefinitions { using PreprocessingPrecisionConvertParams = std::tuple< diff --git a/inference-engine/tests/functional/plugin/shared/include/behavior/test_plugin.hpp b/inference-engine/tests/functional/plugin/shared/include/behavior/test_plugin.hpp index 36468505a85..afb611cff58 100644 --- a/inference-engine/tests/functional/plugin/shared/include/behavior/test_plugin.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/behavior/test_plugin.hpp @@ -87,7 +87,7 @@ TEST_P(BehaviorTestInput, canSetInputPrecisionForNetwork) { InferenceEngine::StatusCode sts = InferenceEngine::StatusCode::OK; try { ie->LoadNetwork(cnnNet, targetDevice, configuration); - } catch (InferenceEngine::details::InferenceEngineException ex) { + } catch (InferenceEngine::details::InferenceEngineException & ex) { msg = ex.what(); sts = ex.getStatus(); } @@ -113,7 +113,7 @@ TEST_P(BehaviorTestOutput, canSetOutputPrecisionForNetwork) { try { InferenceEngine::ExecutableNetwork exeNetwork = ie->LoadNetwork(cnnNet, targetDevice, configuration); - } catch (InferenceEngine::details::InferenceEngineException ex) { + } catch (InferenceEngine::details::InferenceEngineException & ex) { sts = ex.getStatus(); msg = ex.what(); std::cout << "LoadNetwork() threw InferenceEngineException. 
Status: " << sts << ", message: " << msg << std::endl; diff --git a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization.hpp b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization.hpp index 9ab9f4bd6ef..fae3aff2fd6 100644 --- a/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization.hpp +++ b/inference-engine/tests/functional/plugin/shared/include/subgraph_tests/concat_quantization.hpp @@ -18,7 +18,7 @@ TEST_P(ConcatQuantization, CompareWithRefImpl) { InferenceEngine::CNNNetwork cnnNetwork = InferenceEngine::CNNNetwork{ function }; executableNetwork = core->LoadNetwork(cnnNetwork, targetDevice); } - catch (InferenceEngine::details::InferenceEngineException ex) { + catch (InferenceEngine::details::InferenceEngineException & ex) { FAIL() << ex.what(); } }; diff --git a/inference-engine/tests/functional/plugin/shared/src/hetero/synthetic.cpp b/inference-engine/tests/functional/plugin/shared/src/hetero/synthetic.cpp index 06567643092..73e2731f305 100644 --- a/inference-engine/tests/functional/plugin/shared/src/hetero/synthetic.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/hetero/synthetic.cpp @@ -118,7 +118,6 @@ void HeteroSyntheticTest::TearDown() { } std::string HeteroSyntheticTest::SetUpAffinity() { - int id = 0; auto& param = GetParam(); std::string affinities; auto& pluginParameters = std::get(param); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp index c939167450a..650fb351bac 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations.cpp @@ -43,7 +43,6 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformations::GenerateInput( const float hight = 255.f / k; InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast(low), 1ul); - const auto buffer = input->buffer().as(); return input; } diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp index fae2f8fdaa1..460abe74c18 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat.cpp @@ -46,7 +46,6 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformationsForConcat::Gener const float low = 0.f / k; const float hight = 255.f / k; InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast(low), 1ul); - const auto buffer = input->buffer().as(); return input; } @@ -74,8 +73,6 @@ void OutputLayersHandlingInTransformationsForConcat::SetUp() { const auto input1 = std::make_shared(ngPrecision, ngraph::Shape(inputShape1)); input1->set_friendly_name("input1"); - const float low = 
0.f; - const float hight = 255.f; const auto fakeQuantize1 = ngraph::builder::makeFakeQuantize( input1->output(0), ngPrecision, 256ul, { 1ul }, { 0.f }, { 255.f }, { 0.f }, { 255.f }); diff --git a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp index ab28134d34b..ec7572b0bd0 100644 --- a/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp +++ b/inference-engine/tests/functional/plugin/shared/src/low_precision_transformations/output_layers_handling_in_transformations_for_concat_multi_channel.cpp @@ -56,7 +56,6 @@ InferenceEngine::Blob::Ptr OutputLayersHandlingInTransformationsForConcatMultiCh const float hight = interval.second / k; InferenceEngine::Blob::Ptr input = FuncTestUtils::createAndFillBlobConsistently(info.getTensorDesc(), hight - low, static_cast(low), 1ul); - const auto buffer = input->buffer().as(); return input; } diff --git a/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp b/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp index e44e9af32f5..5b6d6e8faa3 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/base/layer_test_utils.cpp @@ -116,6 +116,7 @@ void TestEnvironment::TearDown() { for (const auto &op : opsInfo) { std::string name = std::string(op.name) + "-" + std::to_string(op.version); pugi::xml_node entry = opsNode.append_child(name.c_str()); + (void)entry; } pugi::xml_node resultsNode = root.child("results"); diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp index a10acf218f0..750b3ba8a04 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/extract_image_patches.cpp @@ -13,7 +13,7 @@ std::string ExtractImagePatchesTest::getTestCaseName(const testing::TestParamInf ngraph::op::PadType pad_type; InferenceEngine::Precision netPrc; InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; + InferenceEngine::Layout inLayout; std::string targetName; std::tie(inputShape, kernel, strides, rates, pad_type, netPrc, inPrc, outPrc, inLayout, targetName) = obj.param; std::ostringstream result; diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp index bd4087cb228..0e10ec6a5c3 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/reorg_yolo.cpp @@ -25,7 +25,6 @@ void ReorgYoloLayerTest::SetUp() { size_t stride; InferenceEngine::Precision netPrecision; std::tie(inputShape, stride, netPrecision, targetDevice) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); auto param = std::make_shared(ngraph::element::f32, inputShape); auto 
reorg_yolo = std::make_shared(param, stride); function = std::make_shared(std::make_shared(reorg_yolo), ngraph::ParameterVector{param}, "ReorgYolo"); diff --git a/inference-engine/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp b/inference-engine/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp index 264d42e924a..8d1820ab6c0 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/single_layer/space_to_batch.cpp @@ -32,7 +32,7 @@ std::string SpaceToBatchLayerTest::getTestCaseName(const testing::TestParamInfo< void SpaceToBatchLayerTest::SetUp() { std::vector inputShape; std::vector blockShape, padsBegin, padsEnd; - InferenceEngine::Precision inputPrecision, netPrecision; + InferenceEngine::Precision netPrecision; std::tie(blockShape, padsBegin, padsEnd, inputShape, netPrecision, inPrc, outPrc, inLayout, outLayout, targetDevice) = this->GetParam(); auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp index d0ab98bdc30..529b748ea1b 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/basic_lstm.cpp @@ -30,7 +30,6 @@ void Basic_LSTM_S::SetUp() { InferenceEngine::Precision netPrecision; std::tie(netPrecision, targetDevice, configuration) = this->GetParam(); - auto ngPrc = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(netPrecision); hidden_size = 118; outPrc = InferenceEngine::Precision::FP32; diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp index 4e29db92b5a..a49133d8cfb 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/concat_multi_input.cpp @@ -76,7 +76,7 @@ void ConcatMultiInput::GenerateConstOnlyModel() { std::vector res; std::uniform_real_distribution dist(min, max); - for (int i = 0; i < vec_len; i++) + for (std::size_t i = 0; i < vec_len; i++) res.emplace_back(static_cast(dist(gen))); return res; diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp index a6fc6c11ed8..7137feaed98 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/constant_result.cpp @@ -16,7 +16,6 @@ std::string ConstantResultSubgraphTest::getTestCaseName(testing::TestParamInfoGetParam(); std::vector data(300); for (size_t i = 0; i < 300; i++) diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp index 985252c06a4..2980bfc1ec0 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/memory_eltwise_reshape_concat.cpp @@ -40,7 +40,7 @@ void 
MemoryEltwiseReshapeConcatTest::SetUp() { std::vector res; std::uniform_real_distribution dist(min, max); - for (int i = 0; i < vec_len; i++) + for (std::size_t i = 0; i < vec_len; i++) res.emplace_back(static_cast(dist(gen))); return res; diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp index 19b6d162c51..b09e16a6805 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/parameter_result.cpp @@ -16,7 +16,6 @@ std::string ParameterResultSubgraphTest::getTestCaseName(testing::TestParamInfo< void ParameterResultSubgraphTest::SetUp() { InferenceEngine::SizeVector inputShapes; - InferenceEngine::Precision inputPrecision; std::tie(targetDevice) = this->GetParam(); auto parameter = std::make_shared(ngraph::element::Type_t::f32, ngraph::Shape{1, 3, 10, 10}); diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/reshape_squeeze_reshape_relu.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/reshape_squeeze_reshape_relu.cpp index 573a022b07d..599fd8604f9 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/reshape_squeeze_reshape_relu.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/reshape_squeeze_reshape_relu.cpp @@ -10,7 +10,6 @@ namespace SubgraphTestsDefinitions { ShapeAxesTuple squeezeShape; InferenceEngine::Precision netPrecision; std::string targetName; - bool is_squeeze; ngraph::helpers::SqueezeOpType opType; std::tie(squeezeShape, netPrecision, targetName, opType) = obj.param; std::ostringstream results; diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp index 7b4d2a3982b..5bd1936b92e 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/trivial_concat.cpp @@ -7,11 +7,8 @@ namespace SubgraphTestsDefinitions { std::string TrivialConcatLayerTest::getTestCaseName(const testing::TestParamInfo &obj) { - int axis; std::vector inputShapes; InferenceEngine::Precision netPrecision; - InferenceEngine::Precision inPrc, outPrc; - InferenceEngine::Layout inLayout, outLayout; std::string targetName; std::map config; std::tie(inputShapes, netPrecision, targetName, config) = obj.param; diff --git a/inference-engine/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp b/inference-engine/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp index dc71ff14ed4..7b17c979d61 100644 --- a/inference-engine/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp +++ b/inference-engine/tests/functional/shared_test_classes/src/subgraph/two_fake_quantize_to_fullyconnected.cpp @@ -75,7 +75,7 @@ void FakeQuantizeSubgraphTest::SetUp() { std::vector res; std::uniform_real_distribution dist(min, max); - for (int i = 0; i < vec_len; i++) + for (std::size_t i = 0; i < vec_len; i++) res.emplace_back(static_cast(dist(gen))); return res; diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp index 
9a3fffd17b8..d040199687e 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp +++ b/inference-engine/tests/ie_test_utils/common_test_utils/data_utils.hpp @@ -16,7 +16,7 @@ namespace CommonTestUtils { -static void fill_data(float *data, size_t size, size_t duty_ratio = 10) { +inline void fill_data(float *data, size_t size, size_t duty_ratio = 10) { for (size_t i = 0; i < size; i++) { if ((i / duty_ratio) % 2 == 1) { data[i] = 0.0f; @@ -26,7 +26,7 @@ static void fill_data(float *data, size_t size, size_t duty_ratio = 10) { } } -static void fill_data_sine(float *data, size_t size, float center, float ampl, float omega) { +inline void fill_data_sine(float *data, size_t size, float center, float ampl, float omega) { for (size_t i = 0; i < size; i++) { data[i] = center + ampl * sin(static_cast(i) * omega); } @@ -36,12 +36,12 @@ static void fill_data_sine(float *data, size_t size, float center, float ampl, f * @brief Create vector of floats with length of vec_len, with values ranging from min to max, * with initial seed equal to variable seed with default of 0 */ -static inline std::vector generate_float_numbers(std::size_t vec_len, float min, float max, int seed = 0) { +inline std::vector generate_float_numbers(std::size_t vec_len, float min, float max, int seed = 0) { std::vector res; std::mt19937 gen(static_cast(seed)); std::uniform_real_distribution dist(min, max); - for (int i = 0; i < vec_len; i++) + for (std::size_t i = 0; i < vec_len; i++) res.emplace_back(static_cast(dist(gen))); return res; @@ -96,7 +96,7 @@ void fill_data_const(InferenceEngine::Blob::Ptr& blob, float val); */ size_t byte_size(const InferenceEngine::TensorDesc &tdesc); -static void fill_data_bbox(float *data, size_t size, int height, int width, float omega) { +inline void fill_data_bbox(float *data, size_t size, int height, int width, float omega) { float center_h = (height - 1.0f) / 2; float center_w = (width - 1.0f) / 2; for (size_t i = 0; i < size; i = i + 5) { @@ -123,7 +123,7 @@ static void fill_data_bbox(float *data, size_t size, int height, int width, floa } } -static void fill_data_roi(float *data, size_t size, const uint32_t range, const int height, const int width, const float omega, +inline void fill_data_roi(float *data, size_t size, const uint32_t range, const int height, const int width, const float omega, const bool is_roi_max_mode, const int seed = 1) { std::default_random_engine random(seed); std::uniform_int_distribution distribution(0, range); @@ -342,21 +342,21 @@ void inline fill_data_random(InferenceEngine:: template typename std::enable_if::value, T>::type -static ie_abs(const T &val) { +inline ie_abs(const T &val) { return std::abs(val); } template typename std::enable_if::value, T>::type -static ie_abs(const T &val) { +inline ie_abs(const T &val) { return val; } -static ngraph::bfloat16 ie_abs(const ngraph::bfloat16& val) { +inline ngraph::bfloat16 ie_abs(const ngraph::bfloat16& val) { return ngraph::bfloat16::from_bits(val.to_bits() & 0x7FFF); } -static ngraph::float16 ie_abs(const ngraph::float16& val) { +inline ngraph::float16 ie_abs(const ngraph::float16& val) { return ngraph::float16::from_bits(val.to_bits() ^ 0x8000); } diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/unicode_utils.cpp b/inference-engine/tests/ie_test_utils/common_test_utils/unicode_utils.cpp new file mode 100644 index 00000000000..a5e2b1e7864 --- /dev/null +++ b/inference-engine/tests/ie_test_utils/common_test_utils/unicode_utils.cpp @@ -0,0 +1,25 @@ +// Copyright (C) 2019 
Intel Corporation +// SPDX-License-Identifier: Apache-2.0 +// + +#include +#include "unicode_utils.hpp" + +#ifdef ENABLE_UNICODE_PATH_SUPPORT + +namespace CommonTestUtils { + +const std::vector test_unicode_postfix_vector = { + L"unicode_Яㅎあ", + L"ひらがな日本語", + L"大家有天分", + L"עפצקרשתםןףץ", + L"ث خ ذ ض ظ غ", + L"그것이정당하다", + L"АБВГДЕЁЖЗИЙ", + L"СТУФХЦЧШЩЬЮЯ" +}; + +} // namespace CommonTestUtils + +#endif // ENABLE_UNICODE_PATH_SUPPORT diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/unicode_utils.hpp b/inference-engine/tests/ie_test_utils/common_test_utils/unicode_utils.hpp index 44a4523a3a6..196167d201b 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/unicode_utils.hpp +++ b/inference-engine/tests/ie_test_utils/common_test_utils/unicode_utils.hpp @@ -16,19 +16,19 @@ #ifdef ENABLE_UNICODE_PATH_SUPPORT namespace CommonTestUtils { -static void fixSlashes(std::string &str) { +inline void fixSlashes(std::string &str) { std::replace(str.begin(), str.end(), '/', '\\'); } -static void fixSlashes(std::wstring &str) { +inline void fixSlashes(std::wstring &str) { std::replace(str.begin(), str.end(), L'/', L'\\'); } -static std::wstring stringToWString(std::string input) { +inline std::wstring stringToWString(std::string input) { return ::FileUtils::multiByteCharToWString(input.c_str()); } -static bool copyFile(std::wstring source_path, std::wstring dest_path) { +inline bool copyFile(std::wstring source_path, std::wstring dest_path) { #ifndef _WIN32 std::ifstream source(FileUtils::wStringtoMBCSstringChar(source_path), std::ios::binary); std::ofstream dest(FileUtils::wStringtoMBCSstringChar(dest_path), std::ios::binary); @@ -49,11 +49,11 @@ static bool copyFile(std::wstring source_path, std::wstring dest_path) { return result; } -static bool copyFile(std::string source_path, std::wstring dest_path) { +inline bool copyFile(std::string source_path, std::wstring dest_path) { return copyFile(stringToWString(source_path), dest_path); } -static std::wstring addUnicodePostfixToPath(std::string source_path, std::wstring postfix) { +inline std::wstring addUnicodePostfixToPath(std::string source_path, std::wstring postfix) { fixSlashes(source_path); std::wstring result = stringToWString(source_path); std::wstring file_name = result.substr(0, result.size() - 4); @@ -62,7 +62,7 @@ static std::wstring addUnicodePostfixToPath(std::string source_path, std::wstrin return result; } -static void removeFile(std::wstring path) { +inline void removeFile(std::wstring path) { int result = 0; if (!path.empty()) { #ifdef _WIN32 @@ -71,6 +71,7 @@ static void removeFile(std::wstring path) { result = remove(FileUtils::wStringtoMBCSstringChar(path).c_str()); #endif } + (void)result; } inline bool endsWith(const std::wstring& source, const std::wstring& expectedSuffix) { @@ -127,7 +128,7 @@ inline int removeFilesWithExt(std::wstring path, std::wstring ext) { return ret; } -static int removeDir(std::wstring path) { +inline int removeDir(std::wstring path) { int result = 0; if (!path.empty()) { #ifdef _WIN32 @@ -155,16 +156,7 @@ inline bool directoryExists(const std::wstring &path) { return false; } -static const std::vector test_unicode_postfix_vector = { - L"unicode_Яㅎあ", - L"ひらがな日本語", - L"大家有天分", - L"עפצקרשתםןףץ", - L"ث خ ذ ض ظ غ", - L"그것이정당하다", - L"АБВГДЕЁЖЗИЙ", - L"СТУФХЦЧШЩЬЮЯ" -}; +extern const std::vector test_unicode_postfix_vector; } // namespace CommonTestUtils #endif // ENABLE_UNICODE_PATH_SUPPORT diff --git a/inference-engine/tests/ie_test_utils/common_test_utils/w_dirent.h 
b/inference-engine/tests/ie_test_utils/common_test_utils/w_dirent.h index 933892623b0..c4407767889 100644 --- a/inference-engine/tests/ie_test_utils/common_test_utils/w_dirent.h +++ b/inference-engine/tests/ie_test_utils/common_test_utils/w_dirent.h @@ -36,6 +36,7 @@ #include #include #include +#include #include // Copied from linux libc sys/stat.h: diff --git a/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp index 4b3c049e8ac..73f445b9d42 100644 --- a/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp +++ b/inference-engine/tests/ie_test_utils/functional_test_utils/include/functional_test_utils/blob_utils.hpp @@ -21,8 +21,7 @@ namespace FuncTestUtils { namespace Bf16TestUtils { -static float reducePrecisionBitwise(const float in); -static short reducePrecisionBitwiseS(const float in); +inline short reducePrecisionBitwiseS(const float in); } // namespace Bf16TestUtils enum CompareType{ @@ -46,10 +45,10 @@ enum CompareType{ * @param printData A flag if data printing is demanded */ template -static void inline compareRawBuffers(const dType *res, const dType *ref, - size_t resSize, size_t refSize, - CompareType compareType, float thr1 = 0.01, float thr2 = 0.01, - bool printData = false) { +inline void compareRawBuffers(const dType *res, const dType *ref, + size_t resSize, size_t refSize, + CompareType compareType, float thr1 = 0.01, float thr2 = 0.01, + bool printData = false) { if (printData) { std::cout << "Reference results: " << std::endl; for (size_t i = 0; i < refSize; i++) { @@ -103,10 +102,10 @@ static void inline compareRawBuffers(const dType *res, const dType *ref, * @param printData Flag if data printing is demanded */ template -static void inline compareRawBuffers(const dType *res, const dType *ref, - size_t resSize, size_t refSize, - float thr = 0.01, - bool printData = false) { +inline void compareRawBuffers(const dType *res, const dType *ref, + size_t resSize, size_t refSize, + float thr = 0.01, + bool printData = false) { compareRawBuffers(res, ref, resSize, refSize, CompareType::ABS_AND_REL, thr, thr, printData); } /** @@ -125,7 +124,7 @@ static void inline compareRawBuffers(const dType *res, const dType *ref, * @param printData A flag if data printing is demanded */ template -static void inline compareRawBuffers(const std::vector res, const std::vector ref, +inline void compareRawBuffers(const std::vector res, const std::vector ref, const std::vector &resSizes, const std::vector &refSizes, CompareType compareType, float thr1 = 0.01, float thr2 = 0.01, bool printData = false) { @@ -150,9 +149,9 @@ static void inline compareRawBuffers(const std::vector res, const std:: * @param printData A flag if data printing is demanded */ template -static void inline compareRawBuffers(const std::vector res, const std::vector ref, - const std::vector &resSizes, const std::vector &refSizes, - float thr = 0.01, bool printData = false) { +inline void compareRawBuffers(const std::vector res, const std::vector ref, + const std::vector &resSizes, const std::vector &refSizes, + float thr = 0.01, bool printData = false) { compareRawBuffers(res, ref, resSizes, refSizes, CompareType::ABS_AND_REL, thr, thr, printData); } /** @@ -171,7 +170,7 @@ static void inline compareRawBuffers(const std::vector res, const std:: * @param printData A flag if data printing is demanded */ template -static 
void inline compareRawBuffers(const std::vector res, const std::vector> ref, +inline void compareRawBuffers(const std::vector res, const std::vector> ref, const std::vector &resSizes, const std::vector &refSizes, CompareType compareType, float thr1 = 0.01, float thr2 = 0.01, bool printData = false) { @@ -196,14 +195,14 @@ static void inline compareRawBuffers(const std::vector res, const std:: * @param printData A flag if data printing is demanded */ template -static void inline compareRawBuffers(const std::vector res, const std::vector> ref, - const std::vector &resSizes, const std::vector &refSizes, - float thr = 0.01, bool printData = false) { +inline void compareRawBuffers(const std::vector res, const std::vector> ref, + const std::vector &resSizes, const std::vector &refSizes, + float thr = 0.01, bool printData = false) { compareRawBuffers(res, ref, resSizes, refSizes, CompareType::ABS_AND_REL, thr, thr, printData); } template -void inline +inline void compareBlobData(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Blob::Ptr &ref, float max_diff = 0.01, const std::string &assertDetails = "", bool printData = false) { using dataType = typename InferenceEngine::PrecisionTrait::value_type; @@ -243,13 +242,12 @@ compareBlobData(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Bl template -void inline +inline void compareBlobData(const std::vector &res, const std::vector &ref, float max_diff = 0.01, const std::string &assertDetails = "", bool printData = false) { IE_ASSERT(res.size() == ref.size()) << "Length of comparing and references blobs vector are not equal!" << assertDetails; - using dataType = typename InferenceEngine::PrecisionTrait::value_type; for (size_t i = 0; i < res.size(); i++) { if (printData) std::cout << "BEGIN CHECK BLOB [" << i << "]" << std::endl; @@ -259,7 +257,7 @@ compareBlobData(const std::vector &res, const std::v } } -void inline +inline void compareBlobs(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Blob::Ptr &ref, float max_diff = 0.01, const std::string &assertDetails = "", bool printData = false) { ASSERT_EQ(res->byteSize(), ref->byteSize()) << "Blobs have different byteSize(): " @@ -284,7 +282,7 @@ compareBlobs(const InferenceEngine::Blob::Ptr &res, const InferenceEngine::Blob: } } -void inline GetComparisonThreshold(InferenceEngine::Precision prc, float &absoluteThreshold, float &relativeThreshold) { +inline void GetComparisonThreshold(InferenceEngine::Precision prc, float &absoluteThreshold, float &relativeThreshold) { switch (prc) { case InferenceEngine::Precision::FP32: absoluteThreshold = relativeThreshold = 1e-4; @@ -302,7 +300,7 @@ void inline GetComparisonThreshold(InferenceEngine::Precision prc, float &absolu } } -float inline GetComparisonThreshold(InferenceEngine::Precision prc) { +inline float GetComparisonThreshold(InferenceEngine::Precision prc) { float res; GetComparisonThreshold(prc, res, res); return res; @@ -310,7 +308,7 @@ float inline GetComparisonThreshold(InferenceEngine::Precision prc) { // Copy from net_pass.h template -void inline convertArrayPrecision(typename InferenceEngine::PrecisionTrait::value_type *dst, +inline void convertArrayPrecision(typename InferenceEngine::PrecisionTrait::value_type *dst, const typename InferenceEngine::PrecisionTrait::value_type *src, size_t nelem) { using dst_type = typename InferenceEngine::PrecisionTrait::value_type; @@ -321,15 +319,14 @@ void inline convertArrayPrecision(typename InferenceEngine::PrecisionTrait -void inline +inline void 
convertArrayPrecision(float *dst, const short *src, size_t nelem) { - uint16_t a = *reinterpret_cast(src); InferenceEngine::PrecisionUtils::f16tof32Arrays(dst, src, nelem, 1.0f, 0.0f); } template<> -void inline +inline void convertArrayPrecision(float *dst, const short *src, size_t nelem) { auto srcBf16 = reinterpret_cast(src); @@ -339,7 +336,7 @@ convertArrayPrecision -InferenceEngine::Blob::Ptr inline convertBlobPrecision(const InferenceEngine::Blob::Ptr &blob) { +inline InferenceEngine::Blob::Ptr convertBlobPrecision(const InferenceEngine::Blob::Ptr &blob) { using from_d_type = typename InferenceEngine::PrecisionTrait::value_type; using to_d_type = typename InferenceEngine::PrecisionTrait::value_type; @@ -356,7 +353,7 @@ InferenceEngine::Blob::Ptr inline convertBlobPrecision(const InferenceEngine::Bl template -InferenceEngine::Blob::Ptr inline copyBlobWithCast(const InferenceEngine::Blob::Ptr &blob) { +inline InferenceEngine::Blob::Ptr copyBlobWithCast(const InferenceEngine::Blob::Ptr &blob) { InferenceEngine::Blob::Ptr newBlob; switch (blob->getTensorDesc().getPrecision()) { case InferenceEngine::Precision::FP32: @@ -387,7 +384,7 @@ InferenceEngine::Blob::Ptr inline copyBlobWithCast(const InferenceEngine::Blob:: return newBlob; } -InferenceEngine::Blob::Ptr inline createAndFillBlobFloatNormalDistribution(const InferenceEngine::TensorDesc &td, +inline InferenceEngine::Blob::Ptr createAndFillBlobFloatNormalDistribution(const InferenceEngine::TensorDesc &td, const float mean, const float stddev, const int32_t seed = 1) { @@ -412,7 +409,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobFloatNormalDistribution(const return blob; } -InferenceEngine::Blob::Ptr inline createAndFillBlobFloat(const InferenceEngine::TensorDesc &td, +inline InferenceEngine::Blob::Ptr createAndFillBlobFloat(const InferenceEngine::TensorDesc &td, const uint32_t range = 10, const int32_t start_from = 0, const int32_t resolution = 1, @@ -439,7 +436,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobFloat(const InferenceEngine:: return blob; } -InferenceEngine::Blob::Ptr inline createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc &td, +inline InferenceEngine::Blob::Ptr createAndFillBlobWithFloatArray(const InferenceEngine::TensorDesc &td, const float values[], const int size) { InferenceEngine::Blob::Ptr blob = make_blob_with_precision(td); @@ -463,7 +460,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobWithFloatArray(const Inferenc return blob; } -InferenceEngine::Blob::Ptr inline createAndFillBlob(const InferenceEngine::TensorDesc &td, +inline InferenceEngine::Blob::Ptr createAndFillBlob(const InferenceEngine::TensorDesc &td, const uint32_t range = 10, const int32_t start_from = 0, const int32_t resolution = 1, @@ -491,7 +488,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlob(const InferenceEngine::Tenso return blob; } -InferenceEngine::Blob::Ptr inline createAndFillBlobConsistently( +inline InferenceEngine::Blob::Ptr createAndFillBlobConsistently( const InferenceEngine::TensorDesc &td, const uint32_t range, const int32_t start_from, @@ -517,7 +514,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobConsistently( return blob; } -InferenceEngine::Blob::Ptr inline createAndFillBlobUniqueSequence( +inline InferenceEngine::Blob::Ptr createAndFillBlobUniqueSequence( const InferenceEngine::TensorDesc &td, const int32_t start_from = 0, const int32_t resolution = 1, @@ -543,7 +540,7 @@ InferenceEngine::Blob::Ptr inline createAndFillBlobUniqueSequence( return blob; } 
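The data_utils.hpp and blob_utils.hpp hunks above and below replace header-level `static` functions with `inline` ones. `static` gives each including translation unit its own private copy, and a TU that includes the header without calling the function earns an unused-function warning; `inline` definitions are merged across TUs under the one-definition rule, so neither problem arises. A minimal sketch of the distinction, assuming a hypothetical utility header:

    // utils.hpp -- hypothetical header included from many .cpp files
    #pragma once

    // static at namespace scope in a header: one private copy per translation
    // unit; a TU that includes this but never calls it warns (-Wunused-function).
    static int squareStatic(int v) { return v * v; }

    // inline: all definitions are merged into one by the linker, and unused
    // inclusions produce no warning.
    inline int squareInline(int v) { return v * v; }

The unicode postfix list takes the same idea one step further: the header keeps only an extern const declaration and the single definition moves into the new unicode_utils.cpp.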
-InferenceEngine::Blob::Ptr inline convertBlobLayout(const InferenceEngine::Blob::Ptr& in, +inline InferenceEngine::Blob::Ptr convertBlobLayout(const InferenceEngine::Blob::Ptr& in, InferenceEngine::Layout layout) { IE_ASSERT(in != nullptr) << "Got NULL pointer"; @@ -564,7 +561,7 @@ InferenceEngine::Blob::Ptr inline convertBlobLayout(const InferenceEngine::Blob: } template -static void fillInputsBySinValues(dType* data, size_t size) { +inline void fillInputsBySinValues(dType* data, size_t size) { if (std::is_same::value) { for (size_t i = 0; i < size; i++) { data[i] = sin(static_cast(i)); @@ -577,7 +574,7 @@ static void fillInputsBySinValues(dType* data, size_t size) { } template -static void fillInputsByCosValues(dType* data, size_t size) { +inline void fillInputsByCosValues(dType* data, size_t size) { if (std::is_same::value) { for (size_t i = 0; i < size; i++) { data[i] = sin(static_cast(i)); @@ -589,7 +586,7 @@ static void fillInputsByCosValues(dType* data, size_t size) { } } -static int fillInputsBySinValues(InferenceEngine::Blob::Ptr blob) { +inline int fillInputsBySinValues(InferenceEngine::Blob::Ptr blob) { InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as(blob); if (!mblob) { return -1; @@ -602,7 +599,7 @@ static int fillInputsBySinValues(InferenceEngine::Blob::Ptr blob) { return 0; } -static int fillInputsByCosValues(InferenceEngine::Blob::Ptr blob) { +inline int fillInputsByCosValues(InferenceEngine::Blob::Ptr blob) { InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as(blob); if (!mblob) { return -1; @@ -617,7 +614,13 @@ static int fillInputsByCosValues(InferenceEngine::Blob::Ptr blob) { namespace Bf16TestUtils { -static float reducePrecisionBitwise(const float in) { + +#if defined __GNUC__ +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wstrict-aliasing" +#endif + +inline float reducePrecisionBitwise(const float in) { float f = in; int* i = reinterpret_cast(&f); int t2 = *i & 0xFFFF0000; @@ -629,13 +632,18 @@ static float reducePrecisionBitwise(const float in) { return ft1; } -static short reducePrecisionBitwiseS(const float in) { +inline short reducePrecisionBitwiseS(const float in) { float f = reducePrecisionBitwise(in); int intf = *reinterpret_cast(&f); intf = intf >> 16; short s = intf; return s; } + +#if defined __GNUC__ +# pragma GCC diagnostic pop +#endif + } // namespace Bf16TestUtils enum class BlobKind { diff --git a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp index 04047f40994..8013dfd4760 100644 --- a/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp +++ b/inference-engine/tests/ngraph_helpers/lpt_ngraph_functions/src/fake_quantize_and_convolution_function.cpp @@ -16,7 +16,6 @@ std::shared_ptr FakeQuantizeAndConvolutionFunction::getOrigina const ngraph::Shape& inputShape, const FakeQuantizeOnData& fqOnData, const FakeQuantizeOnWeights& fqOnWeights) { - const float k = 50.f; const auto input = std::make_shared(precision, ngraph::Shape(inputShape)); const auto fakeQuantizeOnActivations = fqOnData.empty() ? 
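reducePrecisionBitwise in the Bf16TestUtils hunk above reads a float through an int*, which violates strict aliasing, so the patch brackets the existing bit-twiddling with `#pragma GCC diagnostic ignored "-Wstrict-aliasing"` rather than restructuring it. For reference, a sketch of the warning-free alternative (not what the patch does): punning through std::memcpy, which modern compilers lower to the same instructions:

    #include <cstdint>
    #include <cstring>

    // Truncate a float toward bfloat16 by zeroing the low 16 mantissa bits.
    // memcpy-based punning is well-defined, unlike casting between float*/int*.
    inline float reducePrecisionSketch(float in) {
        std::uint32_t bits;
        std::memcpy(&bits, &in, sizeof bits);   // copy the object representation
        bits &= 0xFFFF0000u;                    // keep sign, exponent, high mantissa
        float out;
        std::memcpy(&out, &bits, sizeof out);
        return out;
    }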
diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp index 84a3dc31a22..bcd098a62d1 100644 --- a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/subgraph_builders.hpp @@ -9,7 +9,7 @@ namespace ngraph { namespace builder { namespace subgraph { -static std::shared_ptr makeConvPoolRelu(std::vector inputShape = {1, 1, 32, 32}, +inline std::shared_ptr makeConvPoolRelu(std::vector inputShape = {1, 1, 32, 32}, ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); params.front()->set_friendly_name("Param_1"); @@ -39,7 +39,7 @@ static std::shared_ptr makeConvPoolRelu(std::vector in return fnPtr; } -static std::shared_ptr makeSplitConvConcat(std::vector inputShape = {1, 4, 20, 20}, +inline std::shared_ptr makeSplitConvConcat(std::vector inputShape = {1, 4, 20, 20}, ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); @@ -59,7 +59,7 @@ static std::shared_ptr makeSplitConvConcat(std::vector return fnPtr; } -static std::shared_ptr makeKSOFunction(std::vector inputShape = {1, 4, 20, 20}, +inline std::shared_ptr makeKSOFunction(std::vector inputShape = {1, 4, 20, 20}, ngraph::element::Type_t ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); @@ -78,7 +78,7 @@ static std::shared_ptr makeKSOFunction(std::vector inp return fnPtr; } -static std::shared_ptr makeSplitMultiConvConcat(std::vector inputShape = {1, 4, 20, 20}) { +inline std::shared_ptr makeSplitMultiConvConcat(std::vector inputShape = {1, 4, 20, 20}) { auto ngPrc = ngraph::element::Type_t::f32; auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); @@ -122,7 +122,7 @@ static std::shared_ptr makeSplitMultiConvConcat(std::vector makeTIwithLSTMcell(ngraph::element::Type_t ngPRC = ngraph::element::Type_t::f32) { +inline std::shared_ptr makeTIwithLSTMcell(ngraph::element::Type_t ngPRC = ngraph::element::Type_t::f32) { // That which we iterate over const size_t N = 32; // Batch size const size_t L = 10; // Sequence length @@ -180,7 +180,7 @@ static std::shared_ptr makeTIwithLSTMcell(ngraph::element::Typ return fn_ptr; } -static std::shared_ptr makeSingleConv(std::vector inputShape = {1, 3, 24, 24}, +inline std::shared_ptr makeSingleConv(std::vector inputShape = {1, 3, 24, 24}, ngraph::element::Type_t type = ngraph::element::Type_t::f32) { auto param0 = std::make_shared(type, ngraph::Shape(inputShape)); @@ -192,7 +192,7 @@ static std::shared_ptr makeSingleConv(std::vector inpu return fn_ptr; } -static std::shared_ptr makeMultiSingleConv(std::vector inputShape = {1, 3, 24, 24}) { +inline std::shared_ptr makeMultiSingleConv(std::vector inputShape = {1, 3, 24, 24}) { ngraph::element::Type type = ngraph::element::Type_t::f32; auto param0 = std::make_shared(type, ngraph::Shape(inputShape)); auto conv1 = ngraph::builder::makeConvolution(param0, type, {3, 3}, {1, 1}, {0, 0}, {0, 0}, {1, 1}, @@ -221,7 +221,7 @@ static std::shared_ptr makeMultiSingleConv(std::vector return fn_ptr; } -static std::shared_ptr 
make2InputSubtract(std::vector inputShape = {1, 3, 24, 24}, +inline std::shared_ptr make2InputSubtract(std::vector inputShape = {1, 3, 24, 24}, ngraph::element::Type_t type = ngraph::element::Type_t::f32) { auto param0 = std::make_shared(type, ngraph::Shape(inputShape)); auto param1 = std::make_shared(type, ngraph::Shape(inputShape)); @@ -232,7 +232,7 @@ static std::shared_ptr make2InputSubtract(std::vector return fn_ptr; } -static std::shared_ptr makeNestedSplitConvConcat(std::vector inputShape = {1, 4, 20, 20}, +inline std::shared_ptr makeNestedSplitConvConcat(std::vector inputShape = {1, 4, 20, 20}, ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); @@ -264,7 +264,7 @@ static std::shared_ptr makeNestedSplitConvConcat(std::vector makeSplitConvConcatInputInBranch(std::vector inputShape = {1, 4, 20, 20}, +inline std::shared_ptr makeSplitConvConcatInputInBranch(std::vector inputShape = {1, 4, 20, 20}, ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape}); auto split = ngraph::builder::makeSplit(params[0], ngPrc, 2, 1); @@ -294,7 +294,7 @@ static std::shared_ptr makeSplitConvConcatInputInBranch(std::v return fnPtr; } -static std::shared_ptr makeSplitConvConcatNestedInBranch(std::vector inputShape = {1, 4, 20, 20}, +inline std::shared_ptr makeSplitConvConcatNestedInBranch(std::vector inputShape = {1, 4, 20, 20}, ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape}); int localId = 0; @@ -355,7 +355,7 @@ static std::shared_ptr makeSplitConvConcatNestedInBranch(std:: return fnPtr; } -static std::shared_ptr makeSplitConvConcatNestedInBranchNestedOut( +inline std::shared_ptr makeSplitConvConcatNestedInBranchNestedOut( std::vector inputShape = {1, 4, 20, 20}, ngraph::element::Type ngPrc = ngraph::element::Type_t::f32) { auto params = ngraph::builder::makeParams(ngPrc, {inputShape, inputShape}); @@ -457,7 +457,7 @@ static std::shared_ptr makeSplitConvConcatNestedInBranchNested return fnPtr; } -static std::shared_ptr makeConvBias(std::vector inputShape = {1, 3, 24, 24}, +inline std::shared_ptr makeConvBias(std::vector inputShape = {1, 3, 24, 24}, ngraph::element::Type type = ngraph::element::Type_t::f32) { auto parameter = ngraph::builder::makeParams(type, {inputShape}); parameter[0]->set_friendly_name("parameter"); @@ -475,7 +475,7 @@ static std::shared_ptr makeConvBias(std::vector inputS return fn_ptr; } -static std::shared_ptr makeReadConcatSplitAssign(std::vector inputShape = {1, 1, 2, 4}, +inline std::shared_ptr makeReadConcatSplitAssign(std::vector inputShape = {1, 1, 2, 4}, ngraph::element::Type type = ngraph::element::Type_t::f32) { auto parameter = ngraph::builder::makeParams(type, {inputShape}); parameter[0]->set_friendly_name("parameter"); diff --git a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp index c8d050773c5..7d9a13018b9 100644 --- a/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp +++ b/inference-engine/tests/ngraph_helpers/ngraph_functions/include/ngraph_functions/utils/data_utils.hpp @@ -28,7 +28,7 @@ generateVector(size_t vec_len, uint32_t upTo = 10, uint32_t startFrom = 
1, int32 // chose values between this range to avoid type overrun (e.g. in case of I8 precision) std::uniform_int_distribution dist(startFrom, upTo); - for (int i = 0; i < vec_len; i++) { + for (size_t i = 0; i < vec_len; i++) { res.push_back( static_cast::value_type>(dist(gen))); } @@ -46,7 +46,7 @@ std::vector inline generateF16Vector(size_t vec_len, uint32_t u // chose values between this range to avoid type overrun (e.g. in case of I8 precision) std::uniform_int_distribution dist(startFrom, upTo); - for (int i = 0; i < vec_len; i++) { + for (size_t i = 0; i < vec_len; i++) { res.emplace_back(ngraph::float16(static_cast(dist(gen)))); } return res; @@ -62,7 +62,7 @@ std::vector inline generateBF16Vector(size_t vec_len, uint32_t // chose values between this range to avoid type overrun (e.g. in case of I8 precision) std::uniform_int_distribution dist(startFrom, upTo); - for (int i = 0; i < vec_len; i++) { + for (size_t i = 0; i < vec_len; i++) { res.emplace_back(ngraph::bfloat16(static_cast(dist(gen)))); } return res; diff --git a/inference-engine/tests/unit/frontends/onnx_import/onnx_importer_test.cpp b/inference-engine/tests/unit/frontends/onnx_import/onnx_importer_test.cpp index baf7b757077..8d9ce065776 100644 --- a/inference-engine/tests/unit/frontends/onnx_import/onnx_importer_test.cpp +++ b/inference-engine/tests/unit/frontends/onnx_import/onnx_importer_test.cpp @@ -64,7 +64,6 @@ TEST(ONNX_Importer_Tests, ImportModelWithMultiOutput) { int count_topk = 0; int count_constants = 0; - int count_goe = 0; int count_parameters = 0; for (auto op : function->get_ops()) { diff --git a/inference-engine/tests/unit/vpu/middleend_tests/passes_tests/adjust_data_batch_tests.cpp b/inference-engine/tests/unit/vpu/middleend_tests/passes_tests/adjust_data_batch_tests.cpp index 67e62174fac..b24016f1f7e 100644 --- a/inference-engine/tests/unit/vpu/middleend_tests/passes_tests/adjust_data_batch_tests.cpp +++ b/inference-engine/tests/unit/vpu/middleend_tests/passes_tests/adjust_data_batch_tests.cpp @@ -347,6 +347,8 @@ TEST_F(VPU_AdjustDataBatchTest, DISABLED_BranchedWithBatchAndSplitItemsInTheEnd) const auto& branch1 = branches[0]; const auto& branch2 = branches[1]; const auto& data4 = CheckSingleConnection(branch1, 3); + (void)data4; const auto& data5 = CheckSingleConnection(branch2, 4); const auto& data6 = checkSingleLoopEnd(data5); + (void)data6; } diff --git a/inference-engine/tests_deprecated/CMakeLists.txt b/inference-engine/tests_deprecated/CMakeLists.txt index b369c1b630b..6e587878d86 100644 --- a/inference-engine/tests_deprecated/CMakeLists.txt +++ b/inference-engine/tests_deprecated/CMakeLists.txt @@ -4,6 +4,10 @@ enable_testing() +if(NOT MSVC) + ie_add_compiler_flags(-Wno-unused-variable) +endif() + add_subdirectory(helpers) if (ENABLE_GAPI_TESTS) diff --git a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/aot_behavior_tests.cpp b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/aot_behavior_tests.cpp index 30a0d9d17fa..0e2a458916a 100644 --- a/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/aot_behavior_tests.cpp +++ b/inference-engine/tests_deprecated/behavior/vpu/myriad_tests/aot_behavior_tests.cpp @@ -25,8 +25,6 @@ std::string getTestCaseName(testing::TestParamInfo obj) { return obj.param.device + "_" + obj.param.input_blob_precision.name() + (obj.param.config.size() ? 
"_" + obj.param.config.begin()->second : ""); } - -const int BLOB_VERSION_MAJOR = 3; } #if (defined(_WIN32) || defined(_WIN64) ) @@ -84,7 +82,7 @@ class AOTBehaviorTests : public BehaviorPluginTest { { ret = core.ImportNetwork("local_tmp.fw", GetParam().device); } - catch (InferenceEngine::details::InferenceEngineException ex) + catch (InferenceEngine::details::InferenceEngineException & ex) { return ex.getStatus(); } diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp index 658c1618a93..8ae040e1f8a 100644 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp +++ b/inference-engine/tests_deprecated/functional/ie_tests/src/custom_matcher.cpp @@ -250,9 +250,6 @@ void Regression::Matchers::CustomMatcher::checkResult() { */ if (isSaveOutput) { if (!config.fetch_result) { - - decltype(ctx.allOutputs().begin()) output; - // calculating all outputs size SizeVector dimsMerged; for(auto && output : ctx.allOutputs()) { @@ -318,13 +315,12 @@ void Regression::Matchers::CustomMatcher::checkResult() { if (cmpNear || cmpNearAvg) { int idx = 0; - float avgDiff = 0.0; - float sz = 0.0; - float maxDiff = 0.0; - float maxAverageDiff = 0.0; - float rms = 0.0; + float avgDiff = 0.0f; + float maxDiff = 0.0f; + float maxAverageDiff = 0.0f; + float rms = 0.0f; int nFrame = -1; - float avgFrames = 0.0; + float avgFrames = 0.0f; if (!config.fetch_result) { decltype(ctx.allOutputs().begin()) output; diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp index 69c8df447d3..cf9fe953ccd 100644 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp +++ b/inference-engine/tests_deprecated/functional/ie_tests/src/raw_matcher.cpp @@ -174,7 +174,6 @@ void RawMatcher::match() { for (auto &&item : out) { Blob::Ptr output; auto outputName = item.first; - auto& outBlob = item.second; if (!inferRequest) { output = allocateBlob(item.second->getTensorDesc()); } else { diff --git a/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp b/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp index 0f671640c67..3aab57a1a6f 100644 --- a/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp +++ b/inference-engine/tests_deprecated/functional/ie_tests/src/segmentation_matcher.cpp @@ -127,9 +127,6 @@ void SegmentationMatcher::match() { // Load image to blob ConvertImageToInput(reader->getData().get(), reader->size(), *input); - InferenceEngine::ResponseDesc dsc; - InferenceEngine::StatusCode sts; - auto loadedExecutableNetwork = config.ie_core->LoadNetwork(network, config._device_name, config.plugin_config); InferenceEngine::ExecutableNetwork executableNetwork; if (config.useExportImport) { diff --git a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp index 1d3a0a07c7a..24eef95effe 100644 --- a/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp +++ b/inference-engine/tests_deprecated/functional/mkldnn/single_layer_tests/region_yolo_tests.cpp @@ -66,7 +66,6 @@ static void ref_region_yolo(InferenceEngine::TBlob &src, InferenceEngine: int IW = (src.getTensorDesc().getDims().size() > 3) ? 
src.getTensorDesc().getDims()[3] : 1; int IH = (src.getTensorDesc().getDims().size() > 2) ? src.getTensorDesc().getDims()[2] : 1; - int IC = (src.getTensorDesc().getDims().size() > 1) ? src.getTensorDesc().getDims()[1] : 1; int B = (src.getTensorDesc().getDims().size() > 0) ? src.getTensorDesc().getDims()[0] : 1; for (int i = 0; i < src.size(); i++) { diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.cpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.cpp index f62efeb2818..4d17316ef07 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.cpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/def_conv_ref.cpp @@ -194,8 +194,6 @@ void ref_def_conv_common(const std::vector srcs, size_t OD = dst_dims.size() == 5lu ? dst_dims[2] : 1lu; size_t OC = prm.out_c; - size_t DG = prm.deformable_group; - const auto* src_data = srcs[0]->cbuffer().as(); const auto* trans_data = srcs[1]->cbuffer().as(); auto* dst_data = dst.buffer().as(); diff --git a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp index e32a3ba9b3f..c96b35462ed 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/common_single_layer_tests/single_layer_tests.hpp @@ -484,7 +484,6 @@ TEST_P(CommonSingleLayerTest, inferAfterReshape) { BufferWrapper src_ptr(src); BufferWrapper trans_ptr(trans); - float* weights_ptr = weights->buffer().as(); BufferWrapper dst_ptr(dst_ref); } else { request.Infer(); diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp index a9e1c9d6f2f..dbbc90511b0 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/lstm/lstm_ir_test.hpp @@ -48,15 +48,15 @@ TEST_P(LSTM_IR_Test, canParseLSTM) { return npy_data; }; - auto _save_to_npy = [&](std::string name, - const std::vector& npy_shape, - const std::vector& npy_data) { - std::replace(name.begin(), name.end(), '\\', '_'); - std::replace(name.begin(), name.end(), '/', '_'); - auto file_path = name + ".npy"; + // auto _save_to_npy = [&](std::string name, + // const std::vector& npy_shape, + // const std::vector& npy_data) { + // std::replace(name.begin(), name.end(), '\\', '_'); + // std::replace(name.begin(), name.end(), '/', '_'); + // auto file_path = name + ".npy"; - npy::SaveArrayAsNumpy(file_path, false, (unsigned int)(npy_shape.size()), npy_shape.data(), npy_data); - }; + // npy::SaveArrayAsNumpy(file_path, false, (unsigned int)(npy_shape.size()), npy_shape.data(), npy_data); + // }; for (auto &info: net.getInputsInfo()) { auto blob = inf_req.GetBlob(info.first); diff --git a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.cpp b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.cpp index 5db473a9e8f..92973c3e3a1 100644 --- a/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.cpp +++ b/inference-engine/tests_deprecated/functional/shared_tests/lstm/rnn_util.cpp @@ -175,8 +175,6 @@ static 
void copy_with_permute(Blob::Ptr &src, Blob::Ptr &dst, const std::vector< float *src_ptr = src->buffer().as(); float *dst_ptr = dst->buffer().as(); - float *_src_ptr = src->buffer().as(); - for (int i0 = 0; i0 < dims[0]; i0++) for (int i1 = 0; i1 < dims[1]; i1++) for (int i2 = 0; i2 < dims[2]; i2++) diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.hpp index fe9611abdab..a88f2d40b80 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_conv_nd_test.hpp @@ -437,7 +437,6 @@ private: const int weightsNDims = weightsShape.size(); const int kernelNDims = weightsNDims - 2; - const int biasesNDims = 1; IE_ASSERT(inputNDims == outputNDims); IE_ASSERT(inputNDims >= 3); // CHW, NCHW, NCDHW, ... diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.hpp index 4a9b993d134..84be867f260 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution1x1.hpp @@ -206,7 +206,6 @@ TEST_P(myriadConvolution1x1LayerTests_smoke, Convolution1x1) { return pair1.second.execution_index < pair2.second.execution_index; }); - unsigned currentIndex = 0; for (auto it = perfVec.begin(); it != perfVec.end(); ++it) { std::string layerName = it->first; InferenceEngine::InferenceEngineProfileInfo info = it->second; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.hpp index 54c9875dece..7945608d11e 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution3x3.hpp @@ -60,21 +60,15 @@ void refConvolution3x3(const Blob::Ptr src, InferenceEngine::TBlob::Ptr //the start address after 1 line/column padding(requested by convolution operation with 3x3 kernel ) in = in + 1 + 1 * IW; - int cnt = 0; - for (size_t g = 0; g < group; ++g) { for (size_t oc = 0; oc < dst_channels; ++oc) { - size_t dst_channel = (g * dst_channels + oc); for (size_t oh = 0; oh < OH; oh++) { for (size_t ow = 0; ow < OW; ow++) { - size_t oidx = dst_channel + ow * OC + oh * OC * OW; float val = 0.0f; - ie_fp16 hval = PrecisionUtils::f32tof16(val); - float fval = 0.0f; for (size_t ic = 0; ic < src_channels; ++ic) { @@ -239,7 +233,6 @@ TEST_P(myriadConvolution3x3LayerTests_smoke, Convolution3x3) { return pair1.second.execution_index < pair2.second.execution_index; }); - unsigned currentIndex = 0; for (auto it = perfVec.begin(); it != perfVec.end(); ++it) { std::string layerName = it->first; InferenceEngine::InferenceEngineProfileInfo info = it->second; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.hpp index 1397a97c467..3c88234d211 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.hpp +++ 
b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_convolution_test.hpp @@ -33,7 +33,6 @@ public: size_t out_channels = get<4>(GetParam()); size_t group = get<5>(GetParam()); param_size dilation_factor = get<6>(GetParam()); - vpu::LayoutPreference layoutPreference = get<7>(GetParam()); size_t out_w = (input_dims.w + 2 * pad.x - dilation_factor.x * (kernel.x - 1) - 1 + stride.x) / stride.x; size_t out_h = (input_dims.h + 2 * pad.y - dilation_factor.y * (kernel.y - 1) - 1 + stride.y) / stride.y; @@ -161,7 +160,6 @@ void FillWeights(uint16_t* ptr, size_t weightsSize) { ASSERT_NE(ptr, nullptr); auto szW = sizeof(s_3X3X3YOLO_Weights)/sizeof(s_3X3X3YOLO_Weights[0]); ASSERT_EQ(szW, weightsSize); - auto sz = szW; size_t indx = 0; for (; indx < szW; ++indx) { ptr[indx] = PrecisionUtils::f32tof16(s_3X3X3YOLO_Weights[indx]); @@ -202,7 +200,6 @@ TEST_P(myriadLayers_3X3X3_ConstInput_smoke, Convolution) { ASSERT_TRUE(generateNetAndInfer( NetworkInitParams().layoutPreference(layoutPreference) )); auto outputBlob = _outputMap.begin()->second; const uint16_t *res_ptr = outputBlob->buffer().as(); - size_t res_size = outputBlob->size(); size_t N = outputBlob->getTensorDesc().getDims()[0]; size_t C = outputBlob->getTensorDesc().getDims()[1]; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.hpp index 8f9fa91b6b2..d4e56da0c6b 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_crop_test.hpp @@ -78,8 +78,6 @@ TEST_P(myriadLayerCropOneInputAndDim_smoke, CropWithOneInputAndDim) { InferenceEngine::SizeVector input_dim2 = {tensor2.n, tensor2.c, tensor2.h, tensor2.w}; ASSERT_EQ(axis_val.size(), offsets.size()); ASSERT_EQ(axis_val.size(), dims.size()); - char prm[256]; - char val[256]; std::string axis; std::string offset; std::string dim; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.hpp index 5ad034dc5d4..af8a1d92aaa 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_custom_test.hpp @@ -301,8 +301,6 @@ static void refBinaryConvolution(const Blob::Ptr src, const Blob::Ptr weights, B static void refExperimentalDetectronPriorGridGenerator( std::vector &inputs, std::vector &outputs, int grid_h, int grid_w, int stride_h, int stride_w) { - int num_priors = inputs[0]->getTensorDesc().getDims()[0]; - uint16_t *src_data = inputs[0]->buffer(); uint16_t *dst_data = outputs[0]->buffer(); @@ -974,7 +972,6 @@ TEST_P(myriadLayersTestsQuantizeBinarize_smoke, Quantize_Binarization) { return pair1.second.execution_index < pair2.second.execution_index; }); - unsigned currentIndex = 0; for (auto it = perfVec.begin(); it != perfVec.end(); ++it) { std::string layerName = it->first; InferenceEngine::InferenceEngineProfileInfo info = it->second; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.hpp index 8ff670b418a..c671d15ba94 100644 --- 
a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_fully_connected_tests.hpp @@ -104,7 +104,6 @@ TEST_P(myriadLayersTestsFullyConnectedBatch_smoke, TestsFullyConnected) size_t sz_weights = IC * IH * IW * out_size; size_t sz_bias = 0; - size_t sz = sz_weights + sz_bias; _genDataCallback = genTestData1; _testNet.addLayer(LayerInitParams("FullyConnected") .params(params) @@ -142,7 +141,6 @@ TEST_P(myriadLayersTestsFullyConnectedPVA_smoke, TestsFullyConnected) size_t sz_weights = IC * IH * IW * out_size; size_t sz_bias = 0; - size_t sz = sz_weights + sz_bias; _genDataCallback = genTestData1; _testNet.addLayer(LayerInitParams("FullyConnected") .params(params) diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.hpp index 2325fe95da7..3280898eb57 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_gemm_test.hpp @@ -62,7 +62,6 @@ static void gemm_ref(int M, int N, int K, const int stride_a = (transposeA ? M : K); const int stride_b = (transposeB ? K : N); - const int stride_d = N; const int strideMB2_src1 = (MB2 != MB2_A) ? 0 : 1; const int strideMB2_src2 = (MB2 != MB2_B) ? 0 : 1; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.hpp index 860b8089223..0310b10cff5 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_lstm_cell.hpp @@ -239,9 +239,6 @@ void lstm_cell(int input_size, const int n_gates = 4; const int ohf = 0; const int ohi = 1; const int oho = 2; const int ohc = 3; - int num_weights = state_size * (input_size + state_size); - int num_bias = state_size; - /* gates = src_layer * weights_layer */ gemm(1, n_gates * state_size, input_size, src_layer, input_size, @@ -306,7 +303,6 @@ static void matrix_copy_transpose_repack(const ie_fp16 *psrc, ie_fp16 *pdst, int TEST_P(myriadLayersTestsLSTMCell_smoke, LSTMCell) { auto param = GetParam(); - lstmcell_test_params test_params = param; size_t input_size = param.input_size; size_t state_size = param.state_size; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.hpp index 29b61b981d6..013881cd33e 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pad_test.hpp @@ -47,17 +47,9 @@ void ref_pad(const Blob::Ptr src, ASSERT_NE(src_data, nullptr); ASSERT_NE(dst_data, nullptr); - int32_t padb_begin = pad_params.padb_begin; - int32_t padb_end = pad_params.padb_end; - int32_t padc_begin = pad_params.padc_begin; - int32_t padc_end = pad_params.padc_end; - int32_t padh_begin = pad_params.padh_begin; - int32_t padh_end = pad_params.padh_end; - int32_t padw_begin = pad_params.padw_begin; - int32_t padw_end = pad_params.padw_end; int32_t IW = 0; int32_t IH = 0; diff --git 
a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.hpp index 47d41201461..a48e444091d 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_permute_test.hpp @@ -63,7 +63,6 @@ TEST_P(myriadLayersPermuteTests_smoke, Permute) { int32_t IH = 0; int32_t IC = 0; int32_t I_N = 0; - size_t group = 0; auto p = ::testing::WithParamInterface::GetParam(); auto input_tensor = std::get<0>(p); diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.hpp index fcf32625fb3..42a22176a7f 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pool_nd_test.hpp @@ -285,7 +285,6 @@ private: // int inputNDims = inputShape.size(); - int kernelNDims = kernelShape.size(); int batchDim = inputNDims > 3 ? 0 : -1; int channelsDim = interleaved ? inputNDims - 1 : batchDim + 1; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.hpp index 89822c2a748..2b1a6f50bfd 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_pooling_test.hpp @@ -97,7 +97,6 @@ TEST_P(myriadLayers_IR3_PoolingTests_smoke, Pooling) { int32_t IH = 0; int32_t IC = 0; int32_t I_N = 0; - size_t group = 0; /*input tensor, kernel, stride, pads_begin, pads_end, auto_pad, exclude_pad method */ auto p = ::testing::WithParamInterface::GetParam(); auto input_tensor = std::get<0>(p); @@ -177,7 +176,6 @@ TEST_P(myriadLayers_IR3_BatchPoolingTests_smoke, Pooling) { int32_t IH = 0; int32_t IC = 0; int32_t I_N = 0; - size_t group = 0; /*input tensor, kernel, stride, pads_begin, pads_end, auto_pad, exclude_pad method */ auto p = ::testing::WithParamInterface::GetParam(); auto input_tensor = std::get<0>(p); diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.hpp index 06c988522ab..c7bacd25d34 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_psroipooling_test.hpp @@ -159,7 +159,6 @@ public: for (auto inputInfo: _inputsInfo) { InferenceEngine::SizeVector inputDims = inputInfo.second->getTensorDesc().getDims(); - InferenceEngine::Layout layout = inputInfo.second->getTensorDesc().getLayout(); Blob::Ptr data; ASSERT_NO_THROW(data = _inferRequest.GetBlob(inputInfo.first.c_str())); diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.hpp index b475cc6246e..b4914602611 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.hpp +++ 
b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_feature_extractor_test.hpp @@ -39,9 +39,6 @@ static void genROIs(InferenceEngine::Blob::Ptr rois, const int max_range_width = params.in_net_w * 4 / 5; const int max_range_height = params.in_net_h * 4 / 5; - float scale_width = (float)params.in_net_w; - float scale_height = (float)params.in_net_h; - for (int i = 0; i < num_rois; i++) { int x0 = std::rand() % max_range_width; int x1 = x0 + (std::rand() % (params.in_net_w - x0 - 1)) + 1; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.hpp index 09a166a15ba..3fb28d41c26 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_roi_pooling_test.hpp @@ -93,7 +93,6 @@ public: const int pooled_w = params.pooled_w; const float spatial_scale = params.spatial_scales; const int top_area = pooled_h * pooled_w; - const int top_volume = C * pooled_h * pooled_w; if (method == roi_pooling_max) // generate GT for roi_pooling_max { for (int r = 0; r < R; ++r) { @@ -230,8 +229,6 @@ public: ASSERT_NO_THROW(_outputsInfo = _cnnNetwork.getOutputsInfo()); for (auto inpt : _inputsInfo) { - InferenceEngine::Layout layout = inpt.second->getTensorDesc().getLayout(); - Blob::Ptr data; ASSERT_NO_THROW(data = _inferRequest.GetBlob(inpt.first.c_str())); diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.hpp index f557e983f00..4f8d3c79036 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_scatter_update_test.hpp @@ -63,8 +63,6 @@ protected: const int indicesNDims = indicesShape.size(); const int inputNDims = inputShape.size(); - const int axis = 0; - IE_ASSERT(inputNDims > 0); IE_ASSERT(indicesNDims > 0); @@ -77,7 +75,6 @@ protected: const int outputNDims = inputNDims; SizeVector axisShape = {}; - const int axisNDims = 0; // E.g.: // {N, C, H, W} could be shape of `input` and `output` @@ -87,7 +84,6 @@ protected: for (int i = 0; i < outputNDims - 1; i++) { updatesShape.push_back(outputShape[i + 1]); } - const int updatesNDims = updatesShape.size(); // // Initialize input tensors, and compute reference output diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.hpp index 1bd176749c1..15a24d01c74 100644 --- a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_select_test.hpp @@ -28,7 +28,6 @@ protected: void RefSelect() { auto itr = _inputMap.begin(); - int coeff_num = 0; const uint16_t *srcData = itr->second->buffer().as(); uint16_t *dstData = _refBlob->buffer().as(); uint32_t src_coords[4]; diff --git a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.hpp b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.hpp index d2452b6edc0..48b285ce235 100644 --- 
a/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/common/layers/myriad_layers_tile_test.hpp @@ -113,7 +113,6 @@ TEST_P(myriadLayerTestTile_smoke, Tile) { tile_test::nd_tensor_test_params input_dims = get<0>(GetParam()); int tiles = get<1>(GetParam()); - int ndims = input_dims.dims.size(); int axis = input_dims.axis; auto dims = input_dims.dims; SetInputTensors({dims}); diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp index 4e62d468cc3..c789f45e103 100644 --- a/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp +++ b/inference-engine/tests_deprecated/functional/vpu/vpu_base/myriad_layers_tests.hpp @@ -301,7 +301,6 @@ public: if (add_bias) { sz_bias = _par.out_c; } - size_t sz = sz_weights + sz_bias; // @todo: FullyConnected is not present in IRv10. Need to move to MatMul somehow. MatMul need different initializetion here. _testNet.addLayer(LayerInitParams(_irVersion == IRVersion::v10 ? "MatMul" : "FullyConnected") .params(params) diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.cpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.cpp index 3bcbda47287..fa79676e34a 100644 --- a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.cpp +++ b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_layers_tests.cpp @@ -84,7 +84,6 @@ void vpuLayersTests::dumpPerformance() { return pair1.second.execution_index < pair2.second.execution_index; }); - unsigned currentIndex = 0; for (auto it = perfVec.begin(); it != perfVec.end(); ++it) { std::string layerName = it->first; InferenceEngine::InferenceEngineProfileInfo info = it->second; diff --git a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.cpp b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.cpp index efcaf7d2261..a6b58f908b1 100644 --- a/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.cpp +++ b/inference-engine/tests_deprecated/functional/vpu/vpu_base/vpu_test_net.cpp @@ -160,7 +160,6 @@ VpuTestNet::NetworkSerializedData VpuTestNet::genNetwork(IRVersion version) { IE_ASSERT(!_layers.empty()); IRDumperNetwork IRDumper(version); IRDumper.addInput("input" , _layers.begin()->_inDim); - const size_t inputsSize = _layers.begin()->_inDim.size(); size_t testNetIndex = 0; for (auto& elem : _layers) { diff --git a/inference-engine/tests_deprecated/unit/engines/gna/gna_memory_test.cpp b/inference-engine/tests_deprecated/unit/engines/gna/gna_memory_test.cpp index 98701e7e79d..de09859e0e7 100644 --- a/inference-engine/tests_deprecated/unit/engines/gna/gna_memory_test.cpp +++ b/inference-engine/tests_deprecated/unit/engines/gna/gna_memory_test.cpp @@ -246,7 +246,6 @@ TEST_F(GNAMemoryTest, canPushLocal) { } //poison stack - float input [] = {11,21,31,41}; mem.commit(); ASSERT_FLOAT_EQ(pFuture[0], 1); diff --git a/inference-engine/tests_deprecated/unit/engines/gna/matchers/conv_matcher.hpp b/inference-engine/tests_deprecated/unit/engines/gna/matchers/conv_matcher.hpp index ef9b8920687..bc83c4100b0 100644 --- a/inference-engine/tests_deprecated/unit/engines/gna/matchers/conv_matcher.hpp +++ b/inference-engine/tests_deprecated/unit/engines/gna/matchers/conv_matcher.hpp @@ -17,9 +17,8 @@ class ConvoluionLayerMatcher : public 
::testing::MatcherInterfacenLayers; i++) { - if (foo->pLayers[i].nLayerKind != INTEL_CONVOLUTIONAL) continue; - - auto conv = (gna_convolutional_layer_t*)foo->pLayers[i].pLayerStruct; + if (foo->pLayers[i].nLayerKind != INTEL_CONVOLUTIONAL) + continue; return matchInserted; } diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp index 05406c11934..a5471f0ccf2 100644 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp +++ b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/extensions/normalize_tests.cpp @@ -365,7 +365,6 @@ public: const SizeVector& data_dims = data_desc.getDims(); InferenceEngine::Precision precision = data_desc.getPrecision(); - Layout layout; if (is_blocked) { int blk_size = InferenceEngine::with_cpu_x86_avx512f() ? 16 : 8; diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp index 9ca74dec253..301ddd77442 100644 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp +++ b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/layers/internal/graph_concat_test.cpp @@ -985,12 +985,9 @@ protected: graph.Infer(srcs, outputBlobs); float *src1_ptr = src2->buffer(); - size_t src1_size = src2->size(); float *src2_ptr = src1->buffer(); - size_t src2_size = src1->size(); float *dst_ptr = outputBlobs["o_concat"]->buffer(); - size_t dst_size = outputBlobs["o_concat"]->size(); int len1 = 1, len2 = 1, cycles; for (int dim = 1; dim < outputBlobs["o_concat"]->getTensorDesc().getDims().size(); dim++) { diff --git a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp index 301048de2f2..373d3ff7e98 100644 --- a/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp +++ b/inference-engine/tests_deprecated/unit/engines/mkldnn/graph/structure/graph_structure_test.cpp @@ -2364,7 +2364,6 @@ TEST_F(MKLDNNGraphStructureTests, TestLoadTopologyWithConstLayer) { InferenceEngine::TBlob *weights = new InferenceEngine::TBlob({ InferenceEngine::Precision::U8, {6400}, InferenceEngine::C }); weights->allocate(); - float * data = weights->buffer(); fill_data((float *) weights->buffer(), weights->size() / sizeof(float)); InferenceEngine::TBlob::Ptr weights_ptr = InferenceEngine::TBlob::Ptr(weights); diff --git a/inference-engine/thirdparty/clDNN/CMakeLists.txt b/inference-engine/thirdparty/clDNN/CMakeLists.txt index 8265bd8b15d..d17e3a72f17 100644 --- a/inference-engine/thirdparty/clDNN/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/CMakeLists.txt @@ -965,11 +965,11 @@ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS include_directories( ${CLDNN_UTILS__RAPIDJSON_INCDIRS} - "${CLDNN__KHR_CLHPP_DIR}" "${CLDNN__CODEGEN_INCDIR}" ) include_directories(SYSTEM + "${CLDNN__KHR_CLHPP_DIR}" ${CLDNN__IOCL_ICD_INCDIRS}) add_library(clDNN_OpenCL UNKNOWN IMPORTED) diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp 
b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp index 08e08d66edc..dce0b3f1fdf 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_bfyx_to_bs_fs_yx_bsv16_fsv16.cpp @@ -21,7 +21,6 @@ namespace kernel_selector { -static const size_t sub_group_size = 16; static const size_t feature_block_size = 16; static const size_t batch_block_size = 16; diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.cpp index 9594fc36275..c4a67fdf4cf 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/convolution/convolution_kernel_imad_b_fs_yx_fsv4_dw.cpp @@ -24,7 +24,6 @@ namespace kernel_selector { namespace { constexpr size_t fsv = 4; -constexpr size_t pref_simd = 16; constexpr size_t max_reg_usage = 64; enum mode : size_t { diff --git a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp index a22f5212aa4..bc8dab0a65a 100644 --- a/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp +++ b/inference-engine/thirdparty/clDNN/kernel_selector/core/actual_kernels/reorder/reorder_kernel_fs_b_yx_fsv32_to_bfyx.cpp @@ -19,7 +19,6 @@ namespace kernel_selector { static const size_t fsv = 32; -static const size_t sub_group_size = 16; static const size_t x_block_align = 8; static const std::vector optimal_x_sizes = { 16, 8, 4, 2, 1 }; static const std::vector optimal_feature_sizes = { 16, 8, 1 }; diff --git a/inference-engine/thirdparty/clDNN/src/gpu/ocl_queue_wrapper.cpp b/inference-engine/thirdparty/clDNN/src/gpu/ocl_queue_wrapper.cpp index c06c914326c..2f38a8ae667 100644 --- a/inference-engine/thirdparty/clDNN/src/gpu/ocl_queue_wrapper.cpp +++ b/inference-engine/thirdparty/clDNN/src/gpu/ocl_queue_wrapper.cpp @@ -40,32 +40,6 @@ #pragma GCC diagnostic ignored "-Wignored-attributes" #endif -namespace { -std::string ndrange_to_string(cl::NDRange const& range) { - std::string ret = "("; - for (cl::size_type i = 0; i < range.dimensions(); ++i) ret += (!i ? "" : ", ") + std::to_string(range.get()[i]); - - ret += ")"; - return ret; -} - -std::string events_list_to_string(std::vector events) { - std::string ret = "("; - bool empty = true; - for (auto& ev : events) { - std::string id = "unk"; - if (auto* ocl_ev = dynamic_cast(ev.get())) - id = std::to_string(ocl_ev->get_queue_stamp()); - - ret += (empty ? 
"" : ", ") + id; - empty = false; - } - - ret += ")"; - return ret; -} -} // namespace - namespace cldnn { namespace gpu { diff --git a/inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp b/inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp index 775fb922318..003b1b8838c 100644 --- a/inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp +++ b/inference-engine/thirdparty/clDNN/src/gpu/ocl_toolkit.cpp @@ -41,32 +41,6 @@ #pragma GCC diagnostic ignored "-Wignored-attributes" #endif -namespace { -std::string ndrange_to_string(cl::NDRange const& range) { - std::string ret = "("; - for (cl::size_type i = 0; i < range.dimensions(); ++i) ret += (!i ? "" : ", ") + std::to_string(range.get()[i]); - - ret += ")"; - return ret; -} - -std::string events_list_to_string(std::vector events) { - std::string ret = "("; - bool empty = true; - for (auto& ev : events) { - std::string id = "unk"; - if (auto* ocl_ev = dynamic_cast(ev.get())) - id = std::to_string(ocl_ev->get_queue_stamp()); - - ret += (empty ? "" : ", ") + id; - empty = false; - } - - ret += ")"; - return ret; -} -} // namespace - // static class memebers - pointers to dynamically obtained OpenCL extension functions cl::PFN_clEnqueueAcquireMediaSurfacesINTEL cl::SharedSurfLock::pfn_acquire = NULL; cl::PFN_clEnqueueReleaseMediaSurfacesINTEL cl::SharedSurfLock::pfn_release = NULL; diff --git a/inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h b/inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h index af7155a73a4..7cec0d0e015 100644 --- a/inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h +++ b/inference-engine/thirdparty/clDNN/src/include/fused_conv_eltwise_inst.h @@ -37,7 +37,7 @@ public: if (get_primitive()->eltw.with_activation) { auto slope = get_primitive()->eltw.activation_negative_slope; if (slope == 0.f) { - this->add_fused_activation(activation_func::relu, {}); + this->add_fused_activation(activation_func::relu, {0.0f, 0.0f}); } else { this->add_fused_activation(activation_func::relu_negative_slope, { slope, 0.f }); } diff --git a/inference-engine/thirdparty/clDNN/src/include/program_node.h b/inference-engine/thirdparty/clDNN/src/include/program_node.h index 63130afc94f..8866b3dbb5c 100644 --- a/inference-engine/thirdparty/clDNN/src/include/program_node.h +++ b/inference-engine/thirdparty/clDNN/src/include/program_node.h @@ -340,7 +340,7 @@ protected: uint8_t user_mark = 0; bool optimized = false; bool share_buffer = true; - std::array _support_padding_in_axis = {}; // zero-initialization + std::array _support_padding_in_axis; mutable bool has_reused_memory = false; mutable uint32_t reused_memory_color = 0; diff --git a/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt b/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt index 7fd252d1111..46f0eb6999d 100644 --- a/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/tests/CMakeLists.txt @@ -29,7 +29,6 @@ if (NOT MSVC) SET_RAW "-Wno-error=conversion-null" "-Wno-error=type-limits" - "-Wno-error=unused-variable" ) endif () diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/convolution_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/convolution_gpu_test.cpp index cbe9fb4a45f..0bb22ad2fc5 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/convolution_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/convolution_gpu_test.cpp @@ -7524,26 +7524,20 @@ TEST_P(convolution_general_gpu, conv_fp16_cases) { 
const int input_x = testing::get<0>(GetParam()), input_y = testing::get<1>(GetParam()), - input_z = testing::get<2>(GetParam()), input_f = testing::get<3>(GetParam()), output_f = testing::get<4>(GetParam()), filter_x = testing::get<5>(GetParam()), filter_y = testing::get<6>(GetParam()), - filter_z = testing::get<7>(GetParam()), groups = testing::get<8>(GetParam()), stride = testing::get<9>(GetParam()), batch_num = testing::get<10>(GetParam()), output_padding = 0, - input_offset_z = (filter_z - 1) / 2, input_offset_y = (filter_y - 1) / 2, input_offset_x = (filter_x - 1) / 2; auto input_data_format = testing::get<11>(GetParam()); auto impl_name = testing::get<12>(GetParam()); auto with_bias = testing::get<13>(GetParam()); - const int output_y = 1 + (input_y + 2 * (-input_offset_y) - filter_y) / stride + 2 * output_padding; - const int output_x = 1 + (input_x + 2 * (-input_offset_x) - filter_x) / stride + 2 * output_padding; - auto input_size = tensor(batch_num, input_f, input_x, input_y); auto input_data = generate_random_4d(batch_num, input_f, input_y, input_x, -1, 1); auto input_data_bfyx = flatten_4d(format::bfyx, input_data); diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/depth_concatenate_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/depth_concatenate_gpu_test.cpp index 05e32e547d3..88419e197ca 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/depth_concatenate_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/depth_concatenate_gpu_test.cpp @@ -463,8 +463,8 @@ TEST(depth_concatenate_f32_gpu, test06_padded_input) { topology topology; topology.add(input_layout("input1", input1.get_layout())); topology.add(input_layout("input2", input2.get_layout())); - topology.add(activation("actv1", "input1", activation_func::linear, { 0.75f })); - topology.add(activation("actv2", "input2", activation_func::linear, { 0.5f })); + topology.add(activation("actv1", "input1", activation_func::linear, { 0.75f, 0.0f })); + topology.add(activation("actv2", "input2", activation_func::linear, { 0.5f, 0.0f })); topology.add(data("weights", weights)); topology.add(convolution("conv", "actv2", { "weights" }, tensor(1), tensor(batch(0), feature(0), spatial(-1, -1, 0, 0)))); topology.add(concatenation("depth1", { "actv1", "actv2" }, concatenation::along_f)); @@ -540,8 +540,8 @@ TEST(depth_concatenate_f32_gpu, test07_padded_output) { topology topology; topology.add(input_layout("input1", input1.get_layout())); topology.add(input_layout("input2", input2.get_layout())); - topology.add(activation("actv1", "input1", activation_func::linear, { 0.75f })); - topology.add(activation("actv2", "input2", activation_func::linear, { 0.5f })); + topology.add(activation("actv1", "input1", activation_func::linear, { 0.75f, 0.0f })); + topology.add(activation("actv2", "input2", activation_func::linear, { 0.5f, 0.0f })); topology.add(concatenation("depth1", { "actv1", "actv2" }, concatenation::along_f)); topology.add(data("weights", weights)); topology.add(convolution("conv", "depth1", { "weights" }, tensor(1), tensor(batch(0), feature(0), spatial(-1, -1, 0, 0)))); @@ -603,8 +603,8 @@ TEST(depth_concatenate_f32_gpu, test07_concat_is_output) { topology topology; topology.add(input_layout("input1", input1.get_layout())); topology.add(input_layout("input2", input2.get_layout())); - topology.add(activation("actv1", "input1", activation_func::linear, { 0.75f })); - topology.add(activation("actv2", "input2", activation_func::linear, { 0.5f })); + 
topology.add(activation("actv1", "input1", activation_func::linear, { 0.75f, 0.0f })); + topology.add(activation("actv2", "input2", activation_func::linear, { 0.5f, 0.0f })); topology.add(concatenation("depth1", { "actv1", "actv2" }, concatenation::along_f)); cldnn::build_options options; diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/lstm_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/lstm_gpu_test.cpp index d2a99323bcd..3a6ea85184b 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/lstm_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/lstm_gpu_test.cpp @@ -286,7 +286,8 @@ void generic_lstm_gemm_gpu_test(int sequence_len, int direction, int batch_size, } template -void generic_lstm_elt_gpu_test(int sequence_len, int direction, int batch_size, int input_size, int hidden_size, bool hasCell = true, +void generic_lstm_elt_gpu_test(int /* sequence_len */, int direction, int batch_size, + int /* input_size */, int hidden_size, bool hasCell = true, T clip_threshold = (T)0.f, bool input_forget = false) { // tempGEMM = [ 1, direction, batch, 4 * hidden_size ] input // cell = [ 1, direction, batch, hidden_size ] optional @@ -1083,7 +1084,6 @@ void lstm_gpu_users_test() { cldnn::memory output_memory = outputs.begin()->second.get_memory(); auto output_ptr = output_memory.pointer(); - int32_t i = 0; for (int32_t b = 0; b < batch_size; ++b) { for (int32_t s = 0; s < 1; ++s) { for (int32_t d = 0; d < directions; ++d) { diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/reduce_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/reduce_gpu_test.cpp index 07d15beabfc..e2da55d12d4 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/reduce_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/reduce_gpu_test.cpp @@ -212,12 +212,12 @@ template ::ty VVVVVVF reference_reduce(VVVVVVF& input, reduce_mode reduce_mode, std::vector reduce_axis, - const int batch, - const int input_f, - const int input_w, - const int input_z, - const int input_y, - const int input_x, + const int /* batch */, + const int /* input_f */, + const int /* input_w */, + const int /* input_z */, + const int /* input_y */, + const int /* input_x */, const int dims, bool keepDims = false) { diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/reorder_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/reorder_gpu_test.cpp index 8db61f24ac5..023472962b6 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/reorder_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/reorder_gpu_test.cpp @@ -1646,7 +1646,7 @@ TEST(reorder_gpu_f32, bfzyx_to_bsv16_fsv16) auto output = outputs.begin()->second.get_memory(); - auto get_bsv16_fsv16_index = [] (int32_t b_size, int32_t f_size, int32_t z_size, int32_t y_size, int32_t x_size, int32_t b, + auto get_bsv16_fsv16_index = [] (int32_t /* b_size */, int32_t /* f_size */, int32_t z_size, int32_t y_size, int32_t x_size, int32_t b, int32_t f_pad_before, int32_t f, int32_t f_pad_after, int32_t z_pad_before, int32_t z, int32_t z_pad_after, int32_t y_pad_before, int32_t y, int32_t y_pad_after, @@ -1728,7 +1728,7 @@ TEST(reorder_gpu_f32, bfzyx_to_bsv16_fsv16_padded) auto output = outputs.begin()->second.get_memory(); - auto get_bsv16_fsv16_index = [] (int32_t b_size, int32_t f_size, int32_t z_size, int32_t y_size, int32_t x_size, int32_t b, + auto get_bsv16_fsv16_index = [] (int32_t /* b_size */, int32_t /* f_size */, int32_t z_size, int32_t 
y_size, int32_t x_size, int32_t b, int32_t f_pad_before, int32_t f, int32_t f_pad_after, int32_t z_pad_before, int32_t z, int32_t z_pad_after, int32_t y_pad_before, int32_t y, int32_t y_pad_after, diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/reshape_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/reshape_gpu_test.cpp index a4bc8272e03..bc42597fcb8 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/reshape_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/reshape_gpu_test.cpp @@ -39,7 +39,9 @@ void verify_int(const int32_t& output_value, const int32_t& value) { } template -void generic_reshape_test(format fmt, tensor const& input_size, tensor const& reshape_size, bool in_place, padding const& input_padd = padding(), padding const& output_padd = padding()) { +void generic_reshape_test(format fmt, tensor const& input_size, tensor const& reshape_size, + bool /* in_place */, padding const& input_padd = padding(), + padding const& output_padd = padding()) { const auto& engine = get_test_engine(); //allocate input memory diff --git a/inference-engine/thirdparty/clDNN/tests/test_cases/softmax_gpu_test.cpp b/inference-engine/thirdparty/clDNN/tests/test_cases/softmax_gpu_test.cpp index 3855175ecc1..f85537b49b1 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_cases/softmax_gpu_test.cpp +++ b/inference-engine/thirdparty/clDNN/tests/test_cases/softmax_gpu_test.cpp @@ -489,7 +489,6 @@ TEST(softmax_gpu_yxfb_f32, normalize_f) { out_buffer[i] = get_value(output_ptr, i); } - float sum = 0; float expected_sum = 1.0f; float temp_max = 0; diff --git a/inference-engine/thirdparty/clDNN/tests/test_utils/network_test.h b/inference-engine/thirdparty/clDNN/tests/test_utils/network_test.h index 74cd673fde8..0cef155a535 100644 --- a/inference-engine/thirdparty/clDNN/tests/test_utils/network_test.h +++ b/inference-engine/thirdparty/clDNN/tests/test_utils/network_test.h @@ -241,7 +241,6 @@ VVVVF fully_connected_reference_typed_3d(VVVVF& input, VVVVF output(output_b, VVVF(input_f, VVF(output_f, VF(1)))); OutputT res; for (size_t b = 0; b < output_b; ++b) { diff --git a/inference-engine/thirdparty/clDNN/tests_core_internal/CMakeLists.txt b/inference-engine/thirdparty/clDNN/tests_core_internal/CMakeLists.txt index 828d87e6156..f4fb9fd9a70 100644 --- a/inference-engine/thirdparty/clDNN/tests_core_internal/CMakeLists.txt +++ b/inference-engine/thirdparty/clDNN/tests_core_internal/CMakeLists.txt @@ -30,7 +30,6 @@ if (NOT MSVC) SET_RAW "-Wno-error=conversion-null" "-Wno-error=type-limits" - "-Wno-error=unused-variable" ) endif () diff --git a/ngraph/cmake/external_protobuf.cmake b/ngraph/cmake/external_protobuf.cmake index dce7d8826a0..b98bd76c71b 100644 --- a/ngraph/cmake/external_protobuf.cmake +++ b/ngraph/cmake/external_protobuf.cmake @@ -25,8 +25,6 @@ set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE OFF) if (MSVC) set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "") -else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error") endif() # This version of PROTOBUF is required by Microsoft ONNX Runtime. 
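The protobuf change just above is the counterpart of the suppressions added earlier in the patch: rather than letting third-party builds blanket-disable errors with -Wno-error, warnings are fixed at their source, and only the deprecated test tree gets a scoped ie_add_compiler_flags(-Wno-unused-variable). Three idioms recur across the hunks for keeping warning-as-error builds quiet: delete the dead local, keep a deliberately unused result alive with a void-cast (as done for data4 and data6 in adjust_data_batch_tests.cpp), or leave a kept-for-compatibility parameter unnamed (as in the clDNN test helpers above). The stand-alone C++ sketch below only illustrates those three idioms; its names are hypothetical and appear in no file of this patch.

    #include <cstdio>

    // Idiom 3: the parameter stays for signature compatibility, but its name
    // is commented out, so there is no unused-parameter warning to silence.
    static int scaled(int value, int /* reserved */) {
        return 2 * value;
    }

    int main() {
        // Idiom 2: the call must still happen (its result may be consulted
        // only in another build configuration), so the value is kept and
        // marked as intentionally unused with a void-cast.
        const int data = scaled(21, 0);
        (void)data;

        // Idiom 1: a genuinely dead local is simply deleted; that is the fix
        // the vast majority of hunks in this patch apply.
        std::printf("%d\n", scaled(1, 0));
        return 0;
    }

The void-cast form is reserved for initializers whose evaluation is still wanted; when the right-hand side is a pure query, outright deletion is the cleaner fix.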
diff --git a/ngraph/core/builder/src/builder/make_constant.cpp b/ngraph/core/builder/src/builder/make_constant.cpp index fc7afb52778..7394298b7b9 100644 --- a/ngraph/core/builder/src/builder/make_constant.cpp +++ b/ngraph/core/builder/src/builder/make_constant.cpp @@ -24,7 +24,6 @@ namespace ngraph make_constant_from_double(const element::Type& type, const Shape& shape, double num) { auto ceil_func = [](double x) { return ceil(x); }; - auto floor_func = [](double x) { return floor(x); }; std::shared_ptr result = nullptr; switch (type) diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/embedding_segments_sum.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/embedding_segments_sum.hpp index 6d97425116e..13479b07a44 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/embedding_segments_sum.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/embedding_segments_sum.hpp @@ -25,8 +25,6 @@ namespace ngraph { const size_t indices_len = indicesShape[0]; const size_t segments_num = outShape[0]; - const size_t inDimsSize = outShape.size(); - const size_t embDimsNum = outShape.size() - 1; size_t embDepth = 1lu; for (size_t i = 1; i < outShape.size(); i++) diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/extract_image_patches.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/extract_image_patches.hpp index ee8dce97973..cbd2c3318ba 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/extract_image_patches.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/extract_image_patches.hpp @@ -39,9 +39,6 @@ namespace ngraph const int64_t OH = outShape[dimsSize - 2]; const int64_t OW = outShape[dimsSize - 1]; - int64_t ihStart = 0; - int64_t iwStart = 0; - int64_t iwStep = KW + (RW - 1) * (KW - 1); int64_t ihStep = KH + (RH - 1) * (KH - 1); @@ -51,7 +48,6 @@ namespace ngraph const int64_t IH_IW = IH * IW; const int64_t IC_IH_IW = IC * IH_IW; const int64_t IB_IC_IH_IW = IC_IH_IW * IB; - const int64_t KH_KW = KH * KW; int64_t PL = 0, PT = 0; diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/interpolate.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/interpolate.hpp index 2810ed02431..77b317b2b7a 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/interpolate.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/interpolate.hpp @@ -414,9 +414,6 @@ namespace ngraph template void InterpolateEval::linear_func(const T* input_data, T* out) { - size_t input_rank = m_input_data_shape.size(); - size_t num_of_axes = m_axes.size(); - auto info = helper.get_info_for_linear_mode(); CoordinateTransform output_transform(m_out_shape); diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/lrn.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/lrn.hpp index 06d36a208fd..ae46c9c3f4c 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/lrn.hpp +++ b/ngraph/core/reference/include/ngraph/runtime/reference/lrn.hpp @@ -44,7 +44,6 @@ namespace ngraph const std::vector& begin, const Shape& slice_shape) { - size_t begin_idx = begin[0]; size_t slice_size = shape_size(slice_shape); size_t rank = begin.size(); auto coord = begin; diff --git a/ngraph/core/reference/include/ngraph/runtime/reference/region_yolo.hpp b/ngraph/core/reference/include/ngraph/runtime/reference/region_yolo.hpp index 45ddeb76fdf..52252bb056a 100644 --- a/ngraph/core/reference/include/ngraph/runtime/reference/region_yolo.hpp +++ 
b/ngraph/core/reference/include/ngraph/runtime/reference/region_yolo.hpp @@ -93,7 +93,6 @@ namespace ngraph NGRAPH_CHECK(input_shape.size() == 4); const int batches = input_shape[0]; - const int channels = input_shape[1]; const int height = input_shape[2]; const int width = input_shape[3]; diff --git a/ngraph/core/reference/src/runtime/reference/interpolate.cpp b/ngraph/core/reference/src/runtime/reference/interpolate.cpp index 8d69870bb6b..262f545a16c 100644 --- a/ngraph/core/reference/src/runtime/reference/interpolate.cpp +++ b/ngraph/core/reference/src/runtime/reference/interpolate.cpp @@ -177,7 +177,6 @@ float InterpolateEvalHelper::get_in_coord(float coord, int64_t axis_idx) InterpolateEvalHelper::InfoForLinearMode InterpolateEvalHelper::get_info_for_linear_mode() { - std::size_t input_rank = m_input_data_shape.size(); std::size_t num_of_axes = m_axes.size(); bool is_downsample = false; for (std::size_t scale : m_scales) diff --git a/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp b/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp index edaa7148623..b016f5948e3 100644 --- a/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp +++ b/ngraph/core/reference/src/runtime/reference/non_max_suppression.cpp @@ -173,8 +173,6 @@ namespace ngraph size_t boxes_per_class = static_cast(max_output_boxes_per_class); - int64_t num_of_valid_boxes = 0; - std::vector filteredBoxes; for (int64_t batch = 0; batch < num_batches; batch++) diff --git a/ngraph/core/src/op/binary_convolution.cpp b/ngraph/core/src/op/binary_convolution.cpp index 86ef21100f5..c695fb21541 100644 --- a/ngraph/core/src/op/binary_convolution.cpp +++ b/ngraph/core/src/op/binary_convolution.cpp @@ -76,7 +76,6 @@ void op::v1::BinaryConvolution::validate_and_infer_types() const PartialShape& data_batch_shape = get_input_partial_shape(0); element::Type data_batch_et = get_input_element_type(0); const PartialShape& filters_shape = get_input_partial_shape(1); - element::Type filters_et = get_input_element_type(1); PartialShape result_shape = PartialShape::dynamic(); if (data_batch_shape.rank().is_static()) diff --git a/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp b/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp index 322575d1730..9bd2916df08 100644 --- a/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp +++ b/ngraph/core/src/op/ctc_greedy_decoder_seq_len.cpp @@ -55,7 +55,6 @@ void op::v6::CTCGreedyDecoderSeqLen::validate_and_infer_types() NGRAPH_OP_SCOPE(v6_CTCGreedyDecoderSeqLen_validate_and_infer_types); const auto& logits_pshape = get_input_partial_shape(0); const auto& seq_len_pshape = get_input_partial_shape(1); - auto input_et = get_input_element_type(0); const bool logits_is_static_rank = logits_pshape.rank().is_static(); const bool seq_len_is_static_rank = seq_len_pshape.rank().is_static(); diff --git a/ngraph/core/src/op/deformable_convolution.cpp b/ngraph/core/src/op/deformable_convolution.cpp index 39c6680d2a0..e106f689b58 100644 --- a/ngraph/core/src/op/deformable_convolution.cpp +++ b/ngraph/core/src/op/deformable_convolution.cpp @@ -70,7 +70,6 @@ void op::v1::DeformableConvolution::validate_and_infer_types() const PartialShape& filters_shape = get_input_partial_shape(2); element::Type data_batch_et = get_input_element_type(0); - element::Type deformable_values_et = get_input_element_type(1); element::Type filters_et = get_input_element_type(2); if (deformable_values_shape.rank().is_static()) diff --git a/ngraph/core/src/op/depth_to_space.cpp 
b/ngraph/core/src/op/depth_to_space.cpp index a62859d6f33..3bc804dd1be 100644 --- a/ngraph/core/src/op/depth_to_space.cpp +++ b/ngraph/core/src/op/depth_to_space.cpp @@ -121,7 +121,6 @@ bool op::DepthToSpace::evaluate_depth_to_space(const HostTensorVector& outputs, { const auto& data = inputs[0]; const auto& out = outputs[0]; - const auto& out_shape = out->get_shape(); size_t elem_size = data->get_element_type().size(); if (data->get_partial_shape().is_dynamic()) diff --git a/ngraph/core/src/op/gather.cpp b/ngraph/core/src/op/gather.cpp index 61d0e58f377..2510711770f 100644 --- a/ngraph/core/src/op/gather.cpp +++ b/ngraph/core/src/op/gather.cpp @@ -81,7 +81,6 @@ void op::v1::Gather::validate_and_infer_types() } element::Type result_et = get_input_element_type(PARAMS); - element::Type indices_et = get_input_element_type(INDICES); const PartialShape& params_shape = get_input_partial_shape(PARAMS); const PartialShape& indices_shape = get_input_partial_shape(INDICES); diff --git a/ngraph/core/src/op/loop.cpp b/ngraph/core/src/op/loop.cpp index adfdd98ae50..abd3b3458b9 100644 --- a/ngraph/core/src/op/loop.cpp +++ b/ngraph/core/src/op/loop.cpp @@ -221,7 +221,6 @@ void op::v5::Loop::validate_and_infer_types() auto body_value = m_body->get_results().at(merged_input_description->m_body_value_index); - const auto& body_value_partial_shape = body_value->get_input_partial_shape(0); auto body_parameter = m_body->get_parameters().at(merged_input_description->m_body_parameter_index); diff --git a/ngraph/core/src/op/non_max_suppression.cpp b/ngraph/core/src/op/non_max_suppression.cpp index 5ae5b63cda3..b0c1a9c1458 100644 --- a/ngraph/core/src/op/non_max_suppression.cpp +++ b/ngraph/core/src/op/non_max_suppression.cpp @@ -695,8 +695,6 @@ std::shared_ptr namespace { - constexpr size_t boxes_port = 0; - constexpr size_t scores_port = 1; constexpr size_t max_output_boxes_port = 2; constexpr size_t iou_threshold_port = 3; constexpr size_t score_threshold_port = 4; diff --git a/ngraph/core/src/op/one_hot.cpp b/ngraph/core/src/op/one_hot.cpp index 6a7ad7ad934..4a50ae32da9 100644 --- a/ngraph/core/src/op/one_hot.cpp +++ b/ngraph/core/src/op/one_hot.cpp @@ -143,7 +143,6 @@ namespace detail const int64_t axis) { const auto& indices = input_values[0]; - const auto& depth = input_values[1]; const auto& on_value = input_values[2]; const auto& off_value = input_values[3]; diff --git a/ngraph/core/src/op/reshape.cpp b/ngraph/core/src/op/reshape.cpp index ff4d627eb2d..5ed23ff6c27 100644 --- a/ngraph/core/src/op/reshape.cpp +++ b/ngraph/core/src/op/reshape.cpp @@ -286,7 +286,6 @@ bool op::v1::Reshape::evaluate_reshape(const HostTensorVector& outputs, { // infer and set output shape if the output shape contain -1 // and zero value dimension - size_t output_rank = inputs[1]->get_shape()[0]; std::vector out_shape_val; switch (inputs[1]->get_element_type()) diff --git a/ngraph/core/src/op/reverse.cpp b/ngraph/core/src/op/reverse.cpp index 38252934fc2..20b872fb9f1 100644 --- a/ngraph/core/src/op/reverse.cpp +++ b/ngraph/core/src/op/reverse.cpp @@ -175,7 +175,6 @@ bool op::v1::Reverse::evaluate_reverse(const HostTensorVector& outputs, const HostTensorVector& inputs) const { AxisSet axes{}; - size_t axes_rank = inputs[1]->get_element_count(); if (get_mode() == op::v1::Reverse::Mode::INDEX) { switch (inputs[1]->get_element_type()) diff --git a/ngraph/core/src/op/shuffle_channels.cpp b/ngraph/core/src/op/shuffle_channels.cpp index b25e6db2bb7..77e4ef2e957 100644 --- a/ngraph/core/src/op/shuffle_channels.cpp +++ 
b/ngraph/core/src/op/shuffle_channels.cpp @@ -166,7 +166,6 @@ bool op::ShuffleChannels::evaluate_shuffle_channels(const HostTensorVector& outp { reshaped_out_shape[3] *= ds[i]; } - size_t data_size = shape_size(data_shape) * elem_size; // first reshape from data_shape to reshaped_out_shape is skipped since it doesn't affect // out diff --git a/ngraph/core/src/op/space_to_batch.cpp b/ngraph/core/src/op/space_to_batch.cpp index cc0c294213c..2d61fe3c3a5 100644 --- a/ngraph/core/src/op/space_to_batch.cpp +++ b/ngraph/core/src/op/space_to_batch.cpp @@ -149,7 +149,6 @@ bool ngraph::op::v1::SpaceToBatch::evaluate_space_to_batch(const HostTensorVecto { const auto& data = inputs[0]; const auto& out = outputs[0]; - const auto& out_shape = out->get_shape(); size_t elem_size = data->get_element_type().size(); if (data->get_partial_shape().is_dynamic()) diff --git a/ngraph/core/src/op/space_to_depth.cpp b/ngraph/core/src/op/space_to_depth.cpp index 77439178d82..7df396a6b28 100644 --- a/ngraph/core/src/op/space_to_depth.cpp +++ b/ngraph/core/src/op/space_to_depth.cpp @@ -118,7 +118,6 @@ bool ngraph::op::v0::SpaceToDepth::evaluate_space_to_depth(const HostTensorVecto { const auto& data = inputs[0]; const auto& out = outputs[0]; - const auto& out_shape = out->get_shape(); size_t elem_size = data->get_element_type().size(); if (data->get_partial_shape().is_dynamic()) diff --git a/ngraph/frontend/onnx_import/src/op/loop.cpp b/ngraph/frontend/onnx_import/src/op/loop.cpp index f20f03c683a..eec315049d5 100644 --- a/ngraph/frontend/onnx_import/src/op/loop.cpp +++ b/ngraph/frontend/onnx_import/src/op/loop.cpp @@ -141,7 +141,6 @@ namespace ngraph for (size_t i = loop_carried_dependencies.size() + 1; i < body_outputs.size(); ++i) { - const auto& body_output_shape = body_outputs[i].get_partial_shape(); body_outputs[i] = std::make_shared( body_outputs[i], concat_axis_const); } diff --git a/ngraph/frontend/onnx_import/src/op/lstm.cpp b/ngraph/frontend/onnx_import/src/op/lstm.cpp index ba2e571aeaf..f25ab909fbc 100644 --- a/ngraph/frontend/onnx_import/src/op/lstm.cpp +++ b/ngraph/frontend/onnx_import/src/op/lstm.cpp @@ -67,8 +67,6 @@ namespace ngraph const auto& ng_inputs = node.get_ng_inputs(); // We have input, output, forget and cell gates constexpr std::size_t gates_count{4}; - // Peepholes add additional connections to input, output and forget gates. - constexpr std::size_t peepholes_count{3}; // ----- Mandatory inputs ------ // Packed input sequences. 
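Most of the ngraph hunks above remove exactly that pure-query case: a local, often a reference, bound to a const getter such as get_shape() or get_input_element_type() and then never read. Because the getter has no side effects, deleting the declaration loses nothing, and the value can be re-queried inline wherever a later revision actually needs it. Below is a compilable sketch of the before/after under assumed types; Tensor is a stand-in, not the ngraph HostTensor API.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Stand-in tensor with a cheap, const, side-effect-free shape query.
    struct Tensor {
        std::vector<std::size_t> dims{1, 3, 224, 224};
        const std::vector<std::size_t>& get_shape() const { return dims; }
    };

    std::size_t element_count(const Tensor& out) {
        // Removed pattern:  const auto& out_shape = out.get_shape();
        // The binding was never read again, which is what the new
        // unused-variable errors pointed at; deleting it is safe because
        // get_shape() has no side effects.
        std::size_t n = 1;
        for (std::size_t d : out.get_shape())  // query inline where it is used
            n *= d;
        return n;
    }

    int main() {
        std::printf("%zu\n", element_count(Tensor{}));
        return 0;
    }

Contrast this with the (void)data4 casts earlier in the patch: a void-cast is warranted only when the initializer must still run; for side-effect-free getters the declaration is dropped entirely.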
diff --git a/ngraph/test/backend/fused_op.in.cpp b/ngraph/test/backend/fused_op.in.cpp
index 0ea05077a87..93ac5b0d5c4 100644
--- a/ngraph/test/backend/fused_op.in.cpp
+++ b/ngraph/test/backend/fused_op.in.cpp
@@ -1585,7 +1585,6 @@ NGRAPH_TEST(${BACKEND_NAME}, DISABLED_lstm_cell__bias_peepholes_clip_input_forge
     const size_t hidden_size = 3;
     const size_t gates_count = 4;
     const float clip_threshold = 3.5f;
-    bool input_forget = true;

     const auto X = make_shared<op::Parameter>(element::f32, Shape{batch_size, input_size});
     const auto W =
@@ -1684,7 +1683,6 @@ NGRAPH_TEST(${BACKEND_NAME}, DISABLED_lstm_cell__activaction_functions)
     const size_t hidden_size = 3;
     const size_t gates_count = 4;
     const float clip_threshold = 3.5f;
-    bool input_forget = true;
     vector<string> activations{"sigmoid", "tanh", "hardsigmoid"};
     vector<float> activation_alpha{0.f, 0.f, 1.8345f};
     vector<float> activation_beta{0.f, 0.f, 3.05f};
diff --git a/ngraph/test/backend/group_convolution.in.cpp b/ngraph/test/backend/group_convolution.in.cpp
index 5061e807779..d371326c80c 100644
--- a/ngraph/test/backend/group_convolution.in.cpp
+++ b/ngraph/test/backend/group_convolution.in.cpp
@@ -46,7 +46,6 @@ NGRAPH_TEST(${BACKEND_NAME}, dyn_group_convolution_backprop_data)
     auto dilations = Strides{1, 1};
     auto padding_begin = CoordinateDiff{0, 0};
     auto padding_end = CoordinateDiff{0, 0};
-    size_t groups = 3;

     auto conv_bprop_data = make_shared<op::v1::GroupConvolutionBackpropData>(
         data_batch, filters, deltas, strides, padding_begin, padding_end, dilations);
diff --git a/ngraph/test/backend/region_yolo.in.cpp b/ngraph/test/backend/region_yolo.in.cpp
index c477733892e..1761fd256e2 100644
--- a/ngraph/test/backend/region_yolo.in.cpp
+++ b/ngraph/test/backend/region_yolo.in.cpp
@@ -39,7 +39,6 @@ NGRAPH_TEST(${BACKEND_NAME}, region_yolo_v2_caffe)
     const size_t channels = 125;
     const size_t width = 13;
     const size_t height = 13;
-    const size_t count = width * height * channels;
     const std::vector<int64_t> mask{0, 1, 2};

     Shape input_shape{batch, channels, height, width};
@@ -69,7 +68,6 @@ NGRAPH_TEST(${BACKEND_NAME}, region_yolo_v3_mxnet)
     const std::vector<int64_t> mask{0, 1, 2};

     Shape shape{batch, channels, height, width};
-    const auto count = shape_size(shape);

     const auto A = make_shared<op::Parameter>(element::f32, shape);
     const auto R = make_shared<op::RegionYolo>(A, coords, classes, num, false, mask, 1, 3);
diff --git a/ngraph/test/backend/roi_pooling.in.cpp b/ngraph/test/backend/roi_pooling.in.cpp
index e02365d574d..d895a71e1a9 100644
--- a/ngraph/test/backend/roi_pooling.in.cpp
+++ b/ngraph/test/backend/roi_pooling.in.cpp
@@ -194,7 +194,6 @@ NGRAPH_TEST(${BACKEND_NAME}, roi_pooling_2x2_bilinear)
                                        0.75f,
                                        0.8f};

-    const auto count = shape_size(output_shape);
     const vector<float> expected_vect = {1.225f,
                                         1.645f,
                                         4.585f,
diff --git a/ngraph/test/onnx/onnx_import.in.cpp b/ngraph/test/onnx/onnx_import.in.cpp
index 846b27a8100..be68bc70094 100644
--- a/ngraph/test/onnx/onnx_import.in.cpp
+++ b/ngraph/test/onnx/onnx_import.in.cpp
@@ -1503,7 +1503,6 @@ NGRAPH_TEST(${BACKEND_NAME}, onnx_resize11_up_scales_linear_asymmetric)

     const Shape expected_output_shape{2, 1, 4, 8};
     auto test_case = test::TestCase<TestEngine>(function);
-    const size_t input_size = 8;
     std::vector<float> input_data{1.0f, 3.0f, 4.0f, 8.0f, 6.0f, 2.0f, 7.0f, 11.0f};
     test_case.add_input<float>(input_data);
     test_case.add_expected_output<float>(
diff --git a/ngraph/test/runtime/interpreter/evaluates_map.cpp b/ngraph/test/runtime/interpreter/evaluates_map.cpp
index efca1a69756..d1c0bc065e8 100644
--- a/ngraph/test/runtime/interpreter/evaluates_map.cpp
+++ b/ngraph/test/runtime/interpreter/evaluates_map.cpp
@@ -484,10 +484,6 @@ namespace

    constexpr size_t boxes_port = 0;
     constexpr size_t scores_port = 1;
-    constexpr size_t max_output_boxes_port = 2;
-    constexpr size_t iou_threshold_port = 3;
-    constexpr size_t score_threshold_port = 4;
-    constexpr size_t soft_nms_sigma_port = 5;

     PartialShape infer_selected_indices_shape(
         const std::vector<std::shared_ptr<HostTensor>>& inputs,