Added suggest-override flag (#6631)

This commit is contained in:
Ilya Lavrenov 2021-07-16 18:55:05 +03:00 committed by GitHub
parent 22fddb4315
commit bc36425381
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
147 changed files with 406 additions and 548 deletions

View File

@ -42,50 +42,6 @@ endforeach()
# Build
#
function(build_ngraph)
function(ngraph_set option value)
if(NOT DEFINED ${option})
set(${option} ${value} CACHE BOOL "" FORCE)
endif()
endfunction()
if(ENABLE_TESTS AND NOT ANDROID)
ngraph_set(NGRAPH_UNIT_TEST_ENABLE ON)
else()
ngraph_set(NGRAPH_UNIT_TEST_ENABLE OFF)
endif()
if(NOT (ANDROID OR WINDOWS_STORE OR (MSVC AND (ARM OR AARCH64)) ))
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE ON)
ngraph_set(NGRAPH_PDPD_FRONTEND_ENABLE ON)
else()
ngraph_set(NGRAPH_ONNX_IMPORT_ENABLE OFF)
ngraph_set(NGRAPH_PDPD_FRONTEND_ENABLE OFF)
endif()
if(CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
ie_add_compiler_flags(-Wno-error=uninitialized -Wno-error=literal-conversion)
elseif(UNIX)
ie_add_compiler_flags(-Wno-error=maybe-uninitialized -Wno-error=return-type)
endif()
# WA for GCC 7.0
if (UNIX)
ie_add_compiler_flags(-Wno-error=return-type -Wno-undef)
elseif(WIN32)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4308 /wd4146 /wd4703 /wd4244 /wd4819")
endif()
if(ENABLE_LTO)
set(CMAKE_INTERPROCEDURAL_OPTIMIZATION_RELEASE ON)
endif()
ie_cpack_add_component(ngraph REQUIRED)
ie_cpack_add_component(ngraph_dev REQUIRED DEPENDS ngraph)
add_subdirectory(ngraph)
endfunction()
function(openvino_developer_export_targets)
cmake_parse_arguments(EXPORT "" "COMPONENT" "TARGETS" ${ARGN})
@ -118,9 +74,12 @@ function(openvino_developer_export_targets)
"A list of OpenVINO exported components" FORCE)
endfunction()
ie_cpack_add_component(ngraph REQUIRED)
ie_cpack_add_component(ngraph_dev REQUIRED DEPENDS ngraph)
add_subdirectory(thirdparty)
add_subdirectory(openvino)
build_ngraph()
add_subdirectory(ngraph)
add_subdirectory(inference-engine)
# for Template plugin

View File

@ -3,6 +3,7 @@
#
include(ProcessorCount)
include(CheckCXXCompilerFlag)
#
# Disables deprecated warnings generation
@ -292,9 +293,14 @@ else()
elseif(UNIX)
ie_add_compiler_flags(-Wuninitialized -Winit-self)
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
ie_add_compiler_flags(-Wno-error=switch)
ie_add_compiler_flags(-Wno-error=switch
-Winconsistent-missing-override)
else()
ie_add_compiler_flags(-Wmaybe-uninitialized)
check_cxx_compiler_flag("-Wsuggest-override" SUGGEST_OVERRIDE_SUPPORTED)
if(SUGGEST_OVERRIDE_SUPPORTED)
set(CMAKE_CXX_FLAGS "-Wsuggest-override ${CMAKE_CXX_FLAGS}")
endif()
endif()
endif()
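For context, a minimal C++ sketch of what GCC's -Wsuggest-override (and, for classes that already use the keyword inconsistently, Clang's -Winconsistent-missing-override) reports; the class names here are hypothetical:

struct Base {
    virtual ~Base() = default;
    virtual void run() {}
};

struct Derived : Base {
    void run() {}           // -Wsuggest-override: 'run' can be marked 'override'
};

struct DerivedFixed : Base {
    void run() override {}  // explicit; a signature mismatch now fails to compile
};

This is why most of the C++ hunks in this commit replace 'virtual void f(...)' with 'void f(...) override' in derived classes.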
@ -316,3 +322,27 @@ else()
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections -Wl,--exclude-libs,ALL")
endif()
endif()
# Links provided libraries and include their INTERFACE_INCLUDE_DIRECTORIES as SYSTEM
function(link_system_libraries TARGET_NAME)
set(MODE PRIVATE)
foreach(arg IN LISTS ARGN)
if(arg MATCHES "(PRIVATE|PUBLIC|INTERFACE)")
set(MODE ${arg})
else()
if(TARGET "${arg}")
target_include_directories(${TARGET_NAME}
SYSTEM ${MODE}
$<TARGET_PROPERTY:${arg},INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:${arg},INTERFACE_SYSTEM_INCLUDE_DIRECTORIES>
)
endif()
target_link_libraries(${TARGET_NAME}
${MODE}
${arg}
)
endif()
endforeach()
endfunction()
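A usage sketch of the helper above (the target name is hypothetical). Because the include directories are attached as SYSTEM, warnings such as -Wsuggest-override that originate in third-party headers are suppressed, while the flag stays active for first-party sources:

add_library(my_plugin SHARED plugin.cpp)         # hypothetical target
link_system_libraries(my_plugin PRIVATE mkldnn)  # mkldnn headers are now treated as system includes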

View File

@ -6,7 +6,7 @@ include(CheckCXXCompilerFlag)
if (ENABLE_SANITIZER)
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=address")
CHECK_CXX_COMPILER_FLAG("-fsanitize-recover=address" SANITIZE_RECOVER_ADDRESS_SUPPORTED)
check_cxx_compiler_flag("-fsanitize-recover=address" SANITIZE_RECOVER_ADDRESS_SUPPORTED)
if (SANITIZE_RECOVER_ADDRESS_SUPPORTED)
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=address")
endif()
@ -18,7 +18,7 @@ if (ENABLE_UB_SANITIZER)
# TODO: Remove -fno-sanitize=null once thirdparty/ocl/clhpp_headers UBSAN compatibility is resolved:
# https://github.com/KhronosGroup/OpenCL-CLHPP/issues/17
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize=undefined -fno-sanitize=null")
CHECK_CXX_COMPILER_FLAG("-fsanitize-recover=undefined" SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
check_cxx_compiler_flag("-fsanitize-recover=undefined" SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
if (SANITIZE_RECOVER_UNDEFINED_SUPPORTED)
set(SANITIZER_COMPILER_FLAGS "${SANITIZER_COMPILER_FLAGS} -fsanitize-recover=undefined")
endif()

View File

@ -114,6 +114,25 @@ ie_option (ENABLE_SYSTEM_PUGIXML "use the system copy of pugixml" OFF)
ie_option (ENABLE_CPU_DEBUG_CAPS "enable CPU debug capabilities at runtime" OFF)
if(ANDROID OR WINDOWS_STORE OR (MSVC AND (ARM OR AARCH64)))
set(protoc_available OFF)
else()
set(protoc_available ON)
endif()
ie_dependent_option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" ON "protoc_available" OFF)
ie_dependent_option(NGRAPH_ONNX_EDITOR_ENABLE "Enable ONNX Editor" ON "NGRAPH_ONNX_IMPORT_ENABLE" OFF)
ie_dependent_option(NGRAPH_PDPD_FRONTEND_ENABLE "Enable PaddlePaddle FrontEnd" ON "protoc_available" OFF)
ie_dependent_option(NGRAPH_USE_PROTOBUF_LITE "Compiles and links with protobuf-lite" OFF
"NGRAPH_ONNX_IMPORT_ENABLE OR NGRAPH_PDPD_FRONTEND_ENABLE" OFF)
ie_dependent_option(NGRAPH_UNIT_TEST_ENABLE "Enables ngraph unit tests" ON "ENABLE_TESTS;NOT ANDROID" OFF)
ie_dependent_option(NGRAPH_UNIT_TEST_BACKENDS_ENABLE "Control the building of unit tests using backends" ON
"NGRAPH_UNIT_TEST_ENABLE" OFF)
option(NGRAPH_DEBUG_ENABLE "Enable output for NGRAPH_DEBUG statements" OFF)
# WA for ngraph python build on Windows debug
list(REMOVE_ITEM IE_OPTIONS NGRAPH_UNIT_TEST_ENABLE NGRAPH_UNIT_TEST_BACKENDS_ENABLE)
#
# Process features
#
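Assuming ie_dependent_option mirrors the semantics of CMake's stock cmake_dependent_option(<option> <doc> <default> <condition> <force>), the first option above corresponds roughly to:

include(CMakeDependentOption)
cmake_dependent_option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer"
    ON                        # user-overridable default while the condition holds
    "protoc_available" OFF)   # forced to OFF when protoc is unavailable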

View File

@ -13,7 +13,7 @@ set_and_check(IE_MAIN_SOURCE_DIR "@IE_MAIN_SOURCE_DIR@") # HDDL
# Variables to export in plugin's projects
set(ie_options "@IE_OPTIONS@;CMAKE_BUILD_TYPE;CMAKE_SKIP_RPATH;")
set(ie_options "@IE_OPTIONS@;CMAKE_BUILD_TYPE;CMAKE_SKIP_RPATH")
list(APPEND ie_options CMAKE_CXX_COMPILER_LAUNCHER CMAKE_C_COMPILER_LAUNCHER)
file(TO_CMAKE_PATH "${CMAKE_CURRENT_LIST_DIR}" cache_path)
@ -73,6 +73,9 @@ if(NOT MSVC)
ie_add_compiler_flags(-Wno-error=unused-variable)
if(CMAKE_COMPILER_IS_GNUCXX)
ie_add_compiler_flags(-Wno-error=unused-but-set-variable)
if(SUGGEST_OVERRIDE_SUPPORTED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-suggest-override")
endif()
endif()
endif()

View File

@ -30,7 +30,7 @@ public:
* @param ptrNumMemoryBytes pointer to specific number of memory bytes
* @return none.
*/
virtual void GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes);
void GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes) override;
/**
* @brief Load Kaldi ARK speech feature vector file
@ -43,8 +43,8 @@ public:
* @param ptrNumBytesPerElement pointer to number bytes per element (size of float by default)
* @return none.
*/
virtual void LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector<uint8_t>& memory, uint32_t* ptrNumRows,
uint32_t* ptrNumColumns, uint32_t* ptrNumBytesPerElement);
void LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector<uint8_t>& memory, uint32_t* ptrNumRows, uint32_t* ptrNumColumns,
uint32_t* ptrNumBytesPerElement) override;
/**
* @brief Save Kaldi ARK speech feature vector file
@ -56,7 +56,7 @@ public:
* @param numColumns number of columns
* @return none.
*/
virtual void SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns);
void SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns) override;
};
/// @brief Responsible for working with .npz files
@ -70,7 +70,7 @@ public:
* @param ptrNumMemoryBytes pointer to specific number of memory bytes
* @return none.
*/
virtual void GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes);
void GetFileInfo(const char* fileName, uint32_t numArrayToFindSize, uint32_t* ptrNumArrays, uint32_t* ptrNumMemoryBytes) override;
/**
* @brief Load Numpy* uncompressed NPZ speech feature vector file
@ -83,8 +83,8 @@ public:
* @param ptrNumBytesPerElement pointer to number bytes per element (size of float by default)
* @return none.
*/
virtual void LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector<uint8_t>& memory, uint32_t* ptrNumRows,
uint32_t* ptrNumColumns, uint32_t* ptrNumBytesPerElement);
void LoadFile(const char* fileName, uint32_t arrayIndex, std::string& ptrName, std::vector<uint8_t>& memory, uint32_t* ptrNumRows, uint32_t* ptrNumColumns,
uint32_t* ptrNumBytesPerElement) override;
/**
* @brief Save Numpy* uncompressed NPZ speech feature vector file
@ -96,5 +96,5 @@ public:
* @param numColumns number of columns
* @return none.
*/
virtual void SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns);
void SaveFile(const char* fileName, bool shouldAppend, std::string name, void* ptrMemory, uint32_t numRows, uint32_t numColumns) override;
};

View File

@ -46,8 +46,10 @@ target_link_libraries(${TARGET_NAME} PRIVATE mkldnn
inference_engine_lp_transformations)
target_include_directories(${TARGET_NAME} PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}
$<TARGET_PROPERTY:mkldnn,INCLUDE_DIRECTORIES>)
${CMAKE_CURRENT_SOURCE_DIR})
target_include_directories(${TARGET_NAME} SYSTEM PRIVATE
$<TARGET_PROPERTY:mkldnn,INCLUDE_DIRECTORIES>)
# Cross compiled function
# TODO: The same for proposal, proposalONNX, topk
@ -64,15 +66,16 @@ ie_add_api_validator_post_build_step(TARGET ${TARGET_NAME})
# add test object library
add_library(${TARGET_NAME}_obj OBJECT ${SOURCES} ${HEADERS})
target_link_libraries(${TARGET_NAME}_obj PUBLIC mkldnn)
link_system_libraries(${TARGET_NAME}_obj PUBLIC mkldnn)
target_include_directories(${TARGET_NAME}_obj PRIVATE $<TARGET_PROPERTY:inference_engine_preproc_s,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_transformations,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:openvino::itt,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_lp_transformations,INTERFACE_INCLUDE_DIRECTORIES>
PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}
$<TARGET_PROPERTY:openvino::conditional_compilation,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:mkldnn,INCLUDE_DIRECTORIES>)
$<TARGET_PROPERTY:openvino::conditional_compilation,INTERFACE_INCLUDE_DIRECTORIES>)
target_include_directories(${TARGET_NAME}_obj SYSTEM PUBLIC $<TARGET_PROPERTY:mkldnn,INCLUDE_DIRECTORIES>)
set_ie_threading_interface_for(${TARGET_NAME}_obj)

View File

@ -13,17 +13,17 @@ constexpr size_t channelsPos = 1lu;
class PlainFormatCreator : public TensorDescCreator {
public:
virtual InferenceEngine::TensorDesc createDesc(const InferenceEngine::Precision& precision, const InferenceEngine::SizeVector& srcDims) const {
InferenceEngine::TensorDesc createDesc(const InferenceEngine::Precision& precision, const InferenceEngine::SizeVector& srcDims) const override {
SizeVector order(srcDims.size());
std::iota(order.begin(), order.end(), 0);
return TensorDesc(precision, srcDims, {srcDims, order});
}
virtual size_t getMinimalRank() const { return 0lu; }
size_t getMinimalRank() const override { return 0lu; }
};
class PerChannelCreator : public TensorDescCreator {
public:
virtual InferenceEngine::TensorDesc createDesc(const InferenceEngine::Precision &precision, const InferenceEngine::SizeVector &srcDims) const {
InferenceEngine::TensorDesc createDesc(const InferenceEngine::Precision &precision, const InferenceEngine::SizeVector &srcDims) const override {
SizeVector order(srcDims.size());
std::iota(order.begin(), order.end(), 0);
SizeVector blkDims = srcDims;
@ -39,13 +39,13 @@ public:
return TensorDesc(precision, srcDims, {blkDims, order});
}
virtual size_t getMinimalRank() const { return 3lu; }
size_t getMinimalRank() const override { return 3lu; }
};
class ChannelBlockedCreator : public TensorDescCreator {
public:
ChannelBlockedCreator(size_t blockSize) : _blockSize(blockSize) {}
virtual InferenceEngine::TensorDesc createDesc(const InferenceEngine::Precision& precision, const InferenceEngine::SizeVector& srcDims) const {
InferenceEngine::TensorDesc createDesc(const InferenceEngine::Precision& precision, const InferenceEngine::SizeVector& srcDims) const override {
if (srcDims.size() < 2) {
IE_THROW() << "Can't create blocked tensor descriptor!";
}
@ -60,7 +60,7 @@ public:
return TensorDesc(precision, srcDims, {blkDims, order});
}
virtual size_t getMinimalRank() const { return 3lu; }
size_t getMinimalRank() const override { return 3lu; }
private:
size_t _blockSize;

View File

@ -149,7 +149,7 @@ struct jit_has_subnormals_base::reg<cpu_isa_t::sse41> {
template<cpu_isa_t isa>
struct jit_has_subnormals : public jit_has_subnormals_base {
void generate() final {
void generate() override final { // NOLINT
size_t const vlen = reg<isa>::length;
const int sh_bits = std::ilogb(vlen);
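The added NOLINT is presumably needed because GCC's -Wsuggest-override asks for 'override' even on members already declared 'final', while cpplint then flags the combination as redundant. A minimal sketch of the conflict (hypothetical names):

struct JitBase {
    virtual ~JitBase() = default;
    virtual void generate() = 0;
};

struct Jit : JitBase {
    // 'final' alone trips -Wsuggest-override; adding 'override' trips cpplint,
    // hence the suppression comment.
    void generate() override final {}  // NOLINT
};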

View File

@ -72,6 +72,11 @@ file(GLOB_RECURSE legacy_tests
set_source_files_properties(${legacy_tests} PROPERTIES INCLUDE_DIRECTORIES
$<TARGET_PROPERTY:inference_engine_legacy,INTERFACE_INCLUDE_DIRECTORIES>)
if(SUGGEST_OVERRIDE_SUPPORTED)
set_source_files_properties(${CMAKE_CURRENT_SOURCE_DIR}/caching_test.cpp
PROPERTIES COMPILE_OPTIONS -Wno-suggest-override)
endif()
include(CMakeParseArguments)
#

View File

@ -25,9 +25,6 @@ using namespace InferenceEngine;
class NGraphReaderTests : public CommonTestUtils::TestsCommon {
protected:
void TearDown() override {}
void SetUp() override {}
void compareIRs(const std::string& modelV10, const std::string& oldModel, size_t weightsSize = 0, const std::function<void(Blob::Ptr&)>& fillBlob = {}) {
Core ie;
Blob::Ptr weights;

View File

@ -17,10 +17,7 @@ class PreallocatorTests: public ::testing::Test {
protected:
std::vector<float> mybuf;
virtual void TearDown() {
}
virtual void SetUp() {
void SetUp() override {
mybuf.resize(10);
allocator = details::make_pre_allocator(&*mybuf.begin(), mybuf.size());
}

View File

@ -56,7 +56,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
FakeQuantizeDecompositionBasicParams basic_params;
std::pair<float, float> input_ranges_values;
bool should_be_decompos;

View File

@ -4,11 +4,6 @@
set(TARGET_NAME subgraphsDumperTests)
list(APPEND DEPENDENCIES
unitTestUtils
ngraph
)
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
@ -18,7 +13,7 @@ addIeTargetTest(
$<TARGET_PROPERTY:inference_engine,INTERFACE_INCLUDE_DIRECTORIES>
LINK_LIBRARIES
PRIVATE
unitTestUtils
funcTestUtils
ngraph
pugixml::static
ADD_CPPLINT

View File

@ -17,7 +17,7 @@ using ngraph::element::Type_t;
class ConvolutionMatcherTest : public ::testing::Test {
protected:
void SetUp() {
void SetUp() override {
matcher = SubgraphsDumper::ConvolutionsMatcher();
op_info = LayerTestsUtils::OPInfo();
}

View File

@ -13,7 +13,7 @@ using ngraph::element::Type_t;
class SingleOpMatcherTest : public ::testing::Test {
protected:
void SetUp() {
void SetUp() override {
matcher = SubgraphsDumper::SingleOpMatcher();
op_info = LayerTestsUtils::OPInfo();
}

View File

@ -12,7 +12,7 @@ using ngraph::element::Type_t;
class MatcherConfigTest : public ::testing::Test {
protected:
void SetUp() {
void SetUp() override {
const auto const1 = std::make_shared<Constant>(Type_t::f32, Shape({5, 5}), 1);
const auto const2 = std::make_shared<Constant>(Type_t::f32, Shape({5, 5}), 2);
node = std::make_shared<v1::Add>(const1, const2);

View File

@ -32,7 +32,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
SizeVector ie_shape;
std::tie(inPrc, ie_shape, targetDevice) = this->GetParam();

View File

@ -32,7 +32,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
LayerTestsDefinitions::EltwiseTestParams basicParamsSet;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, cpuParams) = this->GetParam();

View File

@ -67,7 +67,7 @@ protected:
ASSERT_TRUE(foundConv) << "Can't find Convolution node";
}
void SetUp() {
void SetUp() override {
groupConvLayerTestParamsSet basicParamsSet;
CPUSpecificParams cpuParams;
fusingSpecificParams fusingParams;

View File

@ -49,7 +49,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
groupConvBackpropDataLayerTestParamsSet basicParamsSet;
CPUSpecificParams cpuParams;
fusingSpecificParams fusingParams;

View File

@ -41,7 +41,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
CPUSpecificParams cpuParams;
LayerTestsDefinitions::GRUCellParams basicParamsSet;
std::map<std::string, std::string> additionalConfig;

View File

@ -41,7 +41,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
LayerTestsDefinitions::GRUSequenceParams basicParamsSet;
CPUSpecificParams cpuParams;
std::map<std::string, std::string> additionalConfig;
@ -142,7 +142,7 @@ protected:
}
}
void GenerateInputs() {
void GenerateInputs() override {
for (const auto &input : executableNetwork.GetInputsInfo()) {
const auto &info = input.second;
auto blob = GenerateInput(*info);

View File

@ -46,7 +46,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
LayerTestsDefinitions::InterpolateLayerTestParams basicParamsSet;
CPUSpecificParams cpuParams;
fusingSpecificParams fusingParams;

View File

@ -41,7 +41,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
LayerTestsDefinitions::LSTMCellParams basicParamsSet;
CPUSpecificParams cpuParams;
std::map<std::string, std::string> additionalConfig;

View File

@ -42,7 +42,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
LayerTestsDefinitions::LSTMSequenceParams basicParamsSet;
CPUSpecificParams cpuParams;
std::map<std::string, std::string> additionalConfig;
@ -149,7 +149,7 @@ protected:
}
}
void GenerateInputs() {
void GenerateInputs() override {
for (const auto &input : executableNetwork.GetInputsInfo()) {
const auto &info = input.second;
auto blob = GenerateInput(*info);

View File

@ -33,7 +33,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
LayerTestsDefinitions::padLayerTestParamsSet basicParamsSet;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, cpuParams) = this->GetParam();

View File

@ -37,7 +37,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
poolLayerTestParamsSet basicParamsSet;
CPUSpecificParams cpuParams;
fusingSpecificParams fusingParams;

View File

@ -41,7 +41,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
CPUSpecificParams cpuParams;
LayerTestsDefinitions::RNNCellParams basicParamsSet;
std::map<std::string, std::string> additionalConfig;

View File

@ -41,7 +41,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
LayerTestsDefinitions::RNNSequenceParams basicParamsSet;
CPUSpecificParams cpuParams;
std::map<std::string, std::string> additionalConfig;
@ -119,7 +119,7 @@ protected:
}
}
void GenerateInputs() {
void GenerateInputs() override {
for (const auto &input : executableNetwork.GetInputsInfo()) {
const auto &info = input.second;
auto blob = GenerateInput(*info);

View File

@ -34,7 +34,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
LayerTestsDefinitions::shuffleChannelsLayerTestParamsSet basicParamsSet;
CPUSpecificParams cpuParams;
std::tie(basicParamsSet, cpuParams) = this->GetParam();

View File

@ -60,7 +60,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
threshold = 0.1f;
std::vector<std::vector<size_t>> inputShapes;

View File

@ -46,7 +46,7 @@ public:
}
protected:
void SetUp() {
void SetUp() override {
threshold = 0.1f;
Shape inputShape;

View File

@ -15,7 +15,7 @@ namespace CPULayerTestsDefinitions {
class InputNoReorderEltwiseBF16 : virtual public LayerTestsUtils::LayerTestsCommon,
public CPUTestsBase {
protected:
void SetUp() {
void SetUp() override {
auto netPrecision = inPrc = Precision::FP32;
outPrc = Precision::BF16;
targetDevice = CommonTestUtils::DEVICE_CPU;

View File

@ -43,7 +43,7 @@ public:
return result.str();
}
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const {
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();

View File

@ -56,7 +56,7 @@ public:
return result.str();
}
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const {
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();
@ -122,7 +122,7 @@ public:
return result.str();
}
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const {
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin,
1 / inputDataResolution);
}

View File

@ -59,7 +59,7 @@ public:
return result.str();
}
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const {
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
return FuncTestUtils::createAndFillBlob(info.getTensorDesc(), inputDataMax - inputDataMin, inputDataMin, 1 / inputDataResolution);
}

View File

@ -51,7 +51,7 @@ public:
return result.str();
}
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const {
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();

View File

@ -48,7 +48,7 @@ public:
return result.str();
}
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const {
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();
@ -123,7 +123,7 @@ public:
return result.str();
}
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const {
InferenceEngine::Blob::Ptr GenerateInput(const InferenceEngine::InputInfo& info) const override {
InferenceEngine::Blob::Ptr blob = make_blob_with_precision(info.getTensorDesc());
blob->allocate();

View File

@ -25,7 +25,7 @@ class RemoteBlob_Test : public CommonTestUtils::TestsCommon {
protected:
std::shared_ptr<ngraph::Function> fn_ptr;
virtual void SetUp() {
void SetUp() override {
fn_ptr = ngraph::builder::subgraph::makeSplitMultiConvConcat();
}
};

View File

@ -21,7 +21,7 @@ protected:
std::vector<std::string> outputsToAdd;
std::string deviceName;
void SetUp();
void SetUp() override;
public:
static std::string getTestCaseName(const testing::TestParamInfo<addOutputsParams> &obj);
};

View File

@ -21,7 +21,7 @@ protected:
std::vector<std::string> statesToQuery;
std::string deviceName;
void SetUp();
void SetUp() override;
InferenceEngine::ExecutableNetwork PrepareNetwork();
public:
static std::string getTestCaseName(const testing::TestParamInfo<memoryStateParams> &obj);

View File

@ -20,7 +20,7 @@ private:
// vector which is later used for comparison
struct exec_graph_walker : pugi::xml_tree_walker {
std::vector<pugi::xml_node> nodes;
virtual bool for_each(pugi::xml_node &node);
bool for_each(pugi::xml_node &node) override;
};
// compare_docs() helper

View File

@ -51,13 +51,13 @@ function(add_common_utils ADD_TARGET_NAME)
target_include_directories(${ADD_TARGET_NAME}
PUBLIC
${IE_TESTS_ROOT}/ie_test_utils
$<TARGET_PROPERTY:inference_engine,INTERFACE_INCLUDE_DIRECTORIES>
PRIVATE
$<TARGET_PROPERTY:inference_engine_legacy,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_plugin_api,INTERFACE_INCLUDE_DIRECTORIES>
$<TARGET_PROPERTY:inference_engine_transformations,INTERFACE_INCLUDE_DIRECTORIES>
)
target_include_directories(${ADD_TARGET_NAME} SYSTEM PUBLIC ${IE_TESTS_ROOT}/ie_test_utils)
target_compile_definitions(${ADD_TARGET_NAME} PUBLIC ${ARGN})
endfunction()

View File

@ -2,6 +2,10 @@
# SPDX-License-Identifier: Apache-2.0
#
if(SUGGEST_OVERRIDE_SUPPORTED)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-suggest-override")
endif()
set(TARGET_NAME unitTestUtils)
add_subdirectory(mocks/mock_engine)

View File

@ -374,7 +374,8 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getOriginalWithSplitedIntermed
Output<Node> lastOutput = intermediateOp->output(1);
if (addConvolution) {
auto weights = ngraph::opset1::Constant::create(
precision, ngraph::Shape{ inputShape[1].get_length() / numSplit, inputShape[1].get_length() / numSplit, 1, 1 }, { 1 });
precision, ngraph::Shape{ static_cast<size_t>(inputShape[1].get_length() / numSplit),
static_cast<size_t>(inputShape[1].get_length() / numSplit), 1, 1 }, { 1 });
auto convolution = std::make_shared<ngraph::opset1::Convolution>(
intermediateOp->output(1),
weights,
@ -1260,7 +1261,8 @@ std::shared_ptr<ngraph::Function> ConcatFunction::getReferenceWithSplitedInterme
if (addConvolution) {
auto weights = ngraph::opset1::Constant::create(
precision,
ngraph::Shape{ inputShape[1].get_length() / numSplit, inputShape[1].get_length() / numSplit, 1, 1 }, { 1 });
ngraph::Shape{ static_cast<size_t>(inputShape[1].get_length() / numSplit),
static_cast<size_t>(inputShape[1].get_length() / numSplit), 1, 1 }, { 1 });
auto convolution = std::make_shared<ngraph::opset1::Convolution>(
lastDequantization2,
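The static_cast<size_t> additions above are presumably needed because Dimension::get_length() returns a signed 64-bit value, and brace-initializing an ngraph::Shape (whose elements are size_t) from a signed expression is a narrowing conversion that compilers reject. A self-contained sketch with a stand-in Shape type:

#include <cstddef>
#include <cstdint>
#include <vector>

using Shape = std::vector<std::size_t>;  // stand-in for ngraph::Shape

int main() {
    std::int64_t length = 8;  // what Dimension::get_length() would return
    int numSplit = 2;
    // Shape bad{ length / numSplit, 1, 1 };  // error: narrowing int64_t -> size_t in braces
    Shape ok{ static_cast<std::size_t>(length / numSplit), 1, 1 };
    return static_cast<int>(ok.size());
}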

View File

@ -12,10 +12,12 @@ addIeTargetTest(
OBJECT_FILES
$<TARGET_OBJECTS:MKLDNNPlugin_obj>
LINK_LIBRARIES
unitTestUtils
gtest
gtest_main
mkldnn
inference_engine_transformations
inference_engine_lp_transformations
inference_engine_s
ADD_CPPLINT
LABELS
CPU

View File

@ -8,11 +8,9 @@ if (NOT NGRAPH_USE_PROTOBUF_LITE)
addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
DEPENDENCIES
ngraph
onnx_importer
LINK_LIBRARIES
unitTestUtils
gtest
gtest_main
onnx_importer
DEFINES
ONNX_MODELS_DIR=\"${CMAKE_CURRENT_SOURCE_DIR}/models\"

View File

@ -8,9 +8,15 @@ addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
LINK_LIBRARIES
unitTestUtils
gmock
commonTestUtils_s
GNAPlugin_test_static
ADD_CPPLINT
LABELS
GNA
)
if(SUGGEST_OVERRIDE_SUPPORTED)
set_source_files_properties(gna_model_serial_test.cpp
PROPERTIES COMPILE_OPTIONS -Wno-suggest-override)
endif()

View File

@ -16,7 +16,6 @@ addIeTargetTest(
NAME ${TARGET_NAME}
ROOT ${CMAKE_CURRENT_SOURCE_DIR}
LINK_LIBRARIES
unitTestUtils
inference_engine_lp_transformations
${OpenCV_LIBRARIES}
ADD_CPPLINT
@ -26,3 +25,10 @@ addIeTargetTest(
LABELS
IE
)
if(SUGGEST_OVERRIDE_SUPPORTED)
set_source_files_properties(cpp_interfaces/ie_memory_state_internal_test.cpp
PROPERTIES COMPILE_OPTIONS -Wno-suggest-override)
endif()
link_system_libraries(${TARGET_NAME} PRIVATE unitTestUtils)

View File

@ -32,10 +32,7 @@ protected:
shared_ptr<IInferRequest> request;
ResponseDesc dsc;
virtual void TearDown() {
}
virtual void SetUp() {
void SetUp() override {
mock_impl.reset(new MockIInferRequestInternal());
request = std::make_shared<InferRequestBase>(mock_impl);
}

View File

@ -56,10 +56,10 @@ protected:
MockTaskExecutor::Ptr mockTaskExecutor;
virtual void TearDown() {
void TearDown() override {
}
virtual void SetUp() {
void SetUp() override {
InputsDataMap inputsInfo;
OutputsDataMap outputsInfo;
mockTaskExecutor = make_shared<MockTaskExecutor>();

View File

@ -30,7 +30,7 @@ class VariableStateTests : public ::testing::Test {
SoExecutableNetworkInternal net;
IInferRequestInternal::Ptr req;
virtual void SetUp() {
void SetUp() override {
mockExeNetworkInternal = make_shared<MockIExecutableNetworkInternal>();
mockInferRequestInternal = make_shared<MockIInferRequestInternal>();
mockVariableStateInternal = make_shared<MockIVariableStateInternal>();
@ -199,14 +199,12 @@ TEST_F(VariableStateTests, VariableStateCanPropagateGetLastState) {
ASSERT_FLOAT_EQ(saver->cbuffer().as<const float*>()[2], 125);
IE_SUPPRESS_DEPRECATED_END
}
class VariableStateInternalMockImpl : public IVariableStateInternal {
public:
VariableStateInternalMockImpl(const char* name) : IVariableStateInternal(name) {}
MOCK_METHOD0(Reset, void());
};
TEST_F(VariableStateTests, VariableStateInternalCanSaveName) {
IVariableStateInternal::Ptr pState(new VariableStateInternalMockImpl("VariableStateInternalMockImpl"));
ASSERT_STREQ(pState->GetName().c_str(), "VariableStateInternalMockImpl");

View File

@ -35,14 +35,14 @@ protected:
ResponseDesc dsc;
StatusCode sts;
virtual void TearDown() {
void TearDown() override {
EXPECT_TRUE(Mock::VerifyAndClearExpectations(mock_plugin_impl.get()));
EXPECT_TRUE(Mock::VerifyAndClearExpectations(mockIExeNetworkInternal.get()));
EXPECT_TRUE(Mock::VerifyAndClearExpectations(mockExeNetworkTS.get()));
EXPECT_TRUE(Mock::VerifyAndClearExpectations(mockInferRequestInternal.get()));
}
virtual void SetUp() {
void SetUp() override {
pluginId = "TEST";
mock_plugin_impl.reset(new MockInferencePluginInternal());
mock_plugin_impl->SetName(pluginId);

View File

@ -16,10 +16,6 @@
class BlobTests: public ::testing::Test {
protected:
virtual void TearDown() {}
virtual void SetUp() {}
std::shared_ptr<MockAllocator> createMockAllocator() {
return std::shared_ptr<MockAllocator>(new MockAllocator());
}

View File

@ -42,13 +42,13 @@ protected:
MockIInferencePlugin* mockIPlugin;
InferencePlugin plugin;
virtual void TearDown() {
void TearDown() override {
mockIExeNet.reset();
exeNetwork = {};
plugin = {};
}
virtual void SetUp() {
void SetUp() override {
mockIExeNet = std::make_shared<MockIExecutableNetworkInternal>();
auto mockIPluginPtr = std::make_shared<MockIInferencePlugin>();
ON_CALL(*mockIPluginPtr, LoadNetwork(MatcherCast<const CNNNetwork&>(_), _)).WillByDefault(Return(mockIExeNet));
@ -113,12 +113,12 @@ class ExecutableNetworkWithIInferReqTests : public ExecutableNetworkTests {
protected:
std::shared_ptr<MockIInferRequestInternal> mockIInferReq_p;
virtual void TearDown() {
void TearDown() override {
ExecutableNetworkTests::TearDown();
mockIInferReq_p.reset();
}
virtual void SetUp() {
void SetUp() override {
ExecutableNetworkTests::SetUp();
mockIInferReq_p = std::make_shared<MockIInferRequestInternal>();
}
@ -143,10 +143,7 @@ protected:
std::shared_ptr<IExecutableNetwork> exeNetwork;
ResponseDesc dsc;
virtual void TearDown() {
}
virtual void SetUp() {
void SetUp() override {
mock_impl.reset(new MockIExecutableNetworkInternal());
exeNetwork = std::make_shared<ExecutableNetworkBase>(mock_impl);
}

View File

@ -25,7 +25,7 @@ protected:
return CommonTestUtils::pre + mockEngineName + IE_BUILD_POSTFIX + CommonTestUtils::ext;
}
virtual void SetUp() {
void SetUp() override {
std::string libraryName = get_mock_engine_name();
sharedObjectLoader.reset(new SharedObjectLoader(libraryName.c_str()));
createPluginEngineProxy = make_std_function<IInferencePlugin*(IInferencePlugin*)>("CreatePluginEngineProxy");

View File

@ -19,7 +19,6 @@ addIeTargetTest(
$<TARGET_PROPERTY:ngraphFunctions,INTERFACE_INCLUDE_DIRECTORIES>
LINK_LIBRARIES
vpu_graph_transformer_test_static
unitTestUtils
mvnc
ngraph
interpreter_backend
@ -29,3 +28,5 @@ addIeTargetTest(
VPU
MYRIAD
)
link_system_libraries(${TARGET_NAME} PRIVATE unitTestUtils)

View File

@ -126,9 +126,6 @@ protected:
ie = PluginCache::get().ie();
}
void TearDown() override {
}
public:
std::shared_ptr<ngraph::Function> createSubgraph(const SizeVector &dims, InferenceEngine::Precision prc = InferenceEngine::Precision::FP32) {
ngraph::element::Type type = FuncTestUtils::PrecisionUtils::convertIE2nGraphPrc(prc);

View File

@ -32,7 +32,7 @@ template<typename P = std::nullptr_t>
class PlgTest : public testing::TestWithParam<PlgTestParam<P>> {
protected:
std::map<std::string, std::string> config;
virtual void SetUp() {
void SetUp() override {
device_name = std::get<0>(this->GetParam());
std::transform(device_name.begin(), device_name.end(),
device_name.begin(), [] (char v) { return v == '_' ? ':' : v; });

View File

@ -83,6 +83,5 @@ protected:
std::map <std::string, std::string> config_;
//Operations
virtual void SetUp() override = 0;
virtual void InitConfig();
};

View File

@ -49,7 +49,7 @@ public:
protected:
//Operations
virtual void SetUp() override;
void SetUp() override;
virtual void InitConfig() override;
template <class T>

View File

@ -56,7 +56,7 @@ class PoolingTest : public myriadLayersTests_nightly,
pooling_layer_params, vpu::LayoutPreference, Types...>>
{
public:
virtual void SetUp() {
void SetUp() override {
myriadLayersTests_nightly::SetUp();
auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, pooling_layer_params, vpu::LayoutPreference, Types...>>::GetParam();
_input_tensor = std::get<0>(p);
@ -118,7 +118,7 @@ class GlobalPoolingTest : public myriadLayersTests_nightly,
public testing::WithParamInterface<GlobalPoolingTestParam>
{
public:
virtual void SetUp() {
void SetUp() override {
myriadLayersTests_nightly::SetUp();
auto params = ::testing::WithParamInterface<GlobalPoolingTestParam>::GetParam();
_input_tensor = std::get<0>(params);
@ -166,7 +166,7 @@ class PoolingTestPad4 : public myriadLayersTests_nightly,
public testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, paddings4, vpu::LayoutPreference, Types...>>
{
public:
virtual void SetUp() {
void SetUp() override {
myriadLayersTests_nightly::SetUp();
auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, paddings4, vpu::LayoutPreference, Types...>>::GetParam();
_input_tensor = std::get<0>(p);
@ -225,7 +225,7 @@ class ConvolutionTest : public myriadLayersTests_nightly,
public testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, uint32_t, uint32_t, Types...>>
{
public:
virtual void SetUp() {
void SetUp() override {
myriadLayersTests_nightly::SetUp();
auto p = ::testing::WithParamInterface<std::tuple<InferenceEngine::SizeVector, param_size, param_size, param_size, uint32_t, uint32_t, Types...>>::GetParam();
_input_tensor = std::get<0>(p);
@ -281,7 +281,7 @@ class FCTest : public myriadLayersTests_nightly,
public testing::WithParamInterface<std::tuple<fcon_test_params, int32_t, int32_t, Types...>>
{
public:
virtual void SetUp() {
void SetUp() override {
myriadLayersTests_nightly::SetUp();
auto p = ::testing::WithParamInterface<std::tuple<fcon_test_params, int32_t, int32_t, Types...>>::GetParam();
_par = std::get<0>(p);

View File

@ -29,6 +29,12 @@ if (ENABLE_GNA)
list(APPEND TEST_SRC ${GNA_TESTS})
list(APPEND TEST_DEPS GNAPlugin_test_static)
if(SUGGEST_OVERRIDE_SUPPORTED)
set_source_files_properties(engines/gna/graph_tools/graph_copy_tests.cpp
engines/gna/graph_tools/graph_tools_test.cpp
PROPERTIES COMPILE_OPTIONS -Wno-suggest-override)
endif()
# TODO: fix GNA tests
if(OFF)
set(gna_stub "${CMAKE_CURRENT_SOURCE_DIR}/engines/gna/gna_api_stub.cpp")
@ -55,6 +61,15 @@ endif()
if (ENABLE_MYRIAD)
include(${XLINK_DIR}/XLink.cmake)
if(SUGGEST_OVERRIDE_SUPPORTED)
set_source_files_properties(engines/vpu/myriad_tests/helpers/myriad_test_case.cpp
engines/vpu/mvnc/watchdog_tests.cpp
engines/vpu/sw_conv_adaptation.cpp
engines/vpu/myriad_tests/myriad_engine_tests.cpp
engines/vpu/myriad_tests/myriad_metrics_tests.cpp
PROPERTIES COMPILE_OPTIONS -Wno-suggest-override)
endif()
file(GLOB
VPU_TESTS
engines/vpu/*cpp

View File

@ -232,7 +232,7 @@ protected:
const int count = 10;
std::vector<int> vec;
virtual void SetUp() override {
void SetUp() override {
for (int i = 0; i < count; ++i) {
vec.push_back(i);
}

View File

@ -20,12 +20,6 @@ InferenceEngine::Precision defaultPrecision{InferenceEngine::Precision::FP32};
class LayersTests : public ::testing::Test {
public:
virtual void TearDown() {
}
virtual void SetUp() {
}
static InferenceEngine::LayerParams getParamsForLayer(std::string name, std::string type,
InferenceEngine::Precision precision) {
InferenceEngine::LayerParams params = {};

View File

@ -6,14 +6,7 @@ if(ENABLE_MYRIAD)
add_subdirectory(movidius)
endif()
if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=unknown-warning-option -Wno-error=inconsistent-missing-override -Wno-error=pass-failed")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=unknown-warning-option -Wno-error=inconsistent-missing-override -Wno-error=pass-failed")
elseif(CMAKE_COMPILER_IS_GNUCC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 9.1)
# On g++ 9.3.0 (Ubuntu 20.04) the ADE library raises "redundant-move" warnings
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=redundant-move")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-error=redundant-move")
elseif((CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") AND (MSVC_VERSION VERSION_GREATER_EQUAL "1910"))
if((CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") AND (MSVC_VERSION VERSION_GREATER_EQUAL "1910"))
# 1910 version of Visual Studio 2017
# This flag is needed to enable SIMD vectorization with the '#pragma omp simd' directive.
# Compiling with the '/openmp:experimental' flag allows us to enable vectorization capability in MSVC.
@ -30,19 +23,12 @@ if (ENABLE_CLDNN)
else()
set(CLDNN__INCLUDE_TESTS OFF CACHE BOOL "" FORCE)
endif()
if (WIN32)
set(CLDNN__ARCHITECTURE_TARGET "Windows64" CACHE STRING "" FORCE)
elseif (ANDROID)
set(CLDNN__ARCHITECTURE_TARGET "Android64" CACHE STRING "" FORCE)
else()
set(CLDNN__ARCHITECTURE_TARGET "Linux64" CACHE STRING "" FORCE)
endif()
set(CLDNN_THREADING "${THREADING}" CACHE STRING "" FORCE)
set(GPU_DEBUG_CONFIG OFF CACHE BOOL "Enable debug config feature")
add_subdirectory(clDNN)
endif()
if(ENABLE_MKL_DNN)
function(ie_add_mkldnn)
set(DNNL_ENABLE_CONCURRENT_EXEC ON CACHE BOOL "" FORCE)
set(DNNL_ENABLE_PRIMITIVE_CACHE OFF CACHE BOOL "" FORCE) ## TODO: try it later
set(DNNL_ENABLE_MAX_CPU_ISA OFF CACHE BOOL "" FORCE) ## TODO: try it later
@ -56,6 +42,18 @@ if(ENABLE_MKL_DNN)
set(OpenMP_cmake_included ON) ## to skip "omp simd" inside a code. Lead to some crashes inside NDK LLVM..
endif()
if(CMAKE_COMPILER_IS_GNUCXX)
ie_add_compiler_flags(-Wno-undef)
if(SUGGEST_OVERRIDE_SUPPORTED)
# xbyak compilation fails
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-suggest-override")
endif()
endif()
add_subdirectory(mkl-dnn EXCLUDE_FROM_ALL)
add_library(mkldnn ALIAS dnnl)
endfunction()
if(ENABLE_MKL_DNN)
ie_add_mkldnn()
endif()
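Wrapping the third-party configuration in a function, as ie_add_mkldnn does above, keeps the flag changes scoped: normal variables set inside a CMake function do not propagate back to the caller. A minimal sketch of the pattern (hypothetical directory name):

function(add_vendored_lib)
    # visible to the subdirectory below, but not to the rest of the build tree
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-suggest-override")
    add_subdirectory(vendored_lib EXCLUDE_FROM_ALL)
endfunction()
add_vendored_lib()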

View File

@ -2,12 +2,6 @@
# SPDX-License-Identifier: Apache-2.0
#
cmake_minimum_required (VERSION 3.1)
# ======================================================================================================
# ======================================================================================================
# ======================================================================================================
# Name of project (helper constant variable).
set(CLDNN__PROJ_NAME "clDNN")
@ -58,18 +52,6 @@ set(CLDNN__CODEGEN_INCDIR "${CLDNN__CODEGEN_DIR}/include")
# ============================================ CMAKE OPTIONS ===========================================
# ======================================================================================================
# Include and build: Core of clDNN framework.
set(CLDNN__INCLUDE_CORE ON CACHE BOOL "Include and build: clDNN core.")
mark_as_advanced(CLDNN__INCLUDE_CORE)
# ======================================================================================================
# Include and build: Kernel selector for clDNN framework.
set(CLDNN__INCLUDE_KERNEL_SELECTOR ON CACHE BOOL "Include and build: clDNN kernel selector.")
mark_as_advanced(CLDNN__INCLUDE_KERNEL_SELECTOR)
# ======================================================================================================
# Include and build: Tests (unit tests and small acceptance tests) for clDNN framework.
set(CLDNN__INCLUDE_TESTS ON CACHE BOOL "Include and build: clDNN framework's tests.")
mark_as_advanced(CLDNN__INCLUDE_TESTS)
@ -96,10 +78,6 @@ set(CLDNN_UTILS__RAPIDJSON_INCDIRS "utils/rapidjson" CACHE INTERNAL "Paths to in
set(CLDNN_BUILD__PROJ__clDNN "clDNN_lib")
set(CLDNN_BUILD__PROJ_LABEL__clDNN "clDNN")
# ================================================ Outputs =============================================
set(CLDNN_BUILD__PROJ_OUTPUT_NAME__clDNN "clDNN${CLDNN__OUT_CPU_SUFFIX}")
# ===================================== Include/Link directories =======================================
include_directories(
@ -109,13 +87,11 @@ include_directories(
)
# =================================== Link targets and dependencies ====================================
if(CLDNN__INCLUDE_CORE)
add_subdirectory(src)
add_subdirectory(runtime)
endif()
add_subdirectory(src)
add_subdirectory(runtime)
if(CLDNN__INCLUDE_TESTS)
add_subdirectory(tests)
endif()
if(CLDNN__INCLUDE_KERNEL_SELECTOR)
add_subdirectory(kernel_selector)
endif()
add_subdirectory(kernel_selector)

View File

@ -6,7 +6,6 @@
set(CLDNN_BUILD__PROJ "cldnn_kernel_selector")
set(CLDNN_BUILD__PROJ_LABEL "${CLDNN_BUILD__PROJ}")
set(CLDNN_BUILD__PROJ_OUTPUT_NAME "${CLDNN_BUILD__PROJ}${CLDNN__OUT_CPU_SUFFIX}")
# ========================================= Source/Header files ========================================
@ -125,7 +124,6 @@ add_library("${CLDNN_BUILD__PROJ}" STATIC
)
set_property(TARGET "${CLDNN_BUILD__PROJ}" PROPERTY PROJECT_LABEL "${CLDNN_BUILD__PROJ_LABEL}")
set_property(TARGET "${CLDNN_BUILD__PROJ}" PROPERTY OUTPUT_NAME "${CLDNN_BUILD__PROJ_OUTPUT_NAME}")
if(COMMAND add_cpplint_target)
add_cpplint_target("${CLDNN_BUILD__PROJ}_cpplint" FOR_TARGETS "${CLDNN_BUILD__PROJ}")

View File

@ -15,7 +15,7 @@ struct activation_params : public base_params {
MultiDataTensor inputActivationParams;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
auto k = base_params::GetParamsKey();
if (!inputActivationParams.empty()) {
k.EnableActivationAdditionalParamsAsInput();

View File

@ -21,7 +21,7 @@ struct arg_max_min_params : public base_params {
uint32_t outputs_num = 1;
bool values_first = false;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
ParamsKey k = base_params::GetParamsKey();
k.EnableArgMaxMinAxis(argMaxMinAxis);

View File

@ -16,10 +16,6 @@ struct average_unpooling_params : public base_params {
uSize unpoolSize;
uSize unpoolStride;
virtual ParamsKey GetParamsKey() const {
return base_params::GetParamsKey();
}
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -17,7 +17,6 @@ struct batch_to_space_params : public base_params {
DimTensor<uint32_t> block_shape;
DimTensor<uint32_t> crops_begin;
DimTensor<uint32_t> crops_end;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -42,7 +41,7 @@ public:
struct DispatchData : public CommonDispatchData {};
protected:
virtual bool Validate(const Params&, const optional_params&) const;
bool Validate(const Params&, const optional_params&) const override;
virtual JitConstants GetJitConstants(const batch_to_space_params& params) const;
virtual CommonDispatchData SetDefault(const batch_to_space_params& params, const optional_params&) const;
KernelsData GetCommonKernelsData(const Params& params, const optional_params&) const;

View File

@ -18,7 +18,7 @@ struct concatenation_params : public base_params {
bool isAligned = true;
size_t misalignment = 0;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
auto k = base_params::GetParamsKey();
k.EnableConcatAxis(axis);
return k;
@ -32,7 +32,7 @@ struct concatenation_optional_params : optional_params {
concatenation_optional_params() : optional_params(KernelType::CONCATENATION) {}
bool kernelPerInput = true;
virtual ParamsKey GetSupportedKey() const {
ParamsKey GetSupportedKey() const override {
ParamsKey k = optional_params::GetSupportedKey();
if (kernelPerInput) {

View File

@ -17,8 +17,6 @@ struct cum_sum_params : public base_params {
CumSumAxis axis;
bool exclusive;
bool reverse;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -18,8 +18,6 @@ struct depth_to_space_params : public base_params {
, mode(DepthToSpaceMode::DEPTH_FIRST) {}
size_t block_size;
DepthToSpaceMode mode;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -45,7 +43,7 @@ public:
};
protected:
virtual bool Validate(const Params&, const optional_params&) const;
bool Validate(const Params&, const optional_params&) const override;
virtual JitConstants GetJitConstants(const depth_to_space_params& params) const;
virtual CommonDispatchData SetDefault(const depth_to_space_params& params) const;
KernelsData GetCommonKernelsData(const Params& params, const optional_params&) const;

View File

@ -75,7 +75,7 @@ struct eltwise_params : public base_params {
bool int8_quantization = false;
bool broadcast = false;
virtual ParamsKey GetParamsKey() const;
ParamsKey GetParamsKey() const override;
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -16,8 +16,6 @@ struct embedding_bag_params : public base_params {
EmbeddingBagType type;
int32_t default_index;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -20,8 +20,6 @@ struct extract_image_patches_params : public base_params {
std::vector<unsigned int> strides;
std::vector<unsigned int> rates;
std::string auto_pad;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -16,7 +16,7 @@ struct fully_connected_params : public weight_bias_params {
QuantizationType quantization = QuantizationType::NONE;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
ParamsKey k = weight_bias_params::GetParamsKey();
k.EnableQuantization(quantization);

View File

@ -16,7 +16,6 @@ struct gather_params : public base_params {
GatherAxis axis;
int64_t batch_dim;
bool support_neg_ind;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -16,8 +16,6 @@ struct gather_nd_params : public base_params {
uint8_t indices_rank;
uint8_t batch_dims;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
@ -33,7 +31,7 @@ public:
virtual ~GatherNDKernelRef() {}
virtual JitConstants GetJitConstants(const gather_nd_params& params) const;
virtual CommonDispatchData SetDefault(const gather_nd_params& params, const optional_params&) const;
KernelsData GetKernelsData(const Params& params, const optional_params& options) const;
KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
std::vector<FusedOpType> GetSupportedFusedOps() const override {
return { FusedOpType::QUANTIZE,

View File

@ -21,7 +21,7 @@ struct gemm_params : public base_params {
bool transpose_input1;
QuantizationType quantization = QuantizationType::NONE;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
ParamsKey k = base_params::GetParamsKey();
k.EnableQuantization(quantization);
return k;

View File

@ -21,7 +21,7 @@ struct lrn_params : public base_params {
float k = 0.f;
uint32_t localSize = 0;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
ParamsKey _k = base_params::GetParamsKey();
_k.EnableLRNMode(normMode);

View File

@ -13,8 +13,6 @@ namespace kernel_selector {
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
struct max_unpooling_params : public base_params {
max_unpooling_params() : base_params(KernelType::MAX_UNPOOLING) {}
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -20,7 +20,7 @@ struct mvn_params : public base_params {
float epsilon = 0.0f;
MVNEpsMode mvnEpsMode = MVNEpsMode::INSIDE_SQRT;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
ParamsKey k = base_params::GetParamsKey();
k.EnableMVNMode(mvnMode);

View File

@ -18,7 +18,7 @@ struct normalize_params : public base_params {
float epsilon = 1e-10f;
DataTensor scaleTable;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
ParamsKey k = base_params::GetParamsKey();
k.EnableNormalizeMode(normMode);

View File

@ -15,7 +15,7 @@ public:
virtual ~PermuteKernelBase() {}
bool Validate(const Params& p, const optional_params& o) const override;
KernelsData GetKernelsData(const Params& params, const optional_params& options) const;
KernelsData GetKernelsData(const Params& params, const optional_params& options) const override;
protected:
virtual JitConstants GetJitConstants(const permute_params& params, const CommonDispatchData& dispatchData) const;
virtual CommonDispatchData SetDefault(const permute_params& params) const = 0;

View File

@ -19,7 +19,7 @@ public:
virtual ~PermuteKernelRef() {}
bool Validate(const Params& p, const optional_params& o) const override;
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const;
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
protected:

View File

@ -18,11 +18,11 @@ public:
virtual ~PermuteKernel_tile_8x8_4x4() {}
bool Validate(const Params& p, const optional_params& o) const override;
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const;
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
protected:
JitConstants GetJitConstants(const permute_params& params, const CommonDispatchData& dispatchData) const;
CommonDispatchData SetDefault(const permute_params& params) const;
JitConstants GetJitConstants(const permute_params& params, const CommonDispatchData& dispatchData) const override;
CommonDispatchData SetDefault(const permute_params& params) const override;
std::vector<FusedOpType> GetSupportedFusedOps() const override {
return {
FusedOpType::ACTIVATION,

View File

@ -18,11 +18,11 @@ public:
virtual ~PermuteKernel_tile_8x8_4x4_fsv() {}
bool Validate(const Params& p, const optional_params& o) const override;
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const;
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
protected:
JitConstants GetJitConstants(const permute_params& params, const CommonDispatchData& dispatchData) const;
CommonDispatchData SetDefault(const permute_params& params) const;
JitConstants GetJitConstants(const permute_params& params, const CommonDispatchData& dispatchData) const override;
CommonDispatchData SetDefault(const permute_params& params) const override;
std::vector<FusedOpType> GetSupportedFusedOps() const override {
return {
FusedOpType::ACTIVATION,

View File

@ -15,7 +15,6 @@ struct permute_params : public base_params {
permute_params() : base_params(KernelType::PERMUTE) {}
std::vector<uint16_t> order;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -22,7 +22,7 @@ struct pooling_params : public base_params {
uSize poolStride;
uSize poolPad;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
ParamsKey k = base_params::GetParamsKey();
k.EnablePoolType(poolType);

View File

@ -53,7 +53,7 @@ struct quantize_params : public base_params {
float out_scale;
float out_shift;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
auto k = base_params::GetParamsKey();
if (packed_binary_output)
k.EnableQuantizePackedBinaryOutput();

View File

@ -17,8 +17,6 @@ struct reduce_params : public base_params {
ReduceMode reduceMode;
std::vector<uint16_t> reduceAxes;
int32_t keepDims;
virtual ParamsKey GetParamsKey() const { return base_params::GetParamsKey(); }
};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

View File

@ -21,7 +21,7 @@ struct region_yolo_params : public base_params {
uint32_t mask_size;
bool do_softmax;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
auto k = base_params::GetParamsKey();
return k;
}

View File

@ -16,7 +16,7 @@ public:
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
protected:
JitConstants GetJitConstants(const reorder_params& params) const;
CommonDispatchData SetDefault(const reorder_params& params) const;
JitConstants GetJitConstants(const reorder_params& params) const override;
CommonDispatchData SetDefault(const reorder_params& params) const override;
};
} // namespace kernel_selector

View File

@ -26,7 +26,7 @@ struct reorder_params : public base_params {
bool winograd = false;
bool has_padded_output = false;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
auto k = base_params::GetParamsKey();
if (winograd) {
@ -54,7 +54,7 @@ struct reorder_weights_params : public Params {
bool winograd = false;
bool rotate_180 = false;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
ParamsKey k;
k.EnableInputWeightsType(input.GetDType());
k.EnableOutputWeightsType(output.GetDType());
@ -95,7 +95,7 @@ protected:
virtual JitConstants GetJitConstants(const reorder_params& params) const;
virtual DispatchData SetDefault(const reorder_weights_params& params) const;
virtual DispatchData SetDefault(const reorder_params& params) const;
virtual bool Validate(const Params&, const optional_params&) const { return true; }
bool Validate(const Params&, const optional_params&) const override { return true; }
KernelsData GetCommonKernelsData(const reorder_weights_params& params,
const optional_params&) const;
KernelsData GetCommonKernelsData(const reorder_params& params, const optional_params&) const;

View File

@ -16,7 +16,7 @@ public:
KernelsPriority GetKernelsPriority(const Params& params, const optional_params& options) const override;
ParamsKey GetSupportedKey() const override;
protected:
JitConstants GetJitConstants(const reorder_params& params) const;
CommonDispatchData SetDefault(const reorder_params& params) const;
JitConstants GetJitConstants(const reorder_params& params) const override;
CommonDispatchData SetDefault(const reorder_params& params) const override;
};
} // namespace kernel_selector

View File

@ -16,7 +16,7 @@ struct reorg_yolo_params : public base_params {
uint32_t stride;
virtual ParamsKey GetParamsKey() const {
ParamsKey GetParamsKey() const override {
auto k = base_params::GetParamsKey();
return k;
}

Some files were not shown because too many files have changed in this diff.