###########################################################################
# #
# Note: The bulk of the build system is located in the cmake/ directory. #
# This file only contains the specializations for this particular #
# project. Most likely you are interested in editing one of these #
# files instead: #
# #
# dune.module Name and version number #
# CMakeLists_files.cmake Path of source files #
# cmake/Modules/${project}-prereqs.cmake Dependencies #
# #
###########################################################################
# Mandatory call to project
project(opm-simulators C CXX)
cmake_minimum_required (VERSION 3.10)
option(SIBLING_SEARCH "Search for other modules in sibling directories?" ON)
set( USE_OPENMP_DEFAULT OFF ) # Use of OpenMP is considered experimental
option(BUILD_FLOW "Build the production oriented flow simulator?" ON)
option(BUILD_FLOW_VARIANTS "Build the variants for flow by default?" OFF)
option(BUILD_FLOW_POLY_GRID "Build flow blackoil with polyhedral grid" OFF)
option(OPM_ENABLE_PYTHON "Enable python bindings?" OFF)
option(OPM_ENABLE_PYTHON_TESTS "Enable tests for the python bindings?" ON)
option(OPM_INSTALL_PYTHON "Install python bindings?" ON)
option(USE_CHOW_PATEL_ILU "Use the iterative ILU by Chow and Patel?" OFF)
option(USE_CHOW_PATEL_ILU_GPU "Run iterative ILU decomposition on GPU? Requires USE_CHOW_PATEL_ILU" OFF)
option(USE_CHOW_PATEL_ILU_GPU_PARALLEL "Try to use more parallelism on the GPU during the iterative ILU decomposition? Requires USE_CHOW_PATEL_ILU_GPU" OFF)
option(BUILD_FLOW_ALU_GRID "Build flow blackoil with alu grid" OFF)
option(USE_DAMARIS_LIB "Use the Damaris library for asynchronous I/O?" OFF)
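# The options above are toggled on the configure command line. A minimal,
# illustrative invocation (the source path is an assumption; adjust to your
# checkout):
#
#   cmake -DBUILD_FLOW=ON -DOPM_ENABLE_PYTHON=ON -DUSE_CHOW_PATEL_ILU=OFF \
#         /path/to/opm-simulators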
# The following was copied from CMakeLists.txt in opm-common.
# TODO: factor out the common parts in opm-common and opm-simulator as a cmake module
if (OPM_ENABLE_PYTHON)
# We need to be compatible with older CMake versions
# that do not offer FindPython3
# e.g. Ubuntu LTS 18.04 uses cmake 3.10
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
find_package(PythonInterp REQUIRED)
if(PYTHON_VERSION_MAJOR LESS 3)
message(SEND_ERROR "OPM requires version 3 of Python but only version ${PYTHON_VERSION_STRING} was found")
endif()
set(Python3_EXECUTABLE ${PYTHON_EXECUTABLE})
set(Python3_LIBRARIES ${PYTHON_LIBRARIES})
set(Python3_VERSION "${PYTHON_VERSION_STRING}")
set(Python3_VERSION_MINOR ${PYTHON_VERSION_MINOR})
else()
# Be backwards compatible.
if(PYTHON_EXECUTABLE AND NOT Python3_EXECUTABLE)
set(Python3_EXECUTABLE ${PYTHON_EXECUTABLE})
endif()
find_package(Python3 REQUIRED COMPONENTS Interpreter Development)
endif()
if(Python3_VERSION_MINOR LESS 3)
# Python native namespace packages require Python >= 3.3
message(SEND_ERROR "OPM requires python >= 3.3 but only version ${Python3_VERSION} was found")
endif()
# Compatibility settings for PythonInterp and PythonLibs
# used e.g. in FindCwrap, pybind11
set(PYTHON_EXECUTABLE ${Python3_EXECUTABLE})
endif()
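# To build the bindings against a specific interpreter, pass it explicitly.
# With CMake >= 3.12 this is, as a sketch (the interpreter path is an
# assumption):
#
#   cmake -DOPM_ENABLE_PYTHON=ON -DPython3_EXECUTABLE=/usr/bin/python3 ..
#
# Older CMake versions use -DPYTHON_EXECUTABLE instead, as handled above.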
if(SIBLING_SEARCH AND NOT opm-common_DIR)
# guess the sibling dir
get_filename_component(_leaf_dir_name ${PROJECT_BINARY_DIR} NAME)
get_filename_component(_parent_full_dir ${PROJECT_BINARY_DIR} DIRECTORY)
get_filename_component(_parent_dir_name ${_parent_full_dir} NAME)
# Try if <module-name>/<build-dir> is used
get_filename_component(_modules_dir ${_parent_full_dir} DIRECTORY)
if(IS_DIRECTORY ${_modules_dir}/opm-common/${_leaf_dir_name})
set(opm-common_DIR ${_modules_dir}/opm-common/${_leaf_dir_name})
else()
string(REPLACE ${PROJECT_NAME} opm-common _opm_common_leaf ${_leaf_dir_name})
if(NOT _leaf_dir_name STREQUAL _opm_common_leaf
AND IS_DIRECTORY ${_parent_full_dir}/${_opm_common_leaf})
# We are using build directories named <prefix><module-name><postfix>
set(opm-common_DIR ${_parent_full_dir}/${_opm_common_leaf})
elseif(IS_DIRECTORY ${_parent_full_dir}/opm-common)
# All modules are in a common build dir
set(opm-common_DIR "${_parent_full_dir}/opm-common")
endif()
endif()
endif()
if(opm-common_DIR AND NOT IS_DIRECTORY ${opm-common_DIR})
message(WARNING "Value ${opm-common_DIR} passed to variable"
" opm-common_DIR is not a directory")
endif()
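# If the sibling search does not locate opm-common, its build tree can be
# passed directly (the path is an assumption; use your own layout):
#
#   cmake -Dopm-common_DIR=/path/to/opm-common/build ..
#
# The sibling search expects layouts such as <modules>/opm-common/<build-dir>
# next to <modules>/opm-simulators/<build-dir>.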
find_package(opm-common REQUIRED)
include(OpmInit)
OpmSetPolicies()
# Not the same location as most of the other projects? This hook overrides the default.
macro (dir_hook)
endmacro (dir_hook)
# Project information is in dune.module. Read this file and set variables.
# We cannot generate dune.module since it is read by dunecontrol before
# the build starts, so it makes sense to keep the data there.
include (OpmInit)
# Look for the opm-tests repository; if found the variable
# HAVE_OPM_TESTS will be set to true.
include(Findopm-tests)
# list of prerequisites for this particular project; this is in a
# separate file (in cmake/Modules sub-directory) because it is shared
# with the find module
include ("${project}-prereqs")
# Make sure we are using the same compiler underneath
# NVCC as for the rest. In the case that NVCC does not support
# that compiler it will error out. Unfortunately this will only
# work for CMake >= 3.8. We found no way to make FindCUDA.cmake error
# out. It seems to ignore CMAKE_NVCC_FLAGS and CMAKE. Additionally
# our way of specifying cuda source files never worked for CMake
# version < 3.8. Hence we deactivate cuda for these versions.
# We use "CMAKE_VERSION VERSION_GREATER 3.7.9" instead of
# CMAKE_VERSION VERSION_GREATER_EQUAL 3.8, because of backwards
# compatibility to cmake 3.6 and lower.
if(NOT CMAKE_DISABLE_FIND_PACKAGE_CUDA AND
CMAKE_VERSION VERSION_GREATER 3.7.9)
if(CMAKE_BUILD_TYPE)
set(_flags_suffix "_${CMAKE_BUILD_TYPE}")
endif()
if(NOT DEFINED ENV{CUDAHOSTCXX} AND NOT DEFINED CMAKE_CUDA_HOST_COMPILER AND
(NOT CMAKE_CUDA_FLAGS${_flags_suffix} OR NOT CMAKE_CUDA_FLAGS${_flags_suffix} MATCHES ".*-ccbin .*"))
message(STATUS "Setting CUDA host compiler CMAKE_CUDA_HOST_COMPILER to ${CMAKE_CXX_COMPILER} to "
"prevent incompatibilities. Note that this might report that there "
"is not CUDA compiler if your system's CUDA compiler does not support "
"${CMAKE_CXX_COMPILER}.")
# check_language does not seem to care about ${CMAKE_CUDA_FLAGS} or ${CUDA_NVCC_FLAGS}.
# Hence we set CMAKE_CUDA_HOST_COMPILER to our C++ compiler.
# In check_language(CUDA) we will get an error if we in addition put
# "-ccbin ${CMAKE_CXX_COMPILER}" into CMAKE_CUDA_FLAGS. It results
# in "${NVCC} -ccbin=${CMAKE_CXX_COMPILER} -ccbin ${CMAKE_CXX_COMPILER}"
# which causes nvcc to abort
set(CMAKE_CUDA_HOST_COMPILER ${CMAKE_CXX_COMPILER})
set(ENV{CUDAHOSTCXX} ${CMAKE_CUDA_HOST_COMPILER}) # The only thing honored by check_language(CUDA)!
endif()
include(CheckLanguage)
check_language(CUDA)
if(CMAKE_CUDA_COMPILER)
# OPTIONAL is ignored. Hence the magic above to check whether enabling CUDA works
enable_language(CUDA OPTIONAL)
# While the documentation says that it is deprecated, FindCUDA seems the
# only easy way to determine the cublas and cusparse libraries.
# Hence we call it unconditionally
# The WellContributions kernel uses __shfl_down_sync, which was introduced in CUDA 9.0
find_package(CUDA)
endif()
if(CUDA_FOUND AND CUDA_VERSION VERSION_LESS "9.0")
set(CUDA_FOUND OFF)
message(WARNING "Deactivating CUDA as we require version 9.0 or newer."
" Found only CUDA version ${CUDA_VERSION}.")
endif()
endif()
if(CUDA_FOUND)
set(HAVE_CUDA 1)
set(COMPILE_BDA_BRIDGE 1)
include_directories(${CUDA_INCLUDE_DIRS})
endif()
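# CUDA detection can be steered from the outside. Two illustrative ways
# (the compiler name is an assumption):
#
#   cmake -DCMAKE_DISABLE_FIND_PACKAGE_CUDA=ON ..   # build without CUDA
#   CUDAHOSTCXX=g++-10 cmake ..                     # choose the NVCC host compiler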
find_package(OpenCL)
if(OpenCL_FOUND)
# the current OpenCL implementation relies on cl2.hpp, not cl.hpp
# make sure it is available, otherwise disable OpenCL
find_file(CL2_HPP CL/cl2.hpp HINTS ${OpenCL_INCLUDE_DIRS})
if(CL2_HPP)
set(HAVE_OPENCL 1)
set(COMPILE_BDA_BRIDGE 1)
include_directories(${OpenCL_INCLUDE_DIRS})
find_file(OPENCL_HPP CL/opencl.hpp HINTS ${OpenCL_INCLUDE_DIRS})
if(OPENCL_HPP)
set(HAVE_OPENCL_HPP 1)
endif()
else()
message(WARNING " OpenCL was found, but this version of opm-simulators relies on CL/cl2.hpp, which implements OpenCL 1.0, 1.1 and 1.2.\n Deactivating OpenCL")
set(OpenCL_FOUND OFF)
set(OPENCL_FOUND OFF)
endif()
if(USE_CHOW_PATEL_ILU)
add_compile_options(-DCHOW_PATEL=1)
if(USE_CHOW_PATEL_ILU_GPU)
add_compile_options(-DCHOW_PATEL_GPU=1)
if(USE_CHOW_PATEL_ILU_GPU_PARALLEL)
add_compile_options(-DCHOW_PATEL_GPU_PARALLEL=1)
else()
add_compile_options(-DCHOW_PATEL_GPU_PARALLEL=0)
endif()
else()
add_compile_options(-DCHOW_PATEL_GPU=0)
add_compile_options(-DCHOW_PATEL_GPU_PARALLEL=0)
endif()
endif()
else()
if(USE_CHOW_PATEL_ILU)
message(FATAL_ERROR " CHOW_PATEL_ILU only works for openclSolver, but OpenCL was not found")
endif()
endif()
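# The iterative (Chow-Patel) ILU sits on top of the OpenCL backend; an
# illustrative way to enable the full chain of options declared above is:
#
#   cmake -DUSE_CHOW_PATEL_ILU=ON \
#         -DUSE_CHOW_PATEL_ILU_GPU=ON \
#         -DUSE_CHOW_PATEL_ILU_GPU_PARALLEL=ON ..
#
# If CL/cl2.hpp is not found, OpenCL is deactivated above and the
# CHOW_PATEL options cannot be used.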
find_package(amgcl)
if(amgcl_FOUND)
set(HAVE_AMGCL 1)
set(COMPILE_BDA_BRIDGE 1)
# Linking to the target amgcl::amgcl drags in OpenMP and -fopenmp as a compile
# flag. With that, nvcc fails as it does not support that flag.
# Hence we set AMGCL_INCLUDE_DIRS instead.
get_property(AMGCL_INCLUDE_DIRS TARGET amgcl::amgcl PROPERTY INTERFACE_INCLUDE_DIRECTORIES)
include_directories(SYSTEM ${AMGCL_INCLUDE_DIRS})
endif()
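# amgcl is a header-only library found through its CMake package config; if
# it lives outside the default search path, a hint can be given (the path is
# an assumption):
#
#   cmake -Damgcl_DIR=/opt/amgcl/lib/cmake/amgcl ..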
if(OpenCL_FOUND)
find_package(VexCL)
if(VexCL_FOUND)
set(HAVE_VEXCL 1)
set(COMPILE_BDA_BRIDGE 1)
# Generator expressions in VexCL do not seem to work and therefore
# we cannot use the imported target. Hence we extract the needed info
# from the targets.
get_property(VEXCL_INCLUDE_DIRS TARGET VexCL::Common PROPERTY INTERFACE_INCLUDE_DIRECTORIES)
get_property(VEXCL_LINK_LIBRARIES TARGET VexCL::Common PROPERTY INTERFACE_LINK_LIBRARIES)
get_property(VEXCL_COMPILE_DEFINITIONS TARGET VexCL::OpenCL PROPERTY INTERFACE_COMPILE_DEFINITIONS)
set(VEXCL_LINK_LIBRARIES "${VEXCL_LINK_LIBRARIES};OpenCL::OpenCL")
add_library(OPM::VexCL::OpenCL INTERFACE IMPORTED)
set_target_properties(OPM::VexCL::OpenCL PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "${VEXCL_COMPILE_DEFINITIONS}"
INTERFACE_LINK_LIBRARIES "${VEXCL_LINK_LIBRARIES}")
target_include_directories(OPM::VexCL::OpenCL SYSTEM INTERFACE "${VEXCL_INCLUDE_DIRS}")
endif()
endif()
find_package(rocalution)
if(ROCALUTION_FOUND)
set(HAVE_ROCALUTION 1)
set(COMPILE_BDA_BRIDGE 1)
endif()
# read the list of components from this file (in the project directory);
# it should set various lists with the names of the files to include
include (CMakeLists_files.cmake)
macro (config_hook)
opm_need_version_of ("dune-common")
opm_need_version_of ("dune-istl")
if(dune-fem_FOUND)
opm_need_version_of ("dune-fem")
endif()
opm_need_version_of ("opm-models")
if(NOT fmt_FOUND)
add_definitions(-DFMT_HEADER_ONLY)
list(APPEND EXTRA_INCLUDES SYSTEM ${PROJECT_SOURCE_DIR}/external/fmtlib/include)
endif()
include_directories(${EXTRA_INCLUDES})
include(UseDamaris)
endmacro (config_hook)
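# Damaris-based asynchronous I/O is opt-in. An illustrative configure line
# (the install prefix is an assumption):
#
#   cmake -DUSE_DAMARIS_LIB=ON -DCMAKE_PREFIX_PATH=/opt/damaris ..
#
# The UseDamaris module included in config_hook is expected to locate the
# library; if Damaris_FOUND is set, opmsimulators links against it near the
# end of this file.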
macro (prereqs_hook)
endmacro (prereqs_hook)
macro (sources_hook)
if(OPENCL_FOUND)
include(opencl-source-provider)
list(APPEND opm-simulators_SOURCES ${PROJECT_BINARY_DIR}/clSources.cpp)
endif()
endmacro (sources_hook)
macro (fortran_hook)
endmacro (fortran_hook)
macro (files_hook)
endmacro (files_hook)
macro (tests_hook)
endmacro (tests_hook)
# all setup common to the OPM library modules is done here
include (OpmLibMain)
if (HAVE_OPM_TESTS)
include (${CMAKE_CURRENT_SOURCE_DIR}/compareECLFiles.cmake)
endif()
opm_set_test_driver(${CMAKE_CURRENT_SOURCE_DIR}/tests/run-parallel-unitTest.sh "")
opm_add_test(test_gatherconvergencereport
DEPENDS "opmsimulators"
LIBRARIES opmsimulators ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}
SOURCES
tests/test_gatherconvergencereport.cpp
CONDITION
MPI_FOUND AND Boost_UNIT_TEST_FRAMEWORK_FOUND
DRIVER_ARGS
-n 4
-b ${PROJECT_BINARY_DIR}
)
opm_add_test(test_gatherdeferredlogger
DEPENDS "opmsimulators"
LIBRARIES opmsimulators ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}
SOURCES
tests/test_gatherdeferredlogger.cpp
CONDITION
MPI_FOUND AND Boost_UNIT_TEST_FRAMEWORK_FOUND
DRIVER_ARGS
-n 4
-b ${PROJECT_BINARY_DIR}
)
opm_add_test(test_parallelwellinfo_mpi
EXE_NAME
test_parallelwellinfo
CONDITION
MPI_FOUND AND Boost_UNIT_TEST_FRAMEWORK_FOUND
DRIVER_ARGS
-n 4
-b ${PROJECT_BINARY_DIR}
NO_COMPILE
)
opm_add_test(test_broadcast
DEPENDS "opmsimulators"
LIBRARIES opmsimulators ${Boost_UNIT_TEST_FRAMEWORK_LIBRARY}
SOURCES
tests/test_broadcast.cpp
CONDITION
MPI_FOUND AND Boost_UNIT_TEST_FRAMEWORK_FOUND
DRIVER_ARGS
-n 4
-b ${PROJECT_BINARY_DIR}
)
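# These MPI tests run through the run-parallel-unitTest.sh driver with four
# ranks (-n 4). After building, an individual test can be run with ctest,
# e.g. (assuming MPI and Boost.Test were found so the test is enabled):
#
#   ctest -R test_broadcast --output-on-failure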
include(OpmBashCompletion)
if (NOT BUILD_FLOW)
set(FLOW_DEFAULT_ENABLE_IF "FALSE")
else()
set(FLOW_DEFAULT_ENABLE_IF "TRUE")
endif()
if (NOT BUILD_FLOW_VARIANTS)
set(FLOW_VARIANTS_DEFAULT_ENABLE_IF "FALSE")
else()
set(FLOW_VARIANTS_DEFAULT_ENABLE_IF "TRUE")
endif()
if (NOT BUILD_FLOW_POLY_GRID)
set(FLOW_POLY_ONLY_DEFAULT_ENABLE_IF "FALSE")
else()
set(FLOW_POLY_ONLY_DEFAULT_ENABLE_IF "TRUE")
endif()
add_library(moduleVersion OBJECT opm/simulators/utils/moduleVersion.cpp)
set_property(TARGET moduleVersion PROPERTY POSITION_INDEPENDENT_CODE ON)
# Strictly we only depend on the update-version target,
# but this is not exposed in a super-build.
add_dependencies(moduleVersion opmsimulators)
set(FLOW_MODELS blackoil brine energy extbo foam gasoil gaswater
oilwater oilwater_brine gaswater_brine oilwater_polymer
oilwater_polymer_injectivity micp polymer solvent
gasoil_energy brine_saltprecipitation
gaswater_saltprec_vapwat brine_precsalt_vapwat
blackoil_legacyassembly gasoildiffuse gaswater_dissolution
gaswater_dissolution_diffuse)
set(FLOW_VARIANT_MODELS brine_energy onephase onephase_energy)
set(FLOW_TGTS)
foreach(OBJ ${COMMON_MODELS} ${FLOW_MODELS} ${FLOW_VARIANT_MODELS})
add_library(flow_lib${OBJ} OBJECT flow/flow_ebos_${OBJ}.cpp)
list(APPEND FLOW_TGTS $<TARGET_OBJECTS:flow_lib${OBJ}>)
if(TARGET fmt::fmt)
target_link_libraries(flow_lib${OBJ} fmt::fmt)
endif()
opm_add_test(flow_${OBJ}
ONLY_COMPILE
SOURCES
flow/flow_${OBJ}.cpp
$<TARGET_OBJECTS:moduleVersion>
$<TARGET_OBJECTS:flow_lib${OBJ}>
EXE_NAME flow_${OBJ}
DEPENDS opmsimulators
LIBRARIES opmsimulators)
endforeach()
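# For each model the loop above creates an object library plus a
# compile-only target; e.g. for the blackoil entry this yields
#   flow_libblackoil  (from flow/flow_ebos_blackoil.cpp)
#   flow_blackoil     (from flow/flow_blackoil.cpp and the object files)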
set_property(TARGET flow_libblackoil PROPERTY POSITION_INDEPENDENT_CODE ON)
foreach(OBJ ${FLOW_VARIANT_MODELS})
set_property(TARGET flow_lib${OBJ} PROPERTY EXCLUDE_FROM_ALL ${FLOW_VARIANTS_DEFAULT_ENABLE_IF})
endforeach()
add_library(flow_libblackoil_poly OBJECT flow/flow_ebos_blackoil_poly.cpp)
if(TARGET fmt::fmt)
target_link_libraries(flow_libblackoil_poly fmt::fmt)
endif()
set_property(TARGET flow_libblackoil_poly PROPERTY EXCLUDE_FROM_ALL ${FLOW_POLY_ONLY_DEFAULT_ENABLE_IF})
opm_add_test(flow
ONLY_COMPILE
ALWAYS_ENABLE
DEFAULT_ENABLE_IF ${FLOW_DEFAULT_ENABLE_IF}
DEPENDS opmsimulators
LIBRARIES opmsimulators
SOURCES
flow/flow.cpp
${FLOW_TGTS}
$<TARGET_OBJECTS:moduleVersion>
)
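# Once configured, the flow binary can be built on its own, e.g.
#
#   cmake --build . --target flow
#
# (or "make flow" with the Makefile generator).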
opm_add_test(flow_poly
ONLY_COMPILE
ALWAYS_ENABLE
DEFAULT_ENABLE_IF ${FLOW_POLY_ONLY_DEFAULT_ENABLE_IF}
DEPENDS opmsimulators
LIBRARIES opmsimulators
SOURCES
flow/flow_blackoil_poly.cpp
$<TARGET_OBJECTS:flow_libblackoil_poly>
$<TARGET_OBJECTS:moduleVersion>)
target_compile_definitions(flow_poly PRIVATE USE_POLYHEDRALGRID)
opm_add_test(flow_distribute_z
ONLY_COMPILE
ALWAYS_ENABLE
DEFAULT_ENABLE_IF ${FLOW_DEFAULT_ENABLE_IF}
DEPENDS opmsimulators
LIBRARIES opmsimulators
SOURCES
flow/flow_distribute_z.cpp
${FLOW_TGTS}
$<TARGET_OBJECTS:moduleVersion>
)
if(dune-alugrid_FOUND)
if (NOT BUILD_FLOW_ALU_GRID)
set(FLOW_ALUGRID_ONLY_DEFAULT_ENABLE_IF "FALSE")
else()
set(FLOW_ALUGRID_ONLY_DEFAULT_ENABLE_IF "TRUE")
endif()
opm_add_test(flow_alugrid
ONLY_COMPILE
ALWAYS_ENABLE
DEFAULT_ENABLE_IF ${FLOW_ALUGRID_ONLY_DEFAULT_ENABLE_IF}
DEPENDS opmsimulators
LIBRARIES opmsimulators
SOURCES
flow/flow_blackoil_alugrid.cpp
$<TARGET_OBJECTS:moduleVersion>)
target_compile_definitions(flow_alugrid PRIVATE USE_ALUGRID)
endif()
if (BUILD_FLOW)
install(TARGETS flow DESTINATION bin)
opm_add_bash_completion(flow)
add_test(NAME flow__version
COMMAND flow --version)
set_tests_properties(flow__version PROPERTIES
PASS_REGULAR_EXPRESSION "${${project}_LABEL}")
endif()
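# The smoke test registered above can be run after the build with:
#
#   ctest -R flow__version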
if (OPM_ENABLE_PYTHON)
add_subdirectory(python)
endif()
add_custom_target(extra_test ${CMAKE_CTEST_COMMAND} -C ExtraTests)
# must link libraries after target 'opmsimulators' has been defined
if(CUDA_FOUND)
target_link_libraries( opmsimulators PUBLIC ${CUDA_cusparse_LIBRARY} )
target_link_libraries( opmsimulators PUBLIC ${CUDA_cublas_LIBRARY} )
endif()
if(OpenCL_FOUND)
target_link_libraries( opmsimulators PUBLIC ${OpenCL_LIBRARIES} )
endif()
if(ROCALUTION_FOUND)
target_include_directories(opmsimulators PUBLIC ${rocalution_INCLUDE_DIR}/rocalution)
endif()
if(VexCL_FOUND)
target_link_libraries( opmsimulators PUBLIC OPM::VexCL::OpenCL )
endif()
if(Damaris_FOUND)
target_link_libraries(opmsimulators PUBLIC damaris)
endif()
install(DIRECTORY doc/man1 DESTINATION ${CMAKE_INSTALL_MANDIR}
FILES_MATCHING PATTERN "*.1")