Merge pull request #5488 from lisajulia/fix/ACTIONX-COMPDAT

Fix/actionx compdat
Markus Blatt · 2024-07-30 17:18:04 +02:00 · committed by GitHub
8 changed files with 119 additions and 63 deletions

File 1 of 8: CMake test helper functions

@@ -95,13 +95,29 @@ function(add_test_compareECLFiles)
TESTNAME ${PARAM_CASENAME})
endfunction()
###########################################################################
+ # TEST: compareSeparateECLFiles
+ ###########################################################################
+ # Input:
+ # - casename: basename (no extension)
+ # - filename1 (no extension)
+ # - filename2 (no extension)
+ #
+ # Details:
+ # - This test class compares two separate simulations
function(add_test_compareSeparateECLFiles)
- set(oneValueArgs CASENAME FILENAME1 FILENAME2 DIR1 DIR2 SIMULATOR ABS_TOL REL_TOL IGNORE_EXTRA_KW DIR_PREFIX)
+ set(oneValueArgs CASENAME FILENAME1 FILENAME2 DIR1 DIR2 SIMULATOR ABS_TOL REL_TOL IGNORE_EXTRA_KW DIR_PREFIX MPI_PROCS)
set(multiValueArgs TEST_ARGS)
cmake_parse_arguments(PARAM "$" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
if(NOT PARAM_PREFIX)
set(PARAM_PREFIX compareSeparateECLFiles)
endif()
+ if(PARAM_MPI_PROCS)
+   set(MPI_PROCS ${PARAM_MPI_PROCS})
+ else()
+   set(MPI_PROCS 1)
+ endif()
set(RESULT_PATH ${BASE_RESULT_PATH}${PARAM_DIR_PREFIX}/${PARAM_SIMULATOR}+${PARAM_CASENAME})
set(TEST_ARGS ${PARAM_TEST_ARGS})
set(DRIVER_ARGS -i ${OPM_TESTS_ROOT}/${PARAM_DIR1}
@@ -112,7 +128,8 @@ function(add_test_compareSeparateECLFiles)
-b ${PROJECT_BINARY_DIR}/bin
-a ${PARAM_ABS_TOL}
-t ${PARAM_REL_TOL}
- -c ${COMPARE_ECL_COMMAND})
+ -c ${COMPARE_ECL_COMMAND}
+ -n ${MPI_PROCS})
if(PARAM_IGNORE_EXTRA_KW)
list(APPEND DRIVER_ARGS -y ${PARAM_IGNORE_EXTRA_KW})
endif()
@@ -124,7 +141,8 @@ function(add_test_compareSeparateECLFiles)
DIRNAME ${PARAM_DIR}
FILENAME ${PARAM_FILENAME}
SIMULATOR ${PARAM_SIMULATOR}
- TESTNAME ${PARAM_CASENAME})
+ TESTNAME ${PARAM_CASENAME}
+ PROCESSORS ${MPI_PROCS})
endfunction()
###########################################################################

File 2 of 8: generic CpGrid vanguard implementation

@@ -196,13 +196,14 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
const auto wells = ((mpiSize > 1) || partitionJacobiBlocks)
? schedule.getWellsatEnd()
: std::vector<Well>{};
+ const auto& possibleFutureConnections = schedule.getPossibleFutureConnections();
// Distribute the grid and switch to the distributed view.
if (mpiSize > 1) {
this->distributeGrid(edgeWeightsMethod, ownersFirst, partitionMethod,
serialPartitioning, enableDistributedWells,
imbalanceTol, loadBalancerSet != 0,
faceTrans, wells,
+ possibleFutureConnections,
eclState1, parallelWells);
}
@@ -215,7 +216,7 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
#if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
if (partitionJacobiBlocks) {
this->cell_part_ = this->grid_->
- zoltanPartitionWithoutScatter(&wells, faceTrans.data(),
+ zoltanPartitionWithoutScatter(&wells, &possibleFutureConnections, faceTrans.data(),
numJacobiBlocks,
imbalanceTol);
}
@@ -282,17 +283,18 @@ extractFaceTrans(const GridView& gridView) const
template <class ElementMapper, class GridView, class Scalar>
void
GenericCpGridVanguard<ElementMapper, GridView, Scalar>::
- distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
-                const bool ownersFirst,
-                const Dune::PartitionMethod partitionMethod,
-                const bool serialPartitioning,
-                const bool enableDistributedWells,
-                const double imbalanceTol,
-                const bool loadBalancerSet,
-                const std::vector<double>& faceTrans,
-                const std::vector<Well>& wells,
-                EclipseState& eclState1,
-                FlowGenericVanguard::ParallelWellStruct& parallelWells)
+ distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                const bool ownersFirst,
+                const Dune::PartitionMethod partitionMethod,
+                const bool serialPartitioning,
+                const bool enableDistributedWells,
+                const double imbalanceTol,
+                const bool loadBalancerSet,
+                const std::vector<double>& faceTrans,
+                const std::vector<Well>& wells,
+                const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
+                EclipseState& eclState1,
+                FlowGenericVanguard::ParallelWellStruct& parallelWells)
{
if (auto* eclState = dynamic_cast<ParallelEclipseState*>(&eclState1);
eclState != nullptr)
@@ -300,7 +302,7 @@ distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
this->distributeGrid(edgeWeightsMethod, ownersFirst, partitionMethod,
serialPartitioning, enableDistributedWells,
imbalanceTol, loadBalancerSet, faceTrans,
- wells, eclState, parallelWells);
+ wells, possibleFutureConnections, eclState, parallelWells);
}
else {
const auto message = std::string {
@@ -319,17 +321,18 @@ distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
template <class ElementMapper, class GridView, class Scalar>
void
GenericCpGridVanguard<ElementMapper, GridView, Scalar>::
- distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
-                const bool ownersFirst,
-                const Dune::PartitionMethod partitionMethod,
-                const bool serialPartitioning,
-                const bool enableDistributedWells,
-                const double imbalanceTol,
-                const bool loadBalancerSet,
-                const std::vector<double>& faceTrans,
-                const std::vector<Well>& wells,
-                ParallelEclipseState* eclState,
-                FlowGenericVanguard::ParallelWellStruct& parallelWells)
+ distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                const bool ownersFirst,
+                const Dune::PartitionMethod partitionMethod,
+                const bool serialPartitioning,
+                const bool enableDistributedWells,
+                const double imbalanceTol,
+                const bool loadBalancerSet,
+                const std::vector<double>& faceTrans,
+                const std::vector<Well>& wells,
+                const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
+                ParallelEclipseState* eclState,
+                FlowGenericVanguard::ParallelWellStruct& parallelWells)
{
OPM_TIMEBLOCK(gridDistribute);
const auto isIORank = this->grid_->comm().rank() == 0;
@@ -347,13 +350,14 @@ distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
: std::vector<int>{};
//For this case, simple partitioning is selected automatically
parallelWells =
- std::get<1>(this->grid_->loadBalance(handle, parts, &wells, ownersFirst,
+ std::get<1>(this->grid_->loadBalance(handle, parts, &wells, &possibleFutureConnections, ownersFirst,
addCornerCells, overlapLayers));
}
else {
parallelWells =
std::get<1>(this->grid_->loadBalance(handle, edgeWeightsMethod,
- &wells, serialPartitioning,
+ &wells, &possibleFutureConnections,
+ serialPartitioning,
faceTrans.data(), ownersFirst,
addCornerCells, overlapLayers,
partitionMethod, imbalanceTol,
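
Note: the hunk at @@ -300,7 +302,7 above forwards from the EclipseState& overload of distributeGrid to the ParallelEclipseState* overload via a dynamic_cast. A minimal stand-alone sketch of that dispatch pattern, using simplified stand-in types for EclipseState and ParallelEclipseState (the real classes carry far more state):

#include <iostream>
#include <stdexcept>

struct EclipseState { virtual ~EclipseState() = default; };
struct ParallelEclipseState : EclipseState {};

// Overload taking the derived type; does the actual work.
void distributeGrid(ParallelEclipseState* /*eclState*/)
{
    std::cout << "distributing with ParallelEclipseState\n";
}

// Public entry point: forward only if the dynamic type matches,
// otherwise fail loudly, mirroring the hunk above.
void distributeGrid(EclipseState& eclState1)
{
    if (auto* eclState = dynamic_cast<ParallelEclipseState*>(&eclState1);
        eclState != nullptr)
    {
        distributeGrid(eclState);
    }
    else {
        throw std::logic_error {
            "Grid distribution requires a ParallelEclipseState"
        };
    }
}

int main()
{
    ParallelEclipseState state;
    distributeGrid(static_cast<EclipseState&>(state)); // dispatches to the pointer overload
}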

File 3 of 8: generic CpGrid vanguard header

@@ -165,29 +165,31 @@ protected:
private:
std::vector<double> extractFaceTrans(const GridView& gridView) const;
- void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
-                     const bool ownersFirst,
-                     const Dune::PartitionMethod partitionMethod,
-                     const bool serialPartitioning,
-                     const bool enableDistributedWells,
-                     const double imbalanceTol,
-                     const bool loadBalancerSet,
-                     const std::vector<double>& faceTrans,
-                     const std::vector<Well>& wells,
-                     EclipseState& eclState,
-                     FlowGenericVanguard::ParallelWellStruct& parallelWells);
+ void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                     const bool ownersFirst,
+                     const Dune::PartitionMethod partitionMethod,
+                     const bool serialPartitioning,
+                     const bool enableDistributedWells,
+                     const double imbalanceTol,
+                     const bool loadBalancerSet,
+                     const std::vector<double>& faceTrans,
+                     const std::vector<Well>& wells,
+                     const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
+                     EclipseState& eclState,
+                     FlowGenericVanguard::ParallelWellStruct& parallelWells);
- void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
-                     const bool ownersFirst,
-                     const Dune::PartitionMethod partitionMethod,
-                     const bool serialPartitioning,
-                     const bool enableDistributedWells,
-                     const double imbalanceTol,
-                     const bool loadBalancerSet,
-                     const std::vector<double>& faceTrans,
-                     const std::vector<Well>& wells,
-                     ParallelEclipseState* eclState,
-                     FlowGenericVanguard::ParallelWellStruct& parallelWells);
+ void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                     const bool ownersFirst,
+                     const Dune::PartitionMethod partitionMethod,
+                     const bool serialPartitioning,
+                     const bool enableDistributedWells,
+                     const double imbalanceTol,
+                     const bool loadBalancerSet,
+                     const std::vector<double>& faceTrans,
+                     const std::vector<Well>& wells,
+                     const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
+                     ParallelEclipseState* eclState,
+                     FlowGenericVanguard::ParallelWellStruct& parallelWells);
protected:
virtual const std::string& zoltanParams() const = 0;

File 4 of 8: BDA solver info implementation

@@ -78,12 +78,14 @@ void BdaSolverInfo<Matrix,Vector>::
prepare(const Grid& grid,
const Dune::CartesianIndexMapper<Grid>& cartMapper,
const std::vector<Well>& wellsForConn,
+ const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
const std::vector<int>& cellPartition,
const std::size_t nonzeroes,
const bool useWellConn)
{
if (numJacobiBlocks_ > 1) {
detail::setWellConnections(grid, cartMapper, wellsForConn,
+ possibleFutureConnections,
useWellConn,
wellConnectionsGraph_,
numJacobiBlocks_);
@@ -239,12 +241,13 @@ using BM = Dune::BCRSMatrix<MatrixBlock<Scalar,Dim,Dim>>;
template<class Scalar, int Dim>
using BV = Dune::BlockVector<Dune::FieldVector<Scalar,Dim>>;
- #define INSTANTIATE_GRID(T, Dim, Grid) \
- template void BdaSolverInfo<BM<T,Dim>,BV<T,Dim>>:: \
- prepare(const Grid&, \
- const Dune::CartesianIndexMapper<Grid>&, \
- const std::vector<Well>&, \
- const std::vector<int>&, \
+ #define INSTANTIATE_GRID(T, Dim, Grid) \
+ template void BdaSolverInfo<BM<T,Dim>,BV<T,Dim>>:: \
+ prepare(const Grid&, \
+ const Dune::CartesianIndexMapper<Grid>&, \
+ const std::vector<Well>&, \
+ const std::unordered_map<std::string, std::set<std::array<int,3>>>&, \
+ const std::vector<int>&, \
const std::size_t, const bool);
using PolyHedralGrid3D = Dune::PolyhedralGrid<3, 3>;
#if HAVE_DUNE_ALUGRID

File 5 of 8: BDA solver info header

@@ -60,6 +60,7 @@ struct BdaSolverInfo
void prepare(const Grid& grid,
const Dune::CartesianIndexMapper<Grid>& cartMapper,
const std::vector<Well>& wellsForConn,
+ const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
const std::vector<int>& cellPartition,
const std::size_t nonzeroes,
const bool useWellConn);
@@ -207,6 +208,7 @@ public:
bdaBridge_->prepare(this->simulator_.vanguard().grid(),
this->simulator_.vanguard().cartesianIndexMapper(),
this->simulator_.vanguard().schedule().getWellsatEnd(),
+ this->simulator_.vanguard().schedule().getPossibleFutureConnections(),
this->simulator_.vanguard().cellPartition(),
this->getMatrix().nonzeroes(), this->useWellConn_);
}
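
For reference, the parameter threaded through all of these signatures is a plain standard-library container: a map from well name to the set of (i,j,k) Cartesian cells that a future COMPDAT (for example one applied later by an ACTIONX block) may open. A minimal sketch of populating and reading such a map; the well name and cell indices here are invented for illustration:

#include <array>
#include <iostream>
#include <set>
#include <string>
#include <unordered_map>

int main()
{
    std::unordered_map<std::string, std::set<std::array<int,3>>> possibleFutureConnections;

    // Cells that a future COMPDAT might open for this (hypothetical) well.
    possibleFutureConnections["PROD1"].insert({10, 4, 7});
    possibleFutureConnections["PROD1"].insert({10, 4, 8});

    for (const auto& [well, cells] : possibleFutureConnections) {
        std::cout << well << ": " << cells.size() << " candidate cells\n";
    }
}

std::set<std::array<int,3>> works here because std::array compares lexicographically, which also gives a deterministic iteration order over the candidate cells.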

File 6 of 8: well connections helper

@@ -46,7 +46,7 @@ namespace detail
/// \param useWellConn Boolean that is true when UseWellContribusion is true
/// \param wellGraph Cell IDs of well cells stored in a graph.
template<class Grid, class CartMapper, class W>
- void setWellConnections(const Grid& grid, const CartMapper& cartMapper, const W& wells, bool useWellConn, std::vector<std::set<int>>& wellGraph, int numJacobiBlocks)
+ void setWellConnections(const Grid& grid, const CartMapper& cartMapper, const W& wells, const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections, bool useWellConn, std::vector<std::set<int>>& wellGraph, int numJacobiBlocks)
{
if ( grid.comm().size() > 1 || numJacobiBlocks > 1)
{
@@ -62,7 +62,7 @@ namespace detail
cart[ cartMapper.cartesianIndex( i ) ] = i;
Dune::cpgrid::WellConnections well_indices;
- well_indices.init(wells, cpgdim, cart);
+ well_indices.init(wells, &possibleFutureConnections, cpgdim, cart);
for (auto& well : well_indices)
{
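
Conceptually, setWellConnections now merges each well's possible future connections into the same per-well cell graph as its open completions, so the load balancer never assigns a well's current cells and its potential future cells to different processes. A simplified sketch of that gathering step, with a hand-rolled Cartesian-to-compressed index map standing in for the cartMapper/cart logic above:

#include <array>
#include <iostream>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <vector>

int main()
{
    // Stand-in: global Cartesian (i,j,k) -> compressed active-cell index.
    std::map<std::array<int,3>, int> cartesianToCompressed {
        {{10, 4, 7}, 42}, {{10, 4, 8}, 43}
    };

    std::unordered_map<std::string, std::set<std::array<int,3>>> possibleFutureConnections {
        {"PROD1", {{10, 4, 7}, {10, 4, 8}}}
    };

    // One set of compressed cell indices per well; the real code also
    // inserts the cells of the currently open connections.
    std::vector<std::set<int>> wellGraph(1);
    for (const auto& ijk : possibleFutureConnections.at("PROD1")) {
        if (auto it = cartesianToCompressed.find(ijk); it != cartesianToCompressed.end()) {
            wellGraph[0].insert(it->second);
        }
    }

    std::cout << "PROD1 pins " << wellGraph[0].size() << " cells together\n";
}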

File 7 of 8: regression test definitions

@@ -212,3 +212,27 @@ add_test_compare_parallel_simulation(CASENAME rxft
REL_TOL 1.0e-3
DIR rxft_smry
TEST_ARGS --enable-tuning=true --linear-solver-reduction=1e-7 --tolerance-cnv=5e-6 --tolerance-mb=1e-8 --enable-drift-compensation=false)
+ opm_set_test_driver(${PROJECT_SOURCE_DIR}/tests/run-comparison.sh "")
+ add_test_compareSeparateECLFiles(CASENAME actionx_compdat_1_proc
+   DIR1 actionx
+   FILENAME1 COMPDAT_SHORT
+   DIR2 actionx
+   FILENAME2 ACTIONX_COMPDAT_SHORT
+   SIMULATOR flow
+   ABS_TOL ${abs_tol}
+   REL_TOL ${rel_tol}
+   IGNORE_EXTRA_KW BOTH
+   MPI_PROCS 1)
+ add_test_compareSeparateECLFiles(CASENAME actionx_compdat_8_procs
+   DIR1 actionx
+   FILENAME1 COMPDAT_SHORT
+   DIR2 actionx
+   FILENAME2 ACTIONX_COMPDAT_SHORT
+   SIMULATOR flow
+   ABS_TOL ${abs_tol}
+   REL_TOL ${rel_tol}
+   IGNORE_EXTRA_KW BOTH
+   MPI_PROCS 8)

File 8 of 8: comparison test driver script

@@ -17,12 +17,14 @@ then
echo -e "\t\t -t <tol> Relative tolerance in comparison"
echo -e "\t\t -c <path> Path to comparison tool"
echo -e "\t\t -e <filename> Simulator binary to use"
echo -e "\t\t -n <procs> Number of MPI processes to use"
exit 1
fi
RESTART_STEP=""
+ MPI_PROCS=1
OPTIND=1
- while getopts "i:j:f:g:r:b:a:t:c:e:y:" OPT
+ while getopts "i:j:f:g:r:b:a:t:c:e:y:n:" OPT
do
case "${OPT}" in
i) INPUT_DATA_PATH1=${OPTARG} ;;
@@ -36,6 +38,7 @@ do
c) COMPARE_ECL_COMMAND=${OPTARG} ;;
e) EXE_NAME=${OPTARG} ;;
y) IGNORE_EXTRA_KW=${OPTARG} ;;
+ n) MPI_PROCS=${OPTARG} ;;
esac
done
shift $(($OPTIND-1))
@@ -43,9 +46,9 @@ TEST_ARGS="$@"
mkdir -p ${RESULT_PATH}
cd ${RESULT_PATH}
- ${BINPATH}/${EXE_NAME} ${INPUT_DATA_PATH1}/${FILENAME1} ${TEST_ARGS} --output-dir=${RESULT_PATH}
+ mpirun -np ${MPI_PROCS} ${BINPATH}/${EXE_NAME} ${INPUT_DATA_PATH1}/${FILENAME1} ${TEST_ARGS} --output-dir=${RESULT_PATH}
test $? -eq 0 || exit 1
- ${BINPATH}/${EXE_NAME} ${INPUT_DATA_PATH2}/${FILENAME2} ${TEST_ARGS} --output-dir=${RESULT_PATH}
+ mpirun -np ${MPI_PROCS} ${BINPATH}/${EXE_NAME} ${INPUT_DATA_PATH2}/${FILENAME2} ${TEST_ARGS} --output-dir=${RESULT_PATH}
test $? -eq 0 || exit 1
cd ..