Merge pull request #5746 from lisajulia/feature/ms-wells-solving

Feature/ms wells - part 2: Solving, straightforward option
Markus Blatt 2024-12-06 09:16:20 +01:00 committed by GitHub
commit 0e22cc1552
9 changed files with 103 additions and 23 deletions


@@ -194,7 +194,7 @@ endfunction()
# - This test class compares the output from a parallel simulation
# to the output from the serial instance of the same model.
function(add_test_compare_parallel_simulation)
set(oneValueArgs CASENAME FILENAME SIMULATOR ABS_TOL REL_TOL DIR MPI_PROCS)
set(oneValueArgs CASENAME FILENAME SIMULATOR ABS_TOL REL_TOL DIR MPI_PROCS ONLY_SMRY)
set(multiValueArgs TEST_ARGS)
cmake_parse_arguments(PARAM "$" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )
@@ -210,7 +210,22 @@ function(add_test_compare_parallel_simulation)
set(RESULT_PATH ${BASE_RESULT_PATH}/parallel/${PARAM_SIMULATOR}+${PARAM_CASENAME})
set(TEST_ARGS ${OPM_TESTS_ROOT}/${PARAM_DIR}/${PARAM_FILENAME} ${PARAM_TEST_ARGS})
set(DRIVER_ARGS -i ${OPM_TESTS_ROOT}/${PARAM_DIR}
# Handle ONLY_SMRY flag (defaults to 0 if not provided)
if(PARAM_ONLY_SMRY)
if(${PARAM_ONLY_SMRY} EQUAL 1)
set(DRIVER_ARGS -s)
elseif(${PARAM_ONLY_SMRY} EQUAL 0)
set(DRIVER_ARGS "")
else()
message(FATAL_ERROR "ONLY_SMRY must be either 0 or 1.")
endif()
else()
set(DRIVER_ARGS "")
endif()
set(DRIVER_ARGS ${DRIVER_ARGS}
-i ${OPM_TESTS_ROOT}/${PARAM_DIR}
-r ${RESULT_PATH}
-b ${PROJECT_BINARY_DIR}/bin
-f ${PARAM_FILENAME}


@@ -322,14 +322,13 @@ void FlowGenericVanguard::init()
if (comm.rank() == 0)
{
std::string message =
std::string("Option --allow-distributed-wells=true is only allowed if model\n")
+ "only has only standard wells. You need to provide option \n"
+ " with --enable-multisegement-wells=false to treat existing \n"
std::string("Option --allow-distributed-wells=true in a model with\n")
+ "multisegment wells. This feature is still experimental. You can\n"
+ "set --enable-multisegment-wells=false to treat the existing \n"
+ "multisegment wells as standard wells.";
OpmLog::error(message);
OpmLog::info(message);
}
comm.barrier();
OPM_THROW(std::invalid_argument, "All wells need to be standard wells!");
}
}
}


@@ -887,7 +887,7 @@ namespace Opm {
this->wellState().init(cellPressures, cellTemperatures, this->schedule(), this->wells_ecl_,
this->local_parallel_well_info_, timeStepIdx,
&this->prevWellState(), this->well_perf_data_,
this->summaryState());
this->summaryState(), simulator_.vanguard().enableDistributedWells());
}


@@ -152,6 +152,14 @@ apply(const BVector& x, BVector& Ax) const
duneB_.mv(x, Bx);
if (this->pw_info_.communication().size() > 1) {
// We need to communicate here to get the contributions from all segments
this->pw_info_.communication().sum(Bx.data(), Bx.size());
}
// It is ok to do this on each process instead of only on one,
// because the other processes would remain idle while waiting for
// the single process to complete the computation.
// invDBx = duneD^-1 * Bx_
const BVectorWell invDBx = mswellhelpers::applyUMFPack(*duneDSolver_, Bx);
@@ -163,6 +171,9 @@ template<class Scalar, int numWellEq, int numEq>
void MultisegmentWellEquations<Scalar,numWellEq,numEq>::
apply(BVector& r) const
{
// It is ok to do this on each process instead of only on one,
// because the other processes would remain idle while waiting for
// the single process to complete the computation.
// invDrw_ = duneD^-1 * resWell_
const BVectorWell invDrw = mswellhelpers::applyUMFPack(*duneDSolver_, resWell_);
// r = r - duneC_^T * invDrw
@@ -193,6 +204,9 @@ template<class Scalar, int numWellEq, int numEq>
typename MultisegmentWellEquations<Scalar,numWellEq,numEq>::BVectorWell
MultisegmentWellEquations<Scalar,numWellEq,numEq>::solve() const
{
// It is ok to do this on each process instead of only on one,
// because the other processes would remain idle while waiting for
// the single process to complete the computation.
return mswellhelpers::applyUMFPack(*duneDSolver_, resWell_);
}
@@ -200,6 +214,9 @@ template<class Scalar, int numWellEq, int numEq>
typename MultisegmentWellEquations<Scalar,numWellEq,numEq>::BVectorWell
MultisegmentWellEquations<Scalar,numWellEq,numEq>::solve(const BVectorWell& rhs) const
{
// It is ok to do this on each process instead of only on one,
// because the other processes would remain idle while waiting for
// the single process to complete the computation.
return mswellhelpers::applyUMFPack(*duneDSolver_, rhs);
}
@@ -207,10 +224,24 @@ template<class Scalar, int numWellEq, int numEq>
void MultisegmentWellEquations<Scalar,numWellEq,numEq>::
recoverSolutionWell(const BVector& x, BVectorWell& xw) const
{
BVectorWell resWell = resWell_;
// resWell = resWell - B * x
duneB_.mmv(x, resWell);
BVectorWell resWell = resWell_;
if (this->pw_info_.communication().size() == 1) {
duneB_.mmv(x, resWell);
} else {
BVectorWell Bx(duneB_.N());
duneB_.mv(x, Bx);
// We need to communicate here to get the contributions from all segments
this->pw_info_.communication().sum(Bx.data(), Bx.size());
resWell -= Bx;
}
// xw = D^-1 * resWell
// It is ok to do this on each process instead of only on one,
// because the other processes would remain idle while waiting for
// the single process to complete the computation.
xw = mswellhelpers::applyUMFPack(*duneDSolver_, resWell);
}
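
The comments added to these MultisegmentWellEquations methods all describe the same parallel strategy: each rank computes its local segment contributions to B*x, the well's communicator sums them, and every rank then applies the duplicated factorization of D redundantly instead of solving on a single rank and broadcasting the result. Below is a minimal, self-contained sketch of that sum-then-redundant-solve pattern, using plain MPI and a toy diagonal D in place of Dune/UMFPack; all names in it are illustrative, not OPM API.

    // Sketch only: sum local contributions, then solve redundantly on every rank.
    #include <mpi.h>
    #include <array>
    #include <cstddef>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // Each rank owns some segments of the well and contributes a partial B*x.
        std::array<double, 2> Bx = {1.0 * (rank + 1), 2.0 * (rank + 1)};

        // Communicate to get the contributions from all segments
        // (the analogue of pw_info_.communication().sum(Bx.data(), Bx.size())).
        MPI_Allreduce(MPI_IN_PLACE, Bx.data(), static_cast<int>(Bx.size()),
                      MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

        // Every rank now holds the full vector and applies D^-1 redundantly;
        // solving on only one rank would leave the others idle anyway.
        // Toy D: a diagonal matrix, so D^-1 * Bx is an element-wise division.
        const std::array<double, 2> Ddiag = {4.0, 8.0};
        std::array<double, 2> invDBx{};
        for (std::size_t i = 0; i < Bx.size(); ++i) {
            invDBx[i] = Bx[i] / Ddiag[i];
        }

        std::printf("rank %d: invDBx = (%g, %g)\n", rank, invDBx[0], invDBx[1]);
        MPI_Finalize();
        return 0;
    }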


@@ -264,11 +264,13 @@ void WellState<Scalar>::init(const std::vector<Scalar>& cellPressures,
const int report_step,
const WellState* prevState,
const std::vector<std::vector<PerforationData<Scalar>>>& well_perf_data,
const SummaryState& summary_state)
const SummaryState& summary_state,
const bool enableDistributedWells)
{
// call init on base class
this->base_init(cellPressures, cellTemperatures, wells_ecl, parallel_well_info,
well_perf_data, summary_state);
this->enableDistributedWells_ = enableDistributedWells;
this->global_well_info = std::make_optional<GlobalWellInfo>(schedule,
report_step,
wells_ecl);
@@ -439,7 +441,7 @@ void WellState<Scalar>::resize(const std::vector<Well>& wells_ecl,
const SummaryState& summary_state)
{
const std::vector<Scalar> tmp(numCells, 0.0); // <- UGLY HACK to pass the size
init(tmp, tmp, schedule, wells_ecl, parallel_well_info, 0, nullptr, well_perf_data, summary_state);
init(tmp, tmp, schedule, wells_ecl, parallel_well_info, 0, nullptr, well_perf_data, summary_state, this->enableDistributedWells_);
if (handle_ms_well) {
initWellStateMSWell(wells_ecl, nullptr);
@@ -728,8 +730,8 @@ void WellState<Scalar>::initWellStateMSWell(const std::vector<Well>& wells_ecl,
n_activeperf++;
}
}
if (static_cast<int>(ws.perf_data.size()) != n_activeperf)
if (!this->enableDistributedWells_ && static_cast<int>(ws.perf_data.size()) != n_activeperf)
throw std::logic_error("Distributed multi-segment wells cannot be initialized properly yet.");
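
For context on the relaxed check above: once a multisegment well is distributed, each rank's perf_data holds only the locally owned perforations, so its size no longer matches the well's global number of active perforations and the strict equality test would always throw. The following stand-alone snippet illustrates that mismatch with made-up numbers; it is not OPM code.

    #include <cstdio>
    #include <vector>

    int main()
    {
        const int n_activeperf = 8;   // global number of active perforations of one MS well
        const int num_ranks = 4;      // the well is split across 4 processes

        // Each rank stores only its local share of the perforations.
        const std::vector<int> local_perf_count(num_ranks, n_activeperf / num_ranks);

        for (int rank = 0; rank < num_ranks; ++rank) {
            std::printf("rank %d: local perf_data.size() = %d, global n_activeperf = %d -> %s\n",
                        rank, local_perf_count[rank], n_activeperf,
                        local_perf_count[rank] != n_activeperf ? "strict check would throw"
                                                               : "strict check passes");
        }
        return 0;
    }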


@@ -104,7 +104,8 @@ public:
const int report_step,
const WellState* prevState,
const std::vector<std::vector<PerforationData<Scalar>>>& well_perf_data,
const SummaryState& summary_state);
const SummaryState& summary_state,
const bool enableDistributedWells);
void resize(const std::vector<Well>& wells_ecl,
const std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>& parallel_well_info,
@@ -353,6 +354,8 @@ public:
}
private:
bool enableDistributedWells_ = false;
bool is_permanently_inactive_well(const std::string& wname) const {
return std::find(this->permanently_inactive_well_names_.begin(), this->permanently_inactive_well_names_.end(), wname) != this->permanently_inactive_well_names_.end();
}


@@ -36,6 +36,27 @@ add_test_compare_parallel_simulation(CASENAME spe9_dist_z
REL_TOL ${rel_tol_parallel}
TEST_ARGS --linear-solver-reduction=1e-7 --tolerance-cnv=5e-6 --tolerance-mb=1e-8 --enable-drift-compensation=false)
# A test for distributed multisegment wells. We distribute the load only along the z-axis
add_test_compare_parallel_simulation(CASENAME msw-simple
FILENAME MSW-SIMPLE # this file contains one Multisegment well without branches that is distributed across several processes
DIR msw
SIMULATOR flow_distribute_z
ONLY_SMRY 1
ABS_TOL 1e4 # the absolute tolerance is pretty high here, yet in this case, we are only interested in the relative tolerance
REL_TOL 1e-5
MPI_PROCS 4
TEST_ARGS --solver-max-time-step-in-days=10 --allow-distributed-wells=true)
add_test_compare_parallel_simulation(CASENAME msw-3d
FILENAME MSW-3D # this file contains one Multisegment well with branches that is distributed across several processes
DIR msw
SIMULATOR flow_distribute_z
ONLY_SMRY 1
ABS_TOL 1e4 # the absolute tolerance is pretty high here, yet in this case, we are only interested in the relative tolerance
REL_TOL 1e-4
MPI_PROCS 4
TEST_ARGS --allow-distributed-wells=true)
add_test_compare_parallel_simulation(CASENAME spe9group
FILENAME SPE9_CP_GROUP
SIMULATOR flow


@@ -18,12 +18,15 @@ then
echo -e "\t\t -e <filename> Simulator binary to use"
echo -e "\tOptional options:"
echo -e "\t\t -n <procs> Number of MPI processes to use"
echo -e "\t\t -s If given, compare only the SMRY file and skip comparison of the UNRST file."
exit 1
fi
MPI_PROCS=4
OPTIND=1
while getopts "i:r:b:f:a:t:c:e:n:" OPT
ONLY_SUMMARY=false
while getopts "i:r:b:f:a:t:c:e:n:s" OPT
do
case "${OPT}" in
i) INPUT_DATA_PATH=${OPTARG} ;;
@@ -35,6 +38,7 @@ do
c) COMPARE_ECL_COMMAND=${OPTARG} ;;
e) EXE_NAME=${OPTARG} ;;
n) MPI_PROCS=${OPTARG} ;;
s) ONLY_SUMMARY=true ;;
esac
done
shift $(($OPTIND-1))
@@ -61,12 +65,16 @@ then
${COMPARE_ECL_COMMAND} -t SMRY -a -R ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
fi
echo "=== Executing comparison for restart file ==="
${COMPARE_ECL_COMMAND} -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
if [ $? -ne 0 ]
then
ecode=1
${COMPARE_ECL_COMMAND} -a -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
if [ "$ONLY_SUMMARY" = false ]; then
echo "=== Executing comparison for restart file ==="
${COMPARE_ECL_COMMAND} -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
if [ $? -ne 0 ]
then
ecode=1
${COMPARE_ECL_COMMAND} -a -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
fi
else
echo "=== Skipping comparison for restart file due to -s flag ==="
fi
exit $ecode


@@ -179,7 +179,8 @@ namespace {
state.init(cpress, ctemp, setup.sched,
wells, ppinfos,
timeStep, nullptr, setup.well_perf_data, setup.st);
timeStep, nullptr, setup.well_perf_data, setup.st,
false /*enableDistributedWells*/);
state.initWellStateMSWell(setup.sched.getWells(timeStep),
nullptr);