Mirror of https://github.com/OPM/opm-simulators.git (synced 2025-02-25 18:55:30 -06:00)
Merge pull request #5746 from lisajulia/feature/ms-wells-solving

Feature/ms wells - part 2: Solving, straightforward option

Commit 0e22cc1552
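Part 2 of the multisegment-well (MSW) parallelization series: distributed multisegment wells are now solved rather than rejected. In summary: FlowGenericVanguard::init() no longer throws when --allow-distributed-wells=true meets multisegment wells; MultisegmentWellEquations sums the per-rank contributions to B*x before each well-local D solve; WellState gains an enableDistributedWells flag so its perforation-count consistency check can be skipped for distributed wells; and two parallel regression tests (MSW-SIMPLE, MSW-3D) are added that compare summary output only, via a new ONLY_SMRY test option and -s driver switch.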
@@ -194,7 +194,7 @@ endfunction()
 # - This test class compares the output from a parallel simulation
 # to the output from the serial instance of the same model.
 function(add_test_compare_parallel_simulation)
-  set(oneValueArgs CASENAME FILENAME SIMULATOR ABS_TOL REL_TOL DIR MPI_PROCS)
+  set(oneValueArgs CASENAME FILENAME SIMULATOR ABS_TOL REL_TOL DIR MPI_PROCS ONLY_SMRY)
   set(multiValueArgs TEST_ARGS)
   cmake_parse_arguments(PARAM "$" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} )

@@ -210,7 +210,22 @@ function(add_test_compare_parallel_simulation)

   set(RESULT_PATH ${BASE_RESULT_PATH}/parallel/${PARAM_SIMULATOR}+${PARAM_CASENAME})
   set(TEST_ARGS ${OPM_TESTS_ROOT}/${PARAM_DIR}/${PARAM_FILENAME} ${PARAM_TEST_ARGS})
-  set(DRIVER_ARGS -i ${OPM_TESTS_ROOT}/${PARAM_DIR}
+  # Handle ONLY_SMRY flag (defaults to 0 if not provided)
+  if(PARAM_ONLY_SMRY)
+    if(${PARAM_ONLY_SMRY} EQUAL 1)
+      set(DRIVER_ARGS -s)
+    elseif(${PARAM_ONLY_SMRY} EQUAL 0)
+      set(DRIVER_ARGS "")
+    else()
+      message(FATAL_ERROR "ONLY_SMRY must be either 0 or 1.")
+    endif()
+  else()
+    set(DRIVER_ARGS "")
+  endif()
+
+  set(DRIVER_ARGS ${DRIVER_ARGS}
+                  -i ${OPM_TESTS_ROOT}/${PARAM_DIR}
                   -r ${RESULT_PATH}
                   -b ${PROJECT_BINARY_DIR}/bin
                   -f ${PARAM_FILENAME}
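With ONLY_SMRY 1, the generated test invokes the driver script with the new -s switch (see the shell hunks near the end of this diff), restricting the comparison to summary (SMRY) data; ONLY_SMRY 0 or omitting the argument keeps the full comparison, and any other value fails at configure time.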
@@ -322,14 +322,13 @@ void FlowGenericVanguard::init()
             if (comm.rank() == 0)
             {
                 std::string message =
-                        std::string("Option --allow-distributed-wells=true is only allowed if model\n")
-                        + "only has only standard wells. You need to provide option \n"
-                        + " with --enable-multisegement-wells=false to treat existing \n"
+                        std::string("Option --allow-distributed-wells=true is set in a model with\n")
+                        + "multisegment wells. This feature is still experimental. You can\n"
+                        + "set --enable-multisegment-wells=false to treat the existing \n"
                         + "multisegment wells as standard wells.";
-                OpmLog::error(message);
+                OpmLog::info(message);
             }
             comm.barrier();
-            OPM_THROW(std::invalid_argument, "All wells need to be standard wells!");
         }
     }
 }
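Behavioural change: the OPM_THROW that previously aborted any run combining --allow-distributed-wells=true with multisegment wells is removed, and the log call is downgraded from OpmLog::error to OpmLog::info. Such runs now proceed as an experimental feature, with --enable-multisegment-wells=false remaining the fallback that treats the multisegment wells as standard wells.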
@@ -887,7 +887,7 @@ namespace Opm {
         this->wellState().init(cellPressures, cellTemperatures, this->schedule(), this->wells_ecl_,
                                this->local_parallel_well_info_, timeStepIdx,
                                &this->prevWellState(), this->well_perf_data_,
-                               this->summaryState());
+                               this->summaryState(), simulator_.vanguard().enableDistributedWells());
     }

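This forwards the vanguard's enableDistributedWells() setting (driven by --allow-distributed-wells) into WellState::init(), matching the extended signature in the WellState hunks below.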
@@ -152,6 +152,14 @@ apply(const BVector& x, BVector& Ax) const

     duneB_.mv(x, Bx);

+    if (this->pw_info_.communication().size() > 1) {
+        // We need to communicate here to get the contributions from all segments
+        this->pw_info_.communication().sum(Bx.data(), Bx.size());
+    }
+
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     // invDBx = duneD^-1 * Bx_
     const BVectorWell invDBx = mswellhelpers::applyUMFPack(*duneDSolver_, Bx);

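Why the new reduction is needed: when a multisegment well is distributed, each rank assembles only the entries of B that couple the well to its locally owned reservoir cells, so duneB_.mv(x, Bx) produces a partial product, and summing Bx over the well's communicator reconstructs the full B*x on every rank. The following standalone toy (illustrative only, not OPM code; the matrix, sizes, and ownership split are made up) emulates two ranks in a single process:

    // toy_partial_product.cpp -- illustrative only, not OPM code.
    // The full 2x4 matrix B couples 2 segment equations to 4 reservoir cells;
    // "rank 0" owns cells {0,1}, "rank 1" owns cells {2,3}. Each rank computes
    // B*x using only its own columns; summing the partial results reproduces
    // the full product, which is what communication().sum(Bx.data(), Bx.size())
    // achieves in the hunk above.
    #include <array>
    #include <cstdio>

    int main()
    {
        const double B[2][4] = {{1.0, 2.0, 0.0, 1.0},
                                {0.0, 1.0, 3.0, 2.0}};
        const double x[4] = {1.0, 1.0, 2.0, 1.0};

        std::array<double, 2> Bx_rank0{}, Bx_rank1{};
        for (int seg = 0; seg < 2; ++seg) {
            Bx_rank0[seg] = B[seg][0] * x[0] + B[seg][1] * x[1]; // rank 0 columns
            Bx_rank1[seg] = B[seg][2] * x[2] + B[seg][3] * x[3]; // rank 1 columns
            std::printf("segment %d: rank0 %g + rank1 %g = full %g\n",
                        seg, Bx_rank0[seg], Bx_rank1[seg],
                        Bx_rank0[seg] + Bx_rank1[seg]);       // the communicated sum
        }
        return 0;
    }

After the sum, every rank holds the complete Bx and can apply D^-1 redundantly, which is what the added comment block justifies.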
@@ -163,6 +171,9 @@ template<class Scalar, int numWellEq, int numEq>
 void MultisegmentWellEquations<Scalar,numWellEq,numEq>::
 apply(BVector& r) const
 {
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     // invDrw_ = duneD^-1 * resWell_
     const BVectorWell invDrw = mswellhelpers::applyUMFPack(*duneDSolver_, resWell_);
     // r = r - duneC_^T * invDrw
@@ -193,6 +204,9 @@ template<class Scalar, int numWellEq, int numEq>
 typename MultisegmentWellEquations<Scalar,numWellEq,numEq>::BVectorWell
 MultisegmentWellEquations<Scalar,numWellEq,numEq>::solve() const
 {
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     return mswellhelpers::applyUMFPack(*duneDSolver_, resWell_);
 }

@@ -200,6 +214,9 @@ template<class Scalar, int numWellEq, int numEq>
 typename MultisegmentWellEquations<Scalar,numWellEq,numEq>::BVectorWell
 MultisegmentWellEquations<Scalar,numWellEq,numEq>::solve(const BVectorWell& rhs) const
 {
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     return mswellhelpers::applyUMFPack(*duneDSolver_, rhs);
 }

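The three comment blocks added above all justify the same design choice: the well-local system D is factorized (UMFPack) and solved redundantly on every rank instead of on one rank followed by a broadcast. Since the right-hand sides are identical on all ranks after the Bx reduction, the redundant solves return identical results, and the ranks that would otherwise idle do the small solve instead, so no wall time is lost and a communication step is saved.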
@@ -207,10 +224,24 @@ template<class Scalar, int numWellEq, int numEq>
 void MultisegmentWellEquations<Scalar,numWellEq,numEq>::
 recoverSolutionWell(const BVector& x, BVectorWell& xw) const
 {
-    BVectorWell resWell = resWell_;
     // resWell = resWell - B * x
-    duneB_.mmv(x, resWell);
+    BVectorWell resWell = resWell_;
+    if (this->pw_info_.communication().size() == 1) {
+        duneB_.mmv(x, resWell);
+    } else {
+        BVectorWell Bx(duneB_.N());
+        duneB_.mv(x, Bx);
+
+        // We need to communicate here to get the contributions from all segments
+        this->pw_info_.communication().sum(Bx.data(), Bx.size());
+
+        resWell -= Bx;
+    }
+
     // xw = D^-1 * resWell
+    // It is ok to do this on each process instead of only on one,
+    // because the other processes would remain idle while waiting for
+    // the single process to complete the computation.
     xw = mswellhelpers::applyUMFPack(*duneDSolver_, resWell);
 }

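recoverSolutionWell() mirrors apply(): with one rank, B*x is subtracted in place via duneB_.mmv; with several ranks, each rank forms its partial product Bx, the partials are summed over the well's communicator, and the sum is subtracted explicitly. Both paths then evaluate xw = D^-1 (resWell_ - B x) with the same, fully assembled right-hand side.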
@@ -264,11 +264,13 @@ void WellState<Scalar>::init(const std::vector<Scalar>& cellPressures,
                              const int report_step,
                              const WellState* prevState,
                              const std::vector<std::vector<PerforationData<Scalar>>>& well_perf_data,
-                             const SummaryState& summary_state)
+                             const SummaryState& summary_state,
+                             const bool enableDistributedWells)
 {
     // call init on base class
     this->base_init(cellPressures, cellTemperatures, wells_ecl, parallel_well_info,
                     well_perf_data, summary_state);
+    this->enableDistributedWells_ = enableDistributedWells;
     this->global_well_info = std::make_optional<GlobalWellInfo>(schedule,
                                                                 report_step,
                                                                 wells_ecl);
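The value is stored in the new enableDistributedWells_ member (declared in the header hunk below, defaulting to false), so later calls such as resize() and initWellStateMSWell() can consult it without threading the parameter through again.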
@@ -439,7 +441,7 @@ void WellState<Scalar>::resize(const std::vector<Well>& wells_ecl,
                                const SummaryState& summary_state)
 {
     const std::vector<Scalar> tmp(numCells, 0.0); // <- UGLY HACK to pass the size
-    init(tmp, tmp, schedule, wells_ecl, parallel_well_info, 0, nullptr, well_perf_data, summary_state);
+    init(tmp, tmp, schedule, wells_ecl, parallel_well_info, 0, nullptr, well_perf_data, summary_state, this->enableDistributedWells_);

     if (handle_ms_well) {
         initWellStateMSWell(wells_ecl, nullptr);
@@ -728,8 +730,8 @@ void WellState<Scalar>::initWellStateMSWell(const std::vector<Well>& wells_ecl,
                 n_activeperf++;
             }
         }

-        if (static_cast<int>(ws.perf_data.size()) != n_activeperf)
+        if (!this->enableDistributedWells_ && static_cast<int>(ws.perf_data.size()) != n_activeperf)
             throw std::logic_error("Distributed multi-segment wells cannot be initialized properly yet.");

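The extra guard is presumably needed because, for a distributed multisegment well, a rank's ws.perf_data holds only the locally owned perforations while n_activeperf counts the well's active perforations as a whole, so the two sizes legitimately differ; the consistency check (and its logic_error) therefore only applies when distributed wells are disabled.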
@@ -104,7 +104,8 @@ public:
               const int report_step,
               const WellState* prevState,
               const std::vector<std::vector<PerforationData<Scalar>>>& well_perf_data,
-              const SummaryState& summary_state);
+              const SummaryState& summary_state,
+              const bool enableDistributedWells);

     void resize(const std::vector<Well>& wells_ecl,
                 const std::vector<std::reference_wrapper<ParallelWellInfo<Scalar>>>& parallel_well_info,
@@ -353,6 +354,8 @@ public:
     }

 private:
+    bool enableDistributedWells_ = false;
+
     bool is_permanently_inactive_well(const std::string& wname) const {
         return std::find(this->permanently_inactive_well_names_.begin(), this->permanently_inactive_well_names_.end(), wname) != this->permanently_inactive_well_names_.end();
     }
@@ -36,6 +36,27 @@ add_test_compare_parallel_simulation(CASENAME spe9_dist_z
                                      REL_TOL ${rel_tol_parallel}
                                      TEST_ARGS --linear-solver-reduction=1e-7 --tolerance-cnv=5e-6 --tolerance-mb=1e-8 --enable-drift-compensation=false)

+# Tests for distributed multisegment wells. We distribute the load only along the z-axis
+add_test_compare_parallel_simulation(CASENAME msw-simple
+                                     FILENAME MSW-SIMPLE # this file contains one multisegment well without branches that is distributed across several processes
+                                     DIR msw
+                                     SIMULATOR flow_distribute_z
+                                     ONLY_SMRY 1
+                                     ABS_TOL 1e4 # the absolute tolerance is pretty high here; in this case we are only interested in the relative tolerance
+                                     REL_TOL 1e-5
+                                     MPI_PROCS 4
+                                     TEST_ARGS --solver-max-time-step-in-days=10 --allow-distributed-wells=true)
+
+add_test_compare_parallel_simulation(CASENAME msw-3d
+                                     FILENAME MSW-3D # this file contains one multisegment well with branches that is distributed across several processes
+                                     DIR msw
+                                     SIMULATOR flow_distribute_z
+                                     ONLY_SMRY 1
+                                     ABS_TOL 1e4 # the absolute tolerance is pretty high here; in this case we are only interested in the relative tolerance
+                                     REL_TOL 1e-4
+                                     MPI_PROCS 4
+                                     TEST_ARGS --allow-distributed-wells=true)
+
 add_test_compare_parallel_simulation(CASENAME spe9group
                                      FILENAME SPE9_CP_GROUP
                                      SIMULATOR flow
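Both new cases run flow_distribute_z on 4 MPI ranks with --allow-distributed-wells=true and set ONLY_SMRY 1, so only the summary files of the serial and parallel runs are compared; restart output is not checked for these distributed-MSW cases.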
@@ -18,12 +18,15 @@ then
     echo -e "\t\t -e <filename> Simulator binary to use"
     echo -e "\tOptional options:"
     echo -e "\t\t -n <procs> Number of MPI processes to use"
+    echo -e "\t\t -s If given, compare only the SMRY file and skip comparison of the UNRST file."
     exit 1
 fi

 MPI_PROCS=4
 OPTIND=1
-while getopts "i:r:b:f:a:t:c:e:n:" OPT
+ONLY_SUMMARY=false
+
+while getopts "i:r:b:f:a:t:c:e:n:s" OPT
 do
     case "${OPT}" in
         i) INPUT_DATA_PATH=${OPTARG} ;;
@@ -35,6 +38,7 @@ do
         c) COMPARE_ECL_COMMAND=${OPTARG} ;;
         e) EXE_NAME=${OPTARG} ;;
         n) MPI_PROCS=${OPTARG} ;;
+        s) ONLY_SUMMARY=true ;;
     esac
 done
 shift $(($OPTIND-1))
@@ -61,12 +65,16 @@ then
     ${COMPARE_ECL_COMMAND} -t SMRY -a -R ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
 fi

-echo "=== Executing comparison for restart file ==="
-${COMPARE_ECL_COMMAND} -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
-if [ $? -ne 0 ]
-then
-    ecode=1
-    ${COMPARE_ECL_COMMAND} -a -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
+if [ "$ONLY_SUMMARY" = false ]; then
+    echo "=== Executing comparison for restart file ==="
+    ${COMPARE_ECL_COMMAND} -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
+    if [ $? -ne 0 ]
+    then
+        ecode=1
+        ${COMPARE_ECL_COMMAND} -a -l -t UNRST ${RESULT_PATH}/${FILENAME} ${RESULT_PATH}/mpi/${FILENAME} ${ABS_TOL} ${REL_TOL}
+    fi
+else
+    echo "=== Skipping comparison for restart file due to -s flag ==="
 fi

 exit $ecode
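Driver behaviour is unchanged unless -s is given: the SMRY comparison always runs, while the UNRST comparison (including the -a rerun on failure) is skipped when ONLY_SUMMARY=true. For example, passing -n 4 -s runs the 4-process case and compares only summary data.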
@@ -179,7 +179,8 @@ namespace {

     state.init(cpress, ctemp, setup.sched,
                wells, ppinfos,
-               timeStep, nullptr, setup.well_perf_data, setup.st);
+               timeStep, nullptr, setup.well_perf_data, setup.st,
+               false /*enableDistributedWells*/);

     state.initWellStateMSWell(setup.sched.getWells(timeStep),
                               nullptr);