Merge pull request #2947 from blattms/fix-parallel-well-red

Prepares for applying distributed standard wells.
Atgeirr Flø Rasmussen 2020-12-04 20:32:30 +01:00 committed by GitHub
commit 83a6c2abae
15 changed files with 1094 additions and 18 deletions

View File

@@ -40,6 +40,7 @@ list (APPEND MAIN_SOURCE_FILES
opm/simulators/utils/DeferredLogger.cpp
opm/simulators/utils/gatherDeferredLogger.cpp
opm/simulators/utils/ParallelRestart.cpp
opm/simulators/wells/ParallelWellInfo.cpp
opm/simulators/wells/VFPProdProperties.cpp
opm/simulators/wells/VFPInjProperties.cpp
opm/simulators/wells/WellGroupHelpers.cpp
@@ -90,6 +91,7 @@ list (APPEND TEST_SOURCE_FILES
tests/test_norne_pvt.cpp
tests/test_wellprodindexcalculator.cpp
tests/test_wellstatefullyimplicitblackoil.cpp
tests/test_parallelwellinfo.cpp
)
if(MPI_FOUND)
@@ -235,6 +237,7 @@ list (APPEND PUBLIC_HEADER_FILES
opm/simulators/wells/MSWellHelpers.hpp
opm/simulators/wells/BlackoilWellModel.hpp
opm/simulators/wells/BlackoilWellModel_impl.hpp
opm/simulators/wells/ParallelWellInfo.hpp
)
list (APPEND EXAMPLE_SOURCE_FILES

View File

@@ -416,6 +416,16 @@ public:
int outputInterval = EWOMS_GET_PARAM(TypeTag, int, EclOutputInterval);
if (outputInterval >= 0)
schedule().restart().overrideRestartWriteInterval(outputInterval);
// Initialize parallelWells with all local wells
const auto& schedule_wells = schedule().getWellsatEnd();
parallelWells_.reserve(schedule_wells.size());
for (const auto& well: schedule_wells)
{
parallelWells_.emplace_back(well.name(), true);
}
std::sort(parallelWells_.begin(), parallelWells_.end());
}
/*!

View File

@@ -61,6 +61,7 @@
#include <opm/simulators/wells/MultisegmentWell.hpp>
#include <opm/simulators/wells/WellGroupHelpers.hpp>
#include <opm/simulators/wells/WellProdIndexCalculator.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
#include <dune/common/fmatrix.hh>
#include <dune/istl/bcrsmatrix.hh>
@@ -278,6 +279,9 @@ namespace Opm {
std::vector< std::vector<PerforationData> > well_perf_data_;
std::vector< WellProdIndexCalculator > prod_index_calc_;
std::vector< ParallelWellInfo > parallel_well_info_;
std::vector< ParallelWellInfo* > local_parallel_well_info_;
bool wells_active_;
// a vector of all the wells.
@@ -359,6 +363,11 @@ namespace Opm {
std::vector< Well > getLocalNonshutWells(const int timeStepIdx,
int& globalNumWells) const;
/// \brief Create the parallel well information
/// \param localWells The local wells from ECL schedule
std::vector< ParallelWellInfo* >
createLocalParallelWellInfo(const std::vector<Well>& localWells);
// compute the well fluxes and assemble them in to the reservoir equations as source terms
// and in the well equations.
void assemble(const int iterationIdx,

View File

@@ -52,17 +52,20 @@ namespace Opm {
const auto& cartDims = Opm::UgGridHelpers::cartDims(grid);
setupCartesianToCompressed_(Opm::UgGridHelpers::globalCell(grid),
cartDims[0]*cartDims[1]*cartDims[2]);
is_shut_or_defunct_ = [&ebosSimulator](const Well& well) {
auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
parallel_well_info_.assign(parallel_wells.begin(), parallel_wells.end());
const auto& pwell_info = parallel_well_info_;
std::size_t numProcs = ebosSimulator.gridView().comm().size();
is_shut_or_defunct_ = [&pwell_info, numProcs](const Well& well) {
if (well.getStatus() == Well::Status::SHUT)
return true;
if (ebosSimulator.gridView().comm().size() == 1)
if (numProcs == 1u)
return false;
std::pair<std::string, bool> value{well.name(), true}; // false indicates not active!
const auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
auto candidate = std::lower_bound(parallel_wells.begin(), parallel_wells.end(),
value);
return candidate == parallel_wells.end() || *candidate != value;
auto candidate = std::lower_bound(pwell_info.begin(),
pwell_info.end(),
value);
return candidate == pwell_info.end() || *candidate != value;
};
alternative_well_rate_init_ = EWOMS_GET_PARAM(TypeTag, bool, AlternativeWellRateInit);
@@ -224,6 +227,25 @@ namespace Opm {
return w;
}
template<typename TypeTag>
std::vector< ParallelWellInfo* >
BlackoilWellModel<TypeTag>::createLocalParallelWellInfo(const std::vector<Well>& wells)
{
std::vector< ParallelWellInfo* > local_parallel_well_info;
local_parallel_well_info.reserve(wells.size());
for (const auto& well : wells)
{
auto wellPair = std::make_pair(well.name(), true);
auto pwell = std::lower_bound(parallel_well_info_.begin(),
parallel_well_info_.end(),
wellPair);
assert(pwell != parallel_well_info_.end() &&
*pwell == wellPair);
local_parallel_well_info.push_back(&(*pwell));
}
return local_parallel_well_info;
}
template<typename TypeTag>
void
BlackoilWellModel<TypeTag>::
@@ -237,6 +259,7 @@ namespace Opm {
int globalNumWells = 0;
// Make wells_ecl_ contain only this partition's non-shut wells.
wells_ecl_ = getLocalNonshutWells(timeStepIdx, globalNumWells);
local_parallel_well_info_ = createLocalParallelWellInfo(wells_ecl_);
this->initializeWellProdIndCalculators();
initializeWellPerfData();
@@ -801,6 +824,7 @@ namespace Opm {
const auto& perf_data = this->well_perf_data_[wellID];
return std::make_unique<WellType>(this->wells_ecl_[wellID],
*local_parallel_well_info_[wellID],
time_step,
this->param_,
*this->rateConverter_,

View File

@@ -98,7 +98,9 @@ namespace Opm
// TODO: for now, we only use one type to save some implementation effort; improve later.
typedef DenseAd::Evaluation<double, /*size=*/numEq + numWellEq> EvalWell;
MultisegmentWell(const Well& well, const int time_step,
MultisegmentWell(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,

View File

@@ -31,7 +31,9 @@ namespace Opm
template <typename TypeTag>
MultisegmentWell<TypeTag>::
MultisegmentWell(const Well& well, const int time_step,
MultisegmentWell(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,
@@ -40,7 +42,7 @@ namespace Opm
const int index_of_well,
const int first_perf_index,
const std::vector<PerforationData>& perf_data)
: Base(well, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data)
: Base(well, pw_info, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data)
, segment_perforations_(numberOfSegments())
, segment_inlets_(numberOfSegments())
, cell_perforation_depth_diffs_(number_of_perforations_, 0.0)

View File

@@ -0,0 +1,325 @@
/*
Copyright 2020 OPM-OP AS
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <opm/common/ErrorMacros.hpp>
namespace Dune
{
#if HAVE_MPI
template<>
struct CommPolicy<double*>
{
using Type = double*;
using IndexedType = double;
using IndexedTypeFlag = Dune::SizeOne;
static const void* getAddress(const double*& v, int index)
{
return v + index;
}
static int getSize(const double*&, int)
{
return 1;
}
};
#endif
}
namespace Opm
{
CommunicateAbove::CommunicateAbove([[maybe_unused]] const Communication& comm)
#if HAVE_MPI
: comm_(comm), interface_(comm_)
#endif
{}
void CommunicateAbove::clear()
{
#if HAVE_MPI
above_indices_ = {};
current_indices_ = {};
interface_.free();
communicator_.free();
#endif
count_ = 0;
}
void CommunicateAbove::beginReset()
{
clear();
#if HAVE_MPI
if (comm_.size() > 1)
{
current_indices_.beginResize();
above_indices_.beginResize();
}
#endif
}
void CommunicateAbove::endReset()
{
#if HAVE_MPI
if (comm_.size() > 1)
{
above_indices_.endResize();
current_indices_.endResize();
remote_indices_.setIndexSets(current_indices_, above_indices_, comm_);
remote_indices_.setIncludeSelf(true);
remote_indices_.rebuild<true>();
using FromSet = Dune::EnumItem<Attribute,owner>;
using ToSet = Dune::AllSet<Attribute>;
interface_.build(remote_indices_, FromSet(), ToSet());
communicator_.build<double*>(interface_);
}
#endif
}
std::vector<double> CommunicateAbove::communicate(double first_above,
const double* current,
std::size_t size)
{
std::vector<double> above(size, first_above);
#if HAVE_MPI
if (comm_.size() > 1)
{
using Handle = Dune::OwnerOverlapCopyCommunication<int,int>::CopyGatherScatter<double*>;
auto aboveData = above.data();
// Ugly const_cast needed as the compiler complains that passing
// const double*& and double* as parameters is incompatible with the
// function declaration template<Data> forward(const Data&, Data&).
// That would need the first argument to be double* const&.
communicator_.forward<Handle>(const_cast<double*>(current), aboveData);
}
else
#endif
{
if (above.size() > 1)
{
// No communication needed, just copy.
std::copy(current, current + (above.size() - 1), above.begin()+1);
}
}
return above;
}
void CommunicateAbove::pushBackEclIndex([[maybe_unused]] int above,
[[maybe_unused]] int current,
[[maybe_unused]] bool isOwner)
{
#if HAVE_MPI
if (comm_.size() > 1)
{
Attribute attr = owner;
if (!isOwner)
{
attr = overlap;
}
above_indices_.add(above, {count_, attr, true});
current_indices_.add(current, {count_++, attr, true});
}
#endif
}
void ParallelWellInfo::DestroyComm::operator()(Communication* comm)
{
#if HAVE_MPI
// Only delete custom communicators.
bool del = comm
&& (*comm != Dune::MPIHelper::getLocalCommunicator())
&& (*comm != MPI_COMM_WORLD && *comm != MPI_COMM_NULL);
if ( del )
{
// Not 100% nice, but safe as comm is deleted anyway.
// We can only access a copy, not a reference.
MPI_Comm mpi_comm = *comm;
MPI_Comm_free(&mpi_comm);
}
#endif
delete comm;
}
ParallelWellInfo::ParallelWellInfo(const std::string& name,
bool hasLocalCells)
: name_(name), hasLocalCells_ (hasLocalCells),
isOwner_(true), rankWithFirstPerf_(-1),
comm_(new Communication(Dune::MPIHelper::getLocalCommunicator())),
commAbove_(new CommunicateAbove(*comm_))
{}
ParallelWellInfo::ParallelWellInfo(const std::pair<std::string,bool>& well_info,
[[maybe_unused]] Communication allComm)
: name_(well_info.first), hasLocalCells_(well_info.second),
rankWithFirstPerf_(-1)
{
#if HAVE_MPI
MPI_Comm newComm;
int color = hasLocalCells_ ? 1 : MPI_UNDEFINED;
MPI_Comm_split(allComm, color, allComm.rank(), &newComm);
comm_.reset(new Communication(newComm));
#else
comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator()));
#endif
commAbove_.reset(new CommunicateAbove(*comm_));
isOwner_ = (comm_->rank() == 0);
}
void ParallelWellInfo::communicateFirstPerforation(bool hasFirst)
{
int first = hasFirst;
std::vector<int> firstVec(comm_->size());
comm_->allgather(&first, 1, firstVec.data());
auto found = std::find_if(firstVec.begin(), firstVec.end(),
[](int i) -> bool{ return i;});
rankWithFirstPerf_ = found - firstVec.begin();
}
void ParallelWellInfo::pushBackEclIndex(int above, int current)
{
commAbove_->pushBackEclIndex(above, current);
}
void ParallelWellInfo::beginReset()
{
commAbove_->beginReset();
}
void ParallelWellInfo::endReset()
{
commAbove_->endReset();
}
void ParallelWellInfo::clearCommunicateAbove()
{
commAbove_->clear();
}
std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
const double* current_values,
std::size_t size) const
{
return commAbove_->communicate(zero_value, current_values,
size);
}
std::vector<double> ParallelWellInfo::communicateAboveValues(double zero_value,
const std::vector<double>& current_values) const
{
return commAbove_->communicate(zero_value, current_values.data(),
current_values.size());
}
bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
{
return well1.name() < well2.name() || (! (well2.name() < well1.name()) && well1.hasLocalCells() < well2.hasLocalCells());
}
bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
{
bool ret = well1.name() == well2.name() && well1.hasLocalCells() == well2.hasLocalCells()
&& well1.isOwner() == well2.isOwner();
#if HAVE_MPI
using MPIComm = typename Dune::MPIHelper::MPICommunicator;
ret = ret &&
static_cast<MPIComm>(well1.communication()) == static_cast<MPIComm>(well2.communication());
#endif
return ret;
}
bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
{
return ! (well1 == well2);
}
bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
{
return pair.first < well.name() || ( !( well.name() < pair.first ) && pair.second < well.hasLocalCells() );
}
bool operator<( const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
{
return well.name() < pair.first || ( !( pair.first < well.name() ) && well.hasLocalCells() < pair.second );
}
bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
{
return pair.first == well.name() && pair.second == well.hasLocalCells();
}
bool operator==(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
{
return pair == well;
}
bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
{
return pair.first != well.name() || pair.second != well.hasLocalCells();
}
bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
{
return pair != well;
}
CheckDistributedWellConnections::CheckDistributedWellConnections(const Well& well,
const ParallelWellInfo& info)
: well_(well), pwinfo_(info)
{
foundConnections_.resize(well.getConnections().size(), 0);
}
void
CheckDistributedWellConnections::connectionFound(std::size_t i)
{
foundConnections_[i] = 1;
}
bool
CheckDistributedWellConnections::checkAllConnectionsFound()
{
// Ecl does not hold any information about remote connections, so
// every rank sees the complete set of connections and the sizes agree.
assert(pwinfo_.communication().max(foundConnections_.size()) == foundConnections_.size());
pwinfo_.communication().sum(foundConnections_.data(),
foundConnections_.size());
std::string msg = std::string("Cells with these i,j,k indices were not found ")
+ "in grid (well = " + pwinfo_.name() + "):";
bool missingCells = false;
auto start = foundConnections_.begin();
for(auto conFound = start; conFound != foundConnections_.end(); ++conFound)
{
if (*conFound == 0)
{
const auto& completion = well_.getConnections()[conFound - start];
msg = msg + " " + std::to_string(completion.getI()) + "," +
std::to_string(completion.getJ()) + ","
+ std::to_string(completion.getK()) + " ";
missingCells = true;
}
}
if (missingCells && pwinfo_.isOwner())
{
OPM_THROW(std::runtime_error, msg);
}
return !missingCells;
}
} // end namespace Opm

View File

@@ -0,0 +1,260 @@
/*
Copyright 2020 OPM-OP AS
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_PARALLELWELLINFO_HEADER_INCLUDED
#define OPM_PARALLELWELLINFO_HEADER_INCLUDED
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/parallel/plocalindex.hh>
#include <dune/istl/owneroverlapcopy.hh>
#include <opm/parser/eclipse/EclipseState/Schedule/Well/Well.hpp>
#include <memory>
namespace Opm
{
/// \brief Class to facilitate getting values associated with the above perforation
///
class CommunicateAbove
{
enum Attribute {
owner=1, overlap=2
};
using MPIComm = typename Dune::MPIHelper::MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<MPIComm>;
#else
using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif
using LocalIndex = Dune::ParallelLocalIndex<Attribute>;
#if HAVE_MPI
using IndexSet = Dune::ParallelIndexSet<std::size_t,LocalIndex,50>;
using RI = Dune::RemoteIndices<IndexSet>;
#endif
public:
explicit CommunicateAbove(const Communication& comm);
/// \brief Adds information about the original index of the perforations in the ECL Schedule.
///
/// \warning These indices need to be pushed in the same order as they
/// appear in the ECL well specification. Use -1 if there is
/// no perforation above.
/// \param above The ECL index of the next open perforation above.
/// \param current The ECL index of the current open perforation.
void pushBackEclIndex(int above, int current, bool owner=true);
/// \brief Clear all the parallel information
void clear();
/// \brief Indicates that we will add the index information
/// \see pushBackEclIndex
void beginReset();
/// \brief Indicates that the index information is complete.
///
/// Sets up the communication structures to be used by
/// communicate()
void endReset();
/// \brief Creates an array of values for the perforation above.
/// \param first_value Value to use above the first perforation
/// \param current C-array of the values at the perforations
/// \param size The size of the C-array and the returned vector
/// \return a vector containing the values for the perforation above.
std::vector<double> communicate(double first_value,
const double* current,
std::size_t size);
private:
#if HAVE_MPI
Communication comm_;
/// \brief Mapping of the local well index to ecl index
IndexSet current_indices_;
/// \brief Mapping of the above well index to ecl index
IndexSet above_indices_;
RI remote_indices_;
Dune::Interface interface_;
Dune::BufferedCommunicator communicator_;
#endif
std::size_t count_{};
};
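For reference, a minimal sketch of the call sequence this interface is designed for (indices and values are made up; the unit test added later in this commit exercises the same pattern, and comm is assumed to be a Dune communication object):

    Opm::CommunicateAbove commAbove(comm);
    commAbove.beginReset();
    commAbove.pushBackEclIndex(-1, 0);       // topmost open perforation: nothing above
    commAbove.pushBackEclIndex(0, 1);        // perforation with ECL index 1 lies below index 0
    commAbove.endReset();                    // builds the communication pattern
    std::vector<double> current{1.0, 11.0};  // local values, one per perforation
    auto above = commAbove.communicate(-10.0, current.data(), current.size());
    // above[0] == -10.0 (value for the first perforation), above[1] == current[0]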
/// \brief Class encapsulating some information about parallel wells
///
/// e.g. It provides a communicator for well information
class ParallelWellInfo
{
public:
using MPIComm = typename Dune::MPIHelper::MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<MPIComm>;
#else
using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif
/// \brief Constructs object using MPI_COMM_SELF
ParallelWellInfo(const std::string& name = {""},
bool hasLocalCells = true);
/// \brief Constructs object with communication between all rank sharing
/// a well
/// \param well_info Pair of well name and whether local cells might be perforated
/// on this rank
/// \param allComm The communication object with all MPI ranks active in the simulation.
/// Default is the one with all ranks available.
ParallelWellInfo(const std::pair<std::string,bool>& well_info,
Communication allComm = Communication());
const Communication& communication() const
{
return *comm_;
}
/// \brief Collectively decide which rank has first perforation.
void communicateFirstPerforation(bool hasFirst);
template<class T>
T broadcastFirstPerforationValue(const T& t) const
{
T res=t;
comm_->broadcast(&res, 1, rankWithFirstPerf_);
return res;
}
/// \brief Creates an array of values for the perforation above.
/// \param zero_value Value to use above the first perforation
/// \param current C-array of the values at the perforations
/// \param size The size of the C-array and the returned vector
/// \return a vector containing the values for the perforation above.
std::vector<double> communicateAboveValues(double zero_value,
const double* current,
std::size_t size) const;
/// \brief Creates an array of values for the perforation above.
/// \param zero_value Value to use above the first perforation
/// \param current vector of current values
std::vector<double> communicateAboveValues(double zero_value,
const std::vector<double>& current) const;
/// \brief Adds information about the ecl indices of the perforations.
///
/// \warning These indices need to be pushed in the same order as they
/// appear in the ECL well specification. Use -1 if there is
/// no perforation above.
/// \param above The ECL index of the next open perforation above.
/// \param current The ECL index of the current open perforation.
void pushBackEclIndex(int above, int current);
/// \brief Name of the well.
const std::string& name() const
{
return name_;
}
/// \brief Whether local cells might be perforated by this well
bool hasLocalCells() const
{
return hasLocalCells_;
}
bool isOwner() const
{
return isOwner_;
}
/// \brief Indicate that we will reset the ecl index information
///
/// \see pushBackEclIndex
void beginReset();
/// \brief Indicate completion of the reset of the ecl index information
void endReset();
/// \brief Free data of communication data structures.
void clearCommunicateAbove();
private:
/// \brief Deleter that also frees custom MPI communicators
struct DestroyComm
{
void operator()(Communication* comm);
};
/// \brief Name of the well.
std::string name_;
/// \brief Whether local cells might be perforated by this well
bool hasLocalCells_;
/// \brief Whether we own the well and should do reports etc.
bool isOwner_;
/// \brief Rank with the first perforation on it.
int rankWithFirstPerf_;
/// \brief Communication object for the well
///
/// Contains only ranks where this well will perforate local cells.
std::unique_ptr<Communication, DestroyComm> comm_;
/// \brief used to communicate the values for the perforation above.
std::unique_ptr<CommunicateAbove> commAbove_;
};
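A hedged usage sketch (the well name, the single local perforation and the pressure values are illustrative only; the pair constructor and communicateFirstPerforation are collective operations over the supplied communicator):

    std::pair<std::string, bool> info{"PROD1", true};  // this rank perforates PROD1
    Opm::ParallelWellInfo pinfo(info);                 // splits off a communicator for the well
    pinfo.beginReset();
    pinfo.pushBackEclIndex(-1, 0);                     // ECL indices of the local perforations
    pinfo.endReset();
    pinfo.communicateFirstPerforation(true);           // true only on the rank holding the topmost perforation
    double ref = pinfo.broadcastFirstPerforationValue(200.0e5);
    std::vector<double> perf_pressure{210.0e5};
    auto above = pinfo.communicateAboveValues(ref, perf_pressure);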
/// \brief Class checking that all connections are on active cells
///
/// Works for distributed wells, too
class CheckDistributedWellConnections
{
public:
CheckDistributedWellConnections(const Well& well,
const ParallelWellInfo& info);
/// \brief Indicate that the i-th completion was found in the local grid.
/// \param index The index of the completion in Well::getConnections
void connectionFound(std::size_t index);
bool checkAllConnectionsFound();
private:
std::vector<std::size_t> foundConnections_;
const Well& well_;
const ParallelWellInfo& pwinfo_;
};
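A sketch of the intended checking pattern; isInLocalGrid is a hypothetical helper standing in for the grid lookup done by the well model:

    Opm::CheckDistributedWellConnections checker(well, pinfo);
    const auto& connections = well.getConnections();
    for (std::size_t i = 0; i < connections.size(); ++i) {
        if (isInLocalGrid(connections[i]))   // hypothetical local-grid lookup
            checker.connectionFound(i);
    }
    // Sums the flags over all ranks of the well and throws on the owner rank
    // if a connection was not found anywhere.
    const bool allFound = checker.checkAllConnectionsFound();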
bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
bool operator<( const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
bool operator==(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
} // end namespace Opm
#endif // OPM_PARALLELWELLINFO_HEADER_INCLUDED

View File

@@ -31,6 +31,7 @@
#include <opm/simulators/wells/RateConverter.hpp>
#include <opm/simulators/wells/WellInterface.hpp>
#include <opm/simulators/wells/WellProdIndexCalculator.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <opm/models/blackoil/blackoilpolymermodules.hh>
#include <opm/models/blackoil/blackoilsolventmodules.hh>
@@ -158,7 +159,9 @@ namespace Opm
using Base::contiBrineEqIdx;
static const int contiEnergyEqIdx = Indices::contiEnergyEqIdx;
StandardWell(const Well& well, const int time_step,
StandardWell(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,
@@ -377,6 +380,9 @@ namespace Opm
// diagonal matrix for the well
DiagMatWell invDuneD_;
// Wrapper for the parallel application of B for distributed wells
wellhelpers::ParallelStandardWellB<Scalar> parallelB_;
// several vector used in the matrix calculation
mutable BVectorWell Bx_;
mutable BVectorWell invDrw_;

View File

@@ -33,7 +33,9 @@ namespace Opm
template<typename TypeTag>
StandardWell<TypeTag>::
StandardWell(const Well& well, const int time_step,
StandardWell(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,
@@ -42,9 +44,10 @@ namespace Opm
const int index_of_well,
const int first_perf_index,
const std::vector<PerforationData>& perf_data)
: Base(well, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data)
: Base(well, pw_info, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data)
, perf_densities_(number_of_perforations_)
, perf_pressure_diffs_(number_of_perforations_)
, parallelB_(duneB_, pw_info)
, F0_(numWellConservationEq)
, ipr_a_(number_of_phases_)
, ipr_b_(number_of_phases_)
@@ -657,6 +660,9 @@ namespace Opm
// Update the connection
connectionRates_ = connectionRates;
// accumulate resWell_ and invDuneD_ in parallel to get effects of all perforations (might be distributed)
wellhelpers::sumDistributedWellEntries(invDuneD_[0][0], resWell_[0],
this->parallel_well_info_.communication());
// add vol * dF/dt + Q to the well equations;
for (int componentIdx = 0; componentIdx < numWellConservationEq; ++componentIdx) {
// TODO: following the development in MSW, we need to convert the volume of the wellbore to be surface volume
@@ -2475,7 +2481,8 @@ namespace Opm
assert( invDrw_.size() == invDuneD_.N() );
// Bx_ = duneB_ * x
duneB_.mv(x, Bx_);
parallelB_.mv(x, Bx_);
// invDBx = invDuneD_ * Bx_
// TODO: with this, we modified the content of the invDrw_.
// Is it necessary to do this to save some memory?
@@ -2574,7 +2581,7 @@ namespace Opm
BVectorWell resWell = resWell_;
// resWell = resWell - B * x
duneB_.mmv(x, resWell);
parallelB_.mmv(x, resWell);
// xw = D^-1 * resWell
invDuneD_.mv(resWell, xw);
}

View File

@@ -1,6 +1,7 @@
/*
Copyright 2016 SINTEF ICT, Applied Mathematics.
Copyright 2016 Statoil ASA.
Copyright 2020 OPM-OP AS.
This file is part of the Open Porous Media project (OPM).
@@ -22,6 +23,12 @@
#ifndef OPM_WELLHELPERS_HEADER_INCLUDED
#define OPM_WELLHELPERS_HEADER_INCLUDED
#include <opm/common/OpmLog/OpmLog.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/common/dynmatrix.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <functional>
#include <numeric>
#include <vector>
@@ -33,6 +40,108 @@ namespace Opm {
namespace wellhelpers
{
/// \brief A wrapper around the B matrix for distributed wells
///
/// For standard wells, applying the B matrix is basically a multiplication
/// with the values of the perforated cells followed by a reduction
/// (summation) of these into the well equations.
///
/// This class does that in the functions mv and mmv (from the DUNE
/// matrix interface).
///
/// \tparam Scalar The scalar used for the computation.
template<typename Scalar>
class ParallelStandardWellB
{
public:
using Block = Dune::DynamicMatrix<Scalar>;
using Matrix = Dune::BCRSMatrix<Block>;
ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info)
: B_(&B), parallel_well_info_(&parallel_well_info)
{}
//! y = A x
template<class X, class Y>
void mv (const X& x, Y& y) const
{
#if !defined(NDEBUG) && HAVE_MPI
// We need to make sure that all ranks are actually computing
// for the same well. We do this by checking the name of the well.
int cstring_size = parallel_well_info_->name().size()+1;
std::vector<int> sizes(parallel_well_info_->communication().size());
parallel_well_info_->communication().allgather(&cstring_size, 1, sizes.data());
std::vector<int> offsets(sizes.size()+1, 0); //last entry will be accumulated size
std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1);
std::vector<char> cstrings(offsets[sizes.size()]);
bool consistentWells = true;
char* send = const_cast<char*>(parallel_well_info_->name().c_str());
parallel_well_info_->communication().allgatherv(send, cstring_size,
cstrings.data(), sizes.data(),
offsets.data());
for(std::size_t i = 0; i < sizes.size(); ++i)
{
std::string name(cstrings.data()+offsets[i]);
if (name != parallel_well_info_->name())
{
if (parallel_well_info_->communication().rank() == 0)
{
// only one process per well logs; it might not be rank 0 of MPI_COMM_WORLD, though
std::string msg = std::string("Fatal Error: Not all ranks are computing for the same well")
+ " well should be " + parallel_well_info_->name() + " but is "
+ name;
OpmLog::debug(msg);
}
consistentWells = false;
break;
}
}
parallel_well_info_->communication().barrier();
// As not all processes are involved here we need to use MPI_Abort and hope MPI kills them all
if (!consistentWells)
{
MPI_Abort(MPI_COMM_WORLD, 1);
}
#endif
B_->mv(x, y);
if (this->parallel_well_info_->communication().size() > 1)
{
// Only do communication if we must.
// The B matrix is basically a component-wise multiplication
// with a vector followed by a parallel reduction. We do that
// reduction to all ranks computing for the well to save the
// broadcast when applying C^T.
using YField = typename Y::block_type::value_type;
assert(y.size() == 1);
this->parallel_well_info_->communication().allreduce<std::plus<YField>>(y[0].container().data(),
y[0].container().size());
}
}
//! y -= A x
template<class X, class Y>
void mmv (const X& x, Y& y) const
{
if (this->parallel_well_info_->communication().size() == 1)
{
// Do the same thing as before. The else branch
// produces different rounding errors and results in
// slightly different iteration counts / well curves.
B_->mmv(x, y);
}
else
{
Y temp(y);
mv(x, temp); // includes parallel reduction
y -= temp;
}
}
private:
const Matrix* B_;
const ParallelWellInfo* parallel_well_info_;
};
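In use the wrapper simply replaces direct applications of the well's B matrix, as the StandardWell changes further down in this commit do; a short sketch using the member names from there:

    // Construct once from the well's B matrix and its ParallelWellInfo ...
    wellhelpers::ParallelStandardWellB<Scalar> parallelB(duneB_, parallel_well_info_);
    // ... then use it wherever duneB_ was applied before:
    parallelB.mv(x, Bx_);        // Bx_ = B x, summed over all ranks of the well
    parallelB.mmv(x, resWell);   // resWell -= B x, with the same reduction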
inline
double computeHydrostaticCorrection(const double well_ref_depth, const double vfp_ref_depth,
const double rho, const double gravity) {
@@ -44,6 +153,38 @@ namespace Opm {
/// \brief Sums the entries of the diagonal matrix and the residual vector for distributed wells
template<typename Scalar, typename Comm>
void sumDistributedWellEntries(Dune::DynamicMatrix<Scalar>& mat, Dune::DynamicVector<Scalar>& vec,
const Comm& comm)
{
// DynamicMatrix does not use one contiguous array for storing the data
// but a DynamicVector of DynamicVectors. Hence we need to copy the data
// to contiguous memory for MPI.
if (comm.size() == 1)
{
return;
}
std::vector<Scalar> allEntries;
allEntries.reserve(mat.N()*mat.M()+vec.size());
for(const auto& row: mat)
{
allEntries.insert(allEntries.end(), row.begin(), row.end());
}
allEntries.insert(allEntries.end(), vec.begin(), vec.end());
comm.sum(allEntries.data(), allEntries.size());
auto pos = allEntries.begin();
auto cols = mat.cols();
for(auto&& row: mat)
{
std::copy(pos, pos + cols, &(row[0]));
pos += cols;
}
assert(std::size_t(allEntries.end() - pos) == vec.size());
std::copy(pos, allEntries.end(), &(vec[0]));
}
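A self-contained sketch of the helper (sizes and values are made up; comm is assumed to be the communicator of the well, e.g. ParallelWellInfo::communication()):

    Dune::DynamicMatrix<double> D(2, 2, 1.0);  // this rank's contribution to the diagonal block
    Dune::DynamicVector<double> r(2, 0.5);     // this rank's contribution to the residual
    wellhelpers::sumDistributedWellEntries(D, r, comm);
    // Afterwards every rank participating in the well holds the summed block and vector.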
template <int dim, class C2F, class FC>
std::array<double, dim>

View File

@@ -121,7 +121,9 @@ namespace Opm
has_brine,
Indices::numPhases >;
/// Constructor
WellInterface(const Well& well, const int time_step,
WellInterface(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
const int pvtRegionIdx,
@@ -317,6 +319,8 @@ namespace Opm
Well well_ecl_;
const ParallelWellInfo& parallel_well_info_;
const int current_step_;
// simulation parameters

View File

@@ -30,6 +30,7 @@ namespace Opm
template<typename TypeTag>
WellInterface<TypeTag>::
WellInterface(const Well& well,
const ParallelWellInfo& pw_info,
const int time_step,
const ModelParameters& param,
const RateConverterType& rate_converter,
@@ -40,6 +41,7 @@ namespace Opm
const int first_perf_index,
const std::vector<PerforationData>& perf_data)
: well_ecl_(well)
, parallel_well_info_(pw_info)
, current_step_(time_step)
, param_(param)
, rateConverter_(rate_converter)
@@ -50,6 +52,7 @@ namespace Opm
, first_perf_(first_perf_index)
, perf_data_(&perf_data)
{
assert(well.name()==pw_info.name());
assert(std::is_sorted(perf_data.begin(), perf_data.end(),
[](const auto& perf1, const auto& perf2){
return perf1.ecl_index < perf2.ecl_index;

View File

@@ -0,0 +1,278 @@
/*
Copyright 2020 OPM-OP AS
Copyright 2015 Dr. Blatt - HPC-Simulation-Software & Services.
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include<config.h>
#include<opm/simulators/wells/ParallelWellInfo.hpp>
#include<vector>
#include<string>
#include<tuple>
#include<algorithm>
#include<iostream>
#include<ostream>
#define BOOST_TEST_MODULE ParallelWellInfo
#include <boost/test/unit_test.hpp>
class MPIError {
public:
/** @brief Constructor. */
MPIError(std::string s, int e) : errorstring(s), errorcode(e){}
/** @brief The error string. */
std::string errorstring;
/** @brief The mpi error code. */
int errorcode;
};
#ifdef HAVE_MPI
void MPI_err_handler(MPI_Comm *, int *err_code, ...){
char *err_string=new char[MPI_MAX_ERROR_STRING];
int err_length;
MPI_Error_string(*err_code, err_string, &err_length);
std::string s(err_string, err_length);
std::cerr << "An MPI Error ocurred:"<<std::endl<<s<<std::endl;
delete[] err_string;
throw MPIError(s, *err_code);
}
#endif
struct MPIFixture
{
MPIFixture()
{
#if HAVE_MPI
int m_argc = boost::unit_test::framework::master_test_suite().argc;
char** m_argv = boost::unit_test::framework::master_test_suite().argv;
helper = &Dune::MPIHelper::instance(m_argc, m_argv);
#ifdef MPI_2
MPI_Comm_create_errhandler(MPI_err_handler, &handler);
MPI_Comm_set_errhandler(MPI_COMM_WORLD, handler);
#else
MPI_Errhandler_create(MPI_err_handler, &handler);
MPI_Errhandler_set(MPI_COMM_WORLD, handler);
#endif
#endif
}
~MPIFixture()
{
#if HAVE_MPI
MPI_Finalize();
#endif
}
Dune::MPIHelper* helper;
#if HAVE_MPI
MPI_Errhandler handler;
#endif
};
BOOST_GLOBAL_FIXTURE(MPIFixture);
// Needed for BOOST_CHECK_EQUAL_COLLECTIONS
namespace std
{
std::ostream& operator<<(std::ostream& os, const std::pair<std::string, bool>& p)
{
return os << "{" << p.first << " "<< p.second << "}";
}
}
namespace Opm
{
std::ostream& operator<<(std::ostream& os, const Opm::ParallelWellInfo& w)
{
return os << "{" << w.name() << " "<< w.hasLocalCells() << " "<<
w.isOwner() << "}";
}
}
BOOST_AUTO_TEST_CASE(ParallelWellComparison)
{
int argc = 0;
char** argv = nullptr;
const auto& helper = Dune::MPIHelper::instance(argc, argv);
std::vector<std::pair<std::string,bool>> pairs;
if (helper.rank() == 0)
pairs = {{"Test1", true},{"Test2", true}, {"Test1", false} };
else
pairs = {{"Test1", false},{"Test2", true}, {"Test1", true} };
std::vector<Opm::ParallelWellInfo> well_info;
well_info.assign(pairs.begin(), pairs.end());
BOOST_CHECK_EQUAL_COLLECTIONS(pairs.begin(), pairs.end(),
well_info.begin(), well_info.end());
BOOST_CHECK_EQUAL_COLLECTIONS(well_info.begin(), well_info.end(),
pairs.begin(), pairs.end());
BOOST_TEST(well_info[0] < pairs[1]);
BOOST_TEST(pairs[0] != well_info[1]);
BOOST_TEST(pairs[0] < well_info[1]);
BOOST_TEST(well_info[0] == pairs[0]);
BOOST_TEST(well_info[0] != well_info[1]);
Opm::ParallelWellInfo well0, well1;
BOOST_TEST(well0 == well1);
#if HAVE_MPI
BOOST_TEST(well0.communication()==helper.getLocalCommunicator());
#endif
Opm::ParallelWellInfo well2("Test", false);
std::pair<std::string, bool> pwell={"Test", true};
BOOST_TEST(well2 < pwell);
Opm::ParallelWellInfo well3("Test", true);
BOOST_TEST(! (well3 < pwell));
pwell.second = false;
BOOST_TEST(! (well3 < pwell));
if (helper.rank() == 0)
BOOST_TEST(well_info[0].communication().size()==1);
#if HAVE_MPI
Opm::ParallelWellInfo::Communication comm{MPI_COMM_WORLD};
BOOST_TEST(well_info[1].communication().size() == comm.size());
if (helper.rank() > 0)
{
BOOST_TEST(well_info[2].communication().size() == comm.size()-1);
}
#endif
}
BOOST_AUTO_TEST_CASE(CommunicateAboveSelf)
{
auto comm = Dune::MPIHelper::getLocalCommunicator();
Opm::CommunicateAbove commAbove{ comm };
for(std::size_t count=0; count < 2; ++count)
{
std::vector<int> eclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
std::vector<double> current(eclIndex.size());
std::transform(eclIndex.begin(), eclIndex.end(), current.begin(),
[](double v){ return 1+10.0*v;});
commAbove.beginReset();
for (std::size_t i = 0; i < current.size(); ++i)
{
if (i==0)
commAbove.pushBackEclIndex(-1, eclIndex[i]);
else
commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
}
commAbove.endReset();
auto above = commAbove.communicate(-10.0, current.data(), current.size());
BOOST_TEST(above[0]==-10.0);
BOOST_TEST(above.size() == current.size());
auto a = above.begin()+1;
std::for_each(current.begin(), current.begin() + (current.size()-1),
[&a](double v){ BOOST_TEST(*(a++) == v);});
}
}
BOOST_AUTO_TEST_CASE(CommunicateAboveSelf1)
{
auto comm = Dune::MPIHelper::getLocalCommunicator();
Opm::CommunicateAbove commAbove{ comm };
for(std::size_t count=0; count < 2; ++count)
{
std::vector<int> eclIndex = {0};
std::vector<double> current(eclIndex.size());
std::transform(eclIndex.begin(), eclIndex.end(), current.begin(),
[](double v){ return 1+10.0*v;});
commAbove.beginReset();
for (std::size_t i = 0; i < current.size(); ++i)
{
if (i==0)
commAbove.pushBackEclIndex(-1, eclIndex[i]);
else
commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]);
}
commAbove.endReset();
auto above = commAbove.communicate(-10.0, current.data(), current.size());
BOOST_TEST(above[0]==-10.0);
BOOST_TEST(above.size() == current.size());
auto a = above.begin()+1;
std::for_each(current.begin(), current.begin() + (current.size()-1),
[&a](double v){ BOOST_TEST(*(a++) == v);});
}
}
BOOST_AUTO_TEST_CASE(CommunicateAboveParallel)
{
using MPIComm = typename Dune::MPIHelper::MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<MPIComm>;
#else
using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif
auto comm = Communication(Dune::MPIHelper::getCommunicator());
Opm::CommunicateAbove commAbove{ comm };
for(std::size_t count=0; count < 2; ++count)
{
std::vector<int> globalEclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
auto oldSize = globalEclIndex.size();
std::size_t globalSize = 3 * comm.size();
auto lastIndex = globalEclIndex.back();
globalEclIndex.resize(globalSize);
if ( globalSize > oldSize)
{
++lastIndex;
for(auto entry = globalEclIndex.begin() + oldSize;
entry != globalEclIndex.end(); ++entry, ++lastIndex)
{
*entry = lastIndex;
}
}
std::vector<double> globalCurrent(globalEclIndex.size());
std::transform(globalEclIndex.begin(), globalEclIndex.end(), globalCurrent.begin(),
[](double v){ return 1+10.0*v;});
std::vector<double> current(3);
commAbove.beginReset();
for (std::size_t i = 0; i < current.size(); ++i)
{
auto gi = comm.rank() + comm.size() * i;
if (gi==0)
{
commAbove.pushBackEclIndex(-1, globalEclIndex[gi]);
}
else
{
commAbove.pushBackEclIndex(globalEclIndex[gi-1], globalEclIndex[gi]);
}
current[i] = globalCurrent[gi];
}
commAbove.endReset();
auto above = commAbove.communicate(-10.0, current.data(), current.size());
if (comm.rank() == 0)
BOOST_TEST(above[0]==-10.0);
BOOST_TEST(above.size() == current.size());
for (std::size_t i = 0; i < current.size(); ++i)
{
auto gi = comm.rank() + comm.size() * i;
if (gi > 0)
{
BOOST_TEST(above[i]==globalCurrent[gi-1]);
}
}
}
}

View File

@@ -126,8 +126,9 @@ BOOST_AUTO_TEST_CASE(TestStandardWellInput) {
Opm::PerforationData dummy;
std::vector<Opm::PerforationData> pdata(well.getConnections().size(), dummy);
Opm::ParallelWellInfo pinfo{well.name()};
BOOST_CHECK_THROW( StandardWell( well, -1, param, *rateConverter, 0, 3, 3, 0, 0, pdata), std::invalid_argument);
BOOST_CHECK_THROW( StandardWell( well, pinfo, -1, param, *rateConverter, 0, 3, 3, 0, 0, pdata), std::invalid_argument);
}
@@ -156,7 +157,8 @@ BOOST_AUTO_TEST_CASE(TestBehavoir) {
std::vector<int>(10, 0)));
Opm::PerforationData dummy;
std::vector<Opm::PerforationData> pdata(wells_ecl[w].getConnections().size(), dummy);
wells.emplace_back(new StandardWell(wells_ecl[w], current_timestep, param, *rateConverter, 0, 3, 3, w, 0, pdata) );
Opm::ParallelWellInfo pinfo{wells_ecl[w].name()};
wells.emplace_back(new StandardWell(wells_ecl[w], pinfo, current_timestep, param, *rateConverter, 0, 3, 3, w, 0, pdata) );
}
}