mirror of https://github.com/OPM/opm-simulators.git (synced 2025-02-25 18:55:30 -06:00)

commit ba9f4c5b50
Merge pull request #3219 from akva2/eclcpgrid_vanguard_separate

    Separate eclcpgridvanguard.hh into typetag dependent/independent code
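The pattern used throughout this change, in brief: everything that only needs the
ElementMapper, GridView and Scalar types moves into a separately compiled
EclGenericCpGridVanguard base class, while EclCpGridVanguard<TypeTag> keeps the
typetag-dependent pieces and hands them to the base through virtual hooks
(allocTrans() and getTransmissibility()). A minimal sketch of the idea, with
simplified stand-in names rather than the real OPM interfaces:

    // Typetag-independent part: lives in a .cc file and is compiled once,
    // so the heavy grid logic is not re-instantiated for every type tag.
    class GenericVanguard {
    public:
        virtual ~GenericVanguard() = default;

        void loadBalance() {
            // Partitioning needs transmissibilities, but computing them
            // depends on the type tag, so defer to the derived class.
            this->allocTrans();
            double w = this->getTransmissibility(0, 1);
            // ... use w as an edge weight for the partitioner ...
            (void)w;
        }

    protected:
        virtual void allocTrans() = 0;
        virtual double getTransmissibility(unsigned i, unsigned j) = 0;
    };

    // Typetag-dependent part: stays header-only and implements the hooks.
    template <class TypeTag>
    class Vanguard : public GenericVanguard {
    protected:
        void allocTrans() override { /* build TypeTag-specific transmissibilities */ }
        double getTransmissibility(unsigned, unsigned) override { return 1.0; }
    };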
@@ -24,6 +24,7 @@
 # find opm -name '*.c*' -printf '\t%p\n' | sort
 list (APPEND MAIN_SOURCE_FILES
   ebos/collecttoiorank.cc
+  ebos/eclgenericcpgridvanguard.cc
   ebos/eclgenericvanguard.cc
   opm/core/props/phaseUsageFromDeck.cpp
   opm/core/props/satfunc/RelpermDiagnostics.cpp
@@ -30,24 +30,9 @@
 #include "eclbasevanguard.hh"
 #include "ecltransmissibility.hh"
 #include "femcpgridcompat.hh"
+#include "eclgenericcpgridvanguard.hh"
-#include <opm/grid/CpGrid.hpp>
-#include <opm/grid/cpgrid/GridHelpers.hpp>
-#include <opm/simulators/utils/ParallelEclipseState.hpp>
-#include <opm/simulators/utils/PropsCentroidsDataHandle.hpp>
-#include <opm/simulators/utils/ParallelSerialization.hpp>
-
-#if HAVE_MPI
-#include <ebos/eclmpiserializer.hh>
-#endif
-
-#include <dune/grid/common/mcmgmapper.hh>
-
-#include <dune/common/version.hh>
-
 #include <functional>
-#include <numeric>
-#include <sstream>
 
 namespace Opm {
 template <class TypeTag>
@@ -89,6 +74,9 @@ namespace Opm {
  */
 template <class TypeTag>
 class EclCpGridVanguard : public EclBaseVanguard<TypeTag>
+                        , public EclGenericCpGridVanguard<GetPropType<TypeTag, Properties::ElementMapper>,
+                                                          GetPropType<TypeTag, Properties::GridView>,
+                                                          GetPropType<TypeTag, Properties::Scalar>>
 {
     friend class EclBaseVanguard<TypeTag>;
     typedef EclBaseVanguard<TypeTag> ParentType;
@@ -108,196 +96,11 @@ private:
 
 public:
     EclCpGridVanguard(Simulator& simulator)
-        : EclBaseVanguard<TypeTag>(simulator), mpiRank()
+        : EclBaseVanguard<TypeTag>(simulator)
     {
-#if HAVE_MPI
-        MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
-#endif
         this->callImplementationInit();
     }
 
-    /*!
-     * \brief Return a reference to the simulation grid.
-     */
-    Grid& grid()
-    { return *grid_; }
-
-    /*!
-     * \brief Return a reference to the simulation grid.
-     */
-    const Grid& grid() const
-    { return *grid_; }
-
-    /*!
-     * \brief Returns a refefence to the grid which should be used by the EQUIL
-     *        initialization code.
-     *
-     * The EQUIL keyword is used to specify the initial condition of the reservoir in
-     * hydrostatic equilibrium. Since the code which does this is not accepting arbitrary
-     * DUNE grids (the code is part of the opm-core module), this is not necessarily the
-     * same as the grid which is used for the actual simulation.
-     */
-    const EquilGrid& equilGrid() const
-    {
-        assert(mpiRank == 0);
-        return *equilGrid_;
-    }
-
-    /*!
-     * \brief Indicates that the initial condition has been computed and the memory used
-     *        by the EQUIL grid can be released.
-     *
-     * Depending on the implementation, subsequent accesses to the EQUIL grid lead to
-     * crashes.
-     */
-    void releaseEquilGrid()
-    {
-        equilGrid_.reset();
-        equilCartesianIndexMapper_.reset();
-    }
-
-    /*!
-     * \brief Distribute the simulation grid over multiple processes
-     *
-     * (For parallel simulation runs.)
-     */
-    void loadBalance()
-    {
-#if HAVE_MPI
-        int mpiSize = 1;
-        MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
-
-        if (mpiSize > 1) {
-            // the CpGrid's loadBalance() method likes to have the transmissibilities as
-            // its edge weights. since this is (kind of) a layering violation and
-            // transmissibilities are relatively expensive to compute, we only do it if
-            // more than a single process is involved in the simulation.
-            cartesianIndexMapper_.reset(new CartesianIndexMapper(*grid_));
-            if (grid_->size(0))
-            {
-                globalTrans_.reset(new EclTransmissibility<TypeTag>(*this));
-                globalTrans_->update(false);
-            }
-
-            Dune::EdgeWeightMethod edgeWeightsMethod = this->edgeWeightsMethod();
-            bool ownersFirst = this->ownersFirst();
-            bool serialPartitioning = this->serialPartitioning();
-            bool enableDistributedWells = this->enableDistributedWells();
-            Scalar zoltanImbalanceTol = this->zoltanImbalanceTol();
-
-            // convert to transmissibility for faces
-            // TODO: grid_->numFaces() is not generic. use grid_->size(1) instead? (might
-            // not work)
-            const auto& gridView = grid_->leafGridView();
-            unsigned numFaces = grid_->numFaces();
-            std::vector<double> faceTrans;
-            int loadBalancerSet = externalLoadBalancer_.has_value();
-            grid_->comm().broadcast(&loadBalancerSet, 1, 0);
-            if (!loadBalancerSet){
-                faceTrans.resize(numFaces, 0.0);
-                ElementMapper elemMapper(this->gridView(), Dune::mcmgElementLayout());
-                auto elemIt = gridView.template begin</*codim=*/0>();
-                const auto& elemEndIt = gridView.template end</*codim=*/0>();
-                for (; elemIt != elemEndIt; ++ elemIt) {
-                    const auto& elem = *elemIt;
-                    auto isIt = gridView.ibegin(elem);
-                    const auto& isEndIt = gridView.iend(elem);
-                    for (; isIt != isEndIt; ++ isIt) {
-                        const auto& is = *isIt;
-                        if (!is.neighbor())
-                            continue;
-
-                        unsigned I = elemMapper.index(is.inside());
-                        unsigned J = elemMapper.index(is.outside());
-
-                        // FIXME (?): this is not portable!
-                        unsigned faceIdx = is.id();
-
-                        faceTrans[faceIdx] = globalTrans_->transmissibility(I, J);
-                    }
-                }
-            }
-
-            //distribute the grid and switch to the distributed view.
-            {
-                const auto wells = this->schedule().getWellsatEnd();
-
-                try
-                {
-                    auto& eclState = dynamic_cast<ParallelEclipseState&>(this->eclState());
-                    const EclipseGrid* eclGrid = nullptr;
-
-                    if (grid_->comm().rank() == 0)
-                    {
-                        eclGrid = &this->eclState().getInputGrid();
-                    }
-
-                    PropsCentroidsDataHandle<Dune::CpGrid> handle(*grid_, eclState, eclGrid, this->centroids_,
-                                                                  cartesianIndexMapper());
-                    if (loadBalancerSet)
-                    {
-                        std::vector<int> parts;
-                        if (grid_->comm().rank() == 0)
-                        {
-                            parts = (*externalLoadBalancer_)(*grid_);
-                        }
-                        this->parallelWells_ = std::get<1>(grid_->loadBalance(handle, parts, &wells, ownersFirst, false, 1));
-                    }
-                    else
-                    {
-                        this->parallelWells_ =
-                            std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, serialPartitioning,
-                                                           faceTrans.data(), ownersFirst, false, 1, true, zoltanImbalanceTol,
-                                                           enableDistributedWells));
-                    }
-                }
-                catch(const std::bad_cast& e)
-                {
-                    std::ostringstream message;
-                    message << "Parallel simulator setup is incorrect as it does not use ParallelEclipseState ("
-                            << e.what() <<")"<<std::flush;
-                    OpmLog::error(message.str());
-                    std::rethrow_exception(std::current_exception());
-                }
-            }
-            grid_->switchToDistributedView();
-
-            cartesianIndexMapper_.reset();
-
-            // Calling Schedule::filterConnections would remove any perforated
-            // cells that exist only on other ranks even in the case of distributed wells
-            // But we need all connections to figure out the first cell of a well (e.g. for
-            // pressure). Hence this is now skipped. Rank 0 had everything even before.
-        }
-#endif
-
-        cartesianIndexMapper_.reset(new CartesianIndexMapper(*grid_));
-        this->updateGridView_();
-        this->updateCartesianToCompressedMapping_();
-        this->updateCellDepths_();
-        this->updateCellThickness_();
-
-#if HAVE_MPI
-        if (mpiSize > 1) {
-            try
-            {
-                auto& parallelEclState = dynamic_cast<ParallelEclipseState&>(this->eclState());
-                // reset cartesian index mapper for auto creation of field properties
-                parallelEclState.resetCartesianMapper(cartesianIndexMapper_.get());
-                parallelEclState.switchToDistributedProps();
-            }
-            catch(const std::bad_cast& e)
-            {
-                std::ostringstream message;
-                message << "Parallel simulator setup is incorrect as it does not use ParallelEclipseState ("
-                        << e.what() <<")"<<std::flush;
-                OpmLog::error(message.str());
-                std::rethrow_exception(std::current_exception());
-            }
-        }
-#endif
-    }
-
     /*!
      * \brief Free the memory occupied by the global transmissibility object.
      *
@@ -308,23 +111,6 @@ public:
         globalTrans_.reset();
     }
 
-    /*!
-     * \brief Returns the object which maps a global element index of the simulation grid
-     *        to the corresponding element index of the logically Cartesian index.
-     */
-    const CartesianIndexMapper& cartesianIndexMapper() const
-    { return *cartesianIndexMapper_; }
-
-    /*!
-     * \brief Returns mapper from compressed to cartesian indices for the EQUIL grid
-     */
-    const CartesianIndexMapper& equilCartesianIndexMapper() const
-    {
-        assert(mpiRank == 0);
-        assert(equilCartesianIndexMapper_);
-        return *equilCartesianIndexMapper_;
-    }
-
     const EclTransmissibility<TypeTag>& globalTransmissibility() const
     {
         assert( globalTrans_ != nullptr );
@@ -336,162 +122,54 @@ public:
         globalTrans_.reset();
     }
 
-    /// \brief Sets a function that returns external load balancing information when passed the grid
-    ///
-    /// The information is a vector of integers indication the partition index for each cell id.
-    static void setExternalLoadBalancer(const std::function<std::vector<int> (const Grid&)>& loadBalancer)
+    /*!
+     * \brief Distribute the simulation grid over multiple processes
+     *
+     * (For parallel simulation runs.)
+     */
+    void loadBalance()
     {
-        externalLoadBalancer_ = loadBalancer;
+        this->doLoadBalance_(this->edgeWeightsMethod(), this->ownersFirst(),
+                             this->serialPartitioning(), this->enableDistributedWells(),
+                             this->zoltanImbalanceTol(), this->gridView(),
+                             this->schedule(), this->centroids_,
+                             this->eclState(), this->parallelWells_);
+
+        this->updateGridView_();
+        this->updateCartesianToCompressedMapping_();
+        this->updateCellDepths_();
+        this->updateCellThickness_();
+
+        this->distributeFieldProps_(this->eclState());
     }
 
 protected:
     void createGrids_()
     {
-        const EclipseGrid* input_grid = nullptr;
-        std::vector<double> global_porv;
-        // At this stage the ParallelEclipseState instance is still in global
-        // view; on rank 0 we have undistributed data for the entire grid, on
-        // the other ranks the EclipseState is empty.
-        if (mpiRank == 0) {
-            input_grid = &this->eclState().getInputGrid();
-            global_porv = this->eclState().fieldProps().porv(true);
-            OpmLog::info("\nProcessing grid");
-        }
-
-        grid_.reset(new Dune::CpGrid());
-        const auto& removed_cells = grid_->processEclipseFormat(input_grid,
-                                                                &this->eclState(),
-                                                                /*isPeriodic=*/false,
-                                                                /*flipNormals=*/false,
-                                                                /*clipZ=*/false);
-
-        if (mpiRank == 0) {
-            const auto& active_porv = this->eclState().fieldProps().porv(false);
-            const auto& unit_system = this->eclState().getUnits();
-            const auto& volume_unit = unit_system.name( UnitSystem::measure::volume);
-            double total_pore_volume = unit_system.from_si( UnitSystem::measure::volume, std::accumulate(active_porv.begin(), active_porv.end(), 0.0));
-            OpmLog::info(fmt::format("Total number of active cells: {} / total pore volume: {:0.0f} {}", grid_->numCells(), total_pore_volume , volume_unit));
-
-            double removed_pore_volume = 0;
-            for (const auto& global_index : removed_cells)
-                removed_pore_volume += active_porv[ this->eclState().getInputGrid().activeIndex(global_index) ];
-
-            if (removed_pore_volume > 0) {
-                removed_pore_volume = unit_system.from_si( UnitSystem::measure::volume, removed_pore_volume );
-                OpmLog::info(fmt::format("Removed {} cells with a pore volume of {:0.0f} {} ({:5.3f} %) due to MINPV/MINPVV",
-                                         removed_cells.size(),
-                                         removed_pore_volume,
-                                         volume_unit,
-                                         100 * removed_pore_volume / total_pore_volume));
-            }
-
-        }
-#if HAVE_MPI
-        {
-            const bool has_numerical_aquifer = this->eclState().aquifer().hasNumericalAquifer();
-            int mpiSize = 1;
-            MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
-            // when there is numerical aquifers, new NNC are generated during grid processing
-            // we need to pass the NNC from root process to other processes
-            if (has_numerical_aquifer && mpiSize > 1) {
-                auto nnc_input = this->eclState().getInputNNC();
-                Opm::EclMpiSerializer ser(Dune::MPIHelper::getCollectiveCommunication());
-                ser.broadcast(nnc_input);
-                if (mpiRank > 0) {
-                    this->eclState().setInputNNC(nnc_input);
-                }
-            }
-        }
-#endif
-
-        // we use separate grid objects: one for the calculation of the initial condition
-        // via EQUIL and one for the actual simulation. The reason is that the EQUIL code
-        // is allergic to distributed grids and the simulation grid is distributed before
-        // the initial condition is calculated.
-        // After loadbalance grid_ will contain a global and distribute view.
-        // equilGrid_being a shallow copy only the global view.
-        if (mpiRank == 0)
-        {
-            equilGrid_.reset(new Dune::CpGrid(*grid_));
-            equilCartesianIndexMapper_.reset(new CartesianIndexMapper(*equilGrid_));
-
-            std::vector<int> actnum = Opm::UgGridHelpers::createACTNUM(*grid_);
-            auto &field_props = this->eclState().fieldProps();
-            const_cast<FieldPropsManager&>(field_props).reset_actnum(actnum);
-        }
+        this->doCreateGrids_(this->eclState());
+    }
+
+    void allocTrans() override
+    {
+        globalTrans_.reset(new EclTransmissibility<TypeTag>(*this));
+        globalTrans_->update(false);
+    }
+
+    double getTransmissibility(unsigned I, unsigned J) override
+    {
+        return globalTrans_->transmissibility(I,J);
     }
 
     // removing some connection located in inactive grid cells
     void filterConnections_()
     {
-        // We only filter if we hold the global grid. Otherwise the filtering
-        // is done after load balancing as in the future the other processes
-        // will hold an empty partition for the global grid and hence filtering
-        // here would remove all well connections.
-        if (equilGrid_)
-        {
-            ActiveGridCells activeCells(equilGrid().logicalCartesianSize(),
-                                        equilGrid().globalCell().data(),
-                                        equilGrid().size(0));
-            this->schedule().filterConnections(activeCells);
-        }
-#if HAVE_MPI
-        try
-        {
-            // Broadcast another time to remove inactive peforations on
-            // slave processors.
-            Opm::eclScheduleBroadcast(this->schedule());
-        }
-        catch(const std::exception& broadcast_error)
-        {
-            OpmLog::error(fmt::format("Distributing properties to all processes failed\n"
-                                      "Internal error message: {}", broadcast_error.what()));
-            MPI_Finalize();
-            std::exit(EXIT_FAILURE);
-        }
-#endif
+        this->doFilterConnections_(this->schedule());
     }
 
-    Scalar computeCellThickness(const Element& element) const
-    {
-        typedef typename Element::Geometry Geometry;
-        static constexpr int zCoord = Element::dimension - 1;
-        Scalar zz1 = 0.0;
-        Scalar zz2 = 0.0;
-
-        const Geometry& geometry = element.geometry();
-        // This code only works with CP-grid where the
-        // number of corners are 8 and
-        // also assumes that the first
-        // 4 corners are the top surface and
-        // the 4 next are the bottomn.
-        assert(geometry.corners() == 8);
-        for (int i=0; i < 4; ++i){
-            zz1 += geometry.corner(i)[zCoord];
-            zz2 += geometry.corner(i+4)[zCoord];
-        }
-        zz1 /=4;
-        zz2 /=4;
-        return zz2-zz1;
-    }
-
-    std::unique_ptr<Grid> grid_;
-    std::unique_ptr<EquilGrid> equilGrid_;
-    std::unique_ptr<CartesianIndexMapper> cartesianIndexMapper_;
-    std::unique_ptr<CartesianIndexMapper> equilCartesianIndexMapper_;
-
     std::unique_ptr<EclTransmissibility<TypeTag> > globalTrans_;
-
-    /// \brief optional functor returning external load balancing information
-    ///
-    /// If it is set then this will be used during loadbalance.
-    static std::optional<std::function<std::vector<int> (const Grid&)>> externalLoadBalancer_;
-    int mpiRank;
 };
 
-template<class TypeTag>
-std::optional<std::function<std::vector<int>(const typename EclCpGridVanguard<TypeTag>::Grid&)>>
-Opm::EclCpGridVanguard<TypeTag>::externalLoadBalancer_;
-
 } // namespace Opm
 
 #endif
ebos/eclgenericcpgridvanguard.cc (new file, 380 lines)
@@ -0,0 +1,380 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*
+  This file is part of the Open Porous Media project (OPM).
+
+  OPM is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 2 of the License, or
+  (at your option) any later version.
+
+  OPM is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with OPM. If not, see <http://www.gnu.org/licenses/>.
+
+  Consult the COPYING file in the top-level source directory of this
+  module for the precise wording of the license and the list of
+  copyright holders.
+*/
+
+#include <config.h>
+#include <ebos/eclgenericcpgridvanguard.hh>
+
+#if HAVE_MPI
+#include <ebos/eclmpiserializer.hh>
+#endif
+
+#include <opm/common/utility/ActiveGridCells.hpp>
+#include <opm/grid/cpgrid/GridHelpers.hpp>
+#include <opm/parser/eclipse/EclipseState/Schedule/Schedule.hpp>
+#include <opm/simulators/utils/ParallelEclipseState.hpp>
+#include <opm/simulators/utils/PropsCentroidsDataHandle.hpp>
+#include <opm/simulators/utils/ParallelSerialization.hpp>
+
+#include <dune/common/version.hh>
+#include <dune/grid/common/mcmgmapper.hh>
+
+#if HAVE_DUNE_FEM
+#include <dune/fem/gridpart/adaptiveleafgridpart.hh>
+#include <dune/fem/gridpart/common/gridpart2gridview.hh>
+#include <ebos/femcpgridcompat.hh>
+#endif
+
+#include <fmt/format.h>
+
+#include <cassert>
+#include <numeric>
+#include <sstream>
+
+namespace Opm {
+
+template<class ElementMapper, class GridView, class Scalar>
+EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::EclGenericCpGridVanguard()
+{
+#if HAVE_MPI
+    MPI_Comm_rank(MPI_COMM_WORLD, &mpiRank);
+#else
+    mpiRank = 0;
+#endif
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::releaseEquilGrid()
+{
+    equilGrid_.reset();
+    equilCartesianIndexMapper_.reset();
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doLoadBalance_(Dune::EdgeWeightMethod edgeWeightsMethod,
+                                                                             bool ownersFirst, bool serialPartitioning,
+                                                                             bool enableDistributedWells, double zoltanImbalanceTol,
+                                                                             const GridView& gridv, const Schedule& schedule,
+                                                                             std::vector<double>& centroids,
+                                                                             EclipseState& eclState1,
+                                                                             EclGenericVanguard::ParallelWellStruct& parallelWells)
+{
+#if HAVE_MPI
+    int mpiSize = 1;
+    MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+
+    if (mpiSize > 1) {
+        // the CpGrid's loadBalance() method likes to have the transmissibilities as
+        // its edge weights. since this is (kind of) a layering violation and
+        // transmissibilities are relatively expensive to compute, we only do it if
+        // more than a single process is involved in the simulation.
+        cartesianIndexMapper_.reset(new CartesianIndexMapper(*grid_));
+        if (grid_->size(0))
+        {
+            this->allocTrans();
+        }
+
+        // convert to transmissibility for faces
+        // TODO: grid_->numFaces() is not generic. use grid_->size(1) instead? (might
+        // not work)
+        const auto& gridView = grid_->leafGridView();
+        unsigned numFaces = grid_->numFaces();
+        std::vector<double> faceTrans;
+        int loadBalancerSet = externalLoadBalancer_.has_value();
+        grid_->comm().broadcast(&loadBalancerSet, 1, 0);
+        if (!loadBalancerSet){
+            faceTrans.resize(numFaces, 0.0);
+            ElementMapper elemMapper(gridv, Dune::mcmgElementLayout());
+            auto elemIt = gridView.template begin</*codim=*/0>();
+            const auto& elemEndIt = gridView.template end</*codim=*/0>();
+            for (; elemIt != elemEndIt; ++ elemIt) {
+                const auto& elem = *elemIt;
+                auto isIt = gridView.ibegin(elem);
+                const auto& isEndIt = gridView.iend(elem);
+                for (; isIt != isEndIt; ++ isIt) {
+                    const auto& is = *isIt;
+                    if (!is.neighbor())
+                        continue;
+
+                    unsigned I = elemMapper.index(is.inside());
+                    unsigned J = elemMapper.index(is.outside());
+
+                    // FIXME (?): this is not portable!
+                    unsigned faceIdx = is.id();
+
+                    faceTrans[faceIdx] = this->getTransmissibility(I,J);
+                }
+            }
+        }
+
+        //distribute the grid and switch to the distributed view.
+        {
+            const auto wells = schedule.getWellsatEnd();
+
+            try
+            {
+                auto& eclState = dynamic_cast<ParallelEclipseState&>(eclState1);
+                const EclipseGrid* eclGrid = nullptr;
+
+                if (grid_->comm().rank() == 0)
+                {
+                    eclGrid = &eclState.getInputGrid();
+                }
+
+                PropsCentroidsDataHandle<Dune::CpGrid> handle(*grid_, eclState, eclGrid, centroids,
+                                                              cartesianIndexMapper());
+                if (loadBalancerSet)
+                {
+                    std::vector<int> parts;
+                    if (grid_->comm().rank() == 0)
+                    {
+                        parts = (*externalLoadBalancer_)(*grid_);
+                    }
+                    parallelWells = std::get<1>(grid_->loadBalance(handle, parts, &wells, ownersFirst, false, 1));
+                }
+                else
+                {
+                    parallelWells =
+                        std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, serialPartitioning,
+                                                       faceTrans.data(), ownersFirst, false, 1, true, zoltanImbalanceTol,
+                                                       enableDistributedWells));
+                }
+            }
+            catch(const std::bad_cast& e)
+            {
+                std::ostringstream message;
+                message << "Parallel simulator setup is incorrect as it does not use ParallelEclipseState ("
+                        << e.what() <<")"<<std::flush;
+                OpmLog::error(message.str());
+                std::rethrow_exception(std::current_exception());
+            }
+        }
+        grid_->switchToDistributedView();
+
+        cartesianIndexMapper_.reset();
+
+        // Calling Schedule::filterConnections would remove any perforated
+        // cells that exist only on other ranks even in the case of distributed wells
+        // But we need all connections to figure out the first cell of a well (e.g. for
+        // pressure). Hence this is now skipped. Rank 0 had everything even before.
+    }
+#endif
+    this->cartesianIndexMapper_.reset(new CartesianIndexMapper(this->grid()));
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::distributeFieldProps_(EclipseState& eclState1)
+{
+#if HAVE_MPI
+    int mpiSize = 1;
+    MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+
+    if (mpiSize > 1) {
+        try
+        {
+            auto& parallelEclState = dynamic_cast<ParallelEclipseState&>(eclState1);
+            // reset cartesian index mapper for auto creation of field properties
+            parallelEclState.resetCartesianMapper(cartesianIndexMapper_.get());
+            parallelEclState.switchToDistributedProps();
+        }
+        catch(const std::bad_cast& e)
+        {
+            std::ostringstream message;
+            message << "Parallel simulator setup is incorrect as it does not use ParallelEclipseState ("
+                    << e.what() <<")"<<std::flush;
+            OpmLog::error(message.str());
+            std::rethrow_exception(std::current_exception());
+        }
+    }
+#endif
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(EclipseState& eclState)
+{
+    const EclipseGrid* input_grid = nullptr;
+    std::vector<double> global_porv;
+    // At this stage the ParallelEclipseState instance is still in global
+    // view; on rank 0 we have undistributed data for the entire grid, on
+    // the other ranks the EclipseState is empty.
+    if (mpiRank == 0) {
+        input_grid = &eclState.getInputGrid();
+        global_porv = eclState.fieldProps().porv(true);
+        OpmLog::info("\nProcessing grid");
+    }
+
+    grid_.reset(new Dune::CpGrid());
+    const auto& removed_cells = grid_->processEclipseFormat(input_grid,
+                                                            &eclState,
+                                                            /*isPeriodic=*/false,
+                                                            /*flipNormals=*/false,
+                                                            /*clipZ=*/false);
+
+    if (mpiRank == 0) {
+        const auto& active_porv = eclState.fieldProps().porv(false);
+        const auto& unit_system = eclState.getUnits();
+        const auto& volume_unit = unit_system.name( UnitSystem::measure::volume);
+        double total_pore_volume = unit_system.from_si( UnitSystem::measure::volume, std::accumulate(active_porv.begin(), active_porv.end(), 0.0));
+        OpmLog::info(fmt::format("Total number of active cells: {} / total pore volume: {:0.0f} {}", grid_->numCells(), total_pore_volume , volume_unit));
+
+        double removed_pore_volume = 0;
+        for (const auto& global_index : removed_cells)
+            removed_pore_volume += active_porv[ eclState.getInputGrid().activeIndex(global_index) ];
+
+        if (removed_pore_volume > 0) {
+            removed_pore_volume = unit_system.from_si( UnitSystem::measure::volume, removed_pore_volume );
+            OpmLog::info(fmt::format("Removed {} cells with a pore volume of {:0.0f} {} ({:5.3f} %) due to MINPV/MINPVV",
+                                     removed_cells.size(),
+                                     removed_pore_volume,
+                                     volume_unit,
+                                     100 * removed_pore_volume / total_pore_volume));
+        }
+
+    }
+#if HAVE_MPI
+    {
+        const bool has_numerical_aquifer = eclState.aquifer().hasNumericalAquifer();
+        int mpiSize = 1;
+        MPI_Comm_size(MPI_COMM_WORLD, &mpiSize);
+        // when there is numerical aquifers, new NNC are generated during grid processing
+        // we need to pass the NNC from root process to other processes
+        if (has_numerical_aquifer && mpiSize > 1) {
+            auto nnc_input = eclState.getInputNNC();
+            EclMpiSerializer ser(Dune::MPIHelper::getCollectiveCommunication());
+            ser.broadcast(nnc_input);
+            if (mpiRank > 0) {
+                eclState.setInputNNC(nnc_input);
+            }
+        }
+    }
+#endif
+
+    // we use separate grid objects: one for the calculation of the initial condition
+    // via EQUIL and one for the actual simulation. The reason is that the EQUIL code
+    // is allergic to distributed grids and the simulation grid is distributed before
+    // the initial condition is calculated.
+    // After loadbalance grid_ will contain a global and distribute view.
+    // equilGrid_being a shallow copy only the global view.
+    if (mpiRank == 0)
+    {
+        equilGrid_.reset(new Dune::CpGrid(*grid_));
+        equilCartesianIndexMapper_.reset(new CartesianIndexMapper(*equilGrid_));
+
+        std::vector<int> actnum = UgGridHelpers::createACTNUM(*grid_);
+        auto &field_props = eclState.fieldProps();
+        const_cast<FieldPropsManager&>(field_props).reset_actnum(actnum);
+    }
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doFilterConnections_(Schedule& schedule)
+{
+    // We only filter if we hold the global grid. Otherwise the filtering
+    // is done after load balancing as in the future the other processes
+    // will hold an empty partition for the global grid and hence filtering
+    // here would remove all well connections.
+    if (equilGrid_)
+    {
+        ActiveGridCells activeCells(equilGrid().logicalCartesianSize(),
+                                    equilGrid().globalCell().data(),
+                                    equilGrid().size(0));
+        schedule.filterConnections(activeCells);
+    }
+#if HAVE_MPI
+    try
+    {
+        // Broadcast another time to remove inactive peforations on
+        // slave processors.
+        eclScheduleBroadcast(schedule);
+    }
+    catch(const std::exception& broadcast_error)
+    {
+        OpmLog::error(fmt::format("Distributing properties to all processes failed\n"
+                                  "Internal error message: {}", broadcast_error.what()));
+        MPI_Finalize();
+        std::exit(EXIT_FAILURE);
+    }
+#endif
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+const Dune::CpGrid& EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilGrid() const
+{
+    assert(mpiRank == 0);
+    return *equilGrid_;
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+const Dune::CartesianIndexMapper<Dune::CpGrid>& EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::cartesianIndexMapper() const
+{
+    return *cartesianIndexMapper_;
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+const Dune::CartesianIndexMapper<Dune::CpGrid>& EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilCartesianIndexMapper() const
+{
+    assert(mpiRank == 0);
+    assert(equilCartesianIndexMapper_);
+    return *equilCartesianIndexMapper_;
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+Scalar EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::computeCellThickness(const typename GridView::template Codim<0>::Entity& element) const
+{
+    typedef typename Element::Geometry Geometry;
+    static constexpr int zCoord = Element::dimension - 1;
+    Scalar zz1 = 0.0;
+    Scalar zz2 = 0.0;
+
+    const Geometry& geometry = element.geometry();
+    // This code only works with CP-grid where the
+    // number of corners are 8 and
+    // also assumes that the first
+    // 4 corners are the top surface and
+    // the 4 next are the bottomn.
+    assert(geometry.corners() == 8);
+    for (int i=0; i < 4; ++i){
+        zz1 += geometry.corner(i)[zCoord];
+        zz2 += geometry.corner(i+4)[zCoord];
+    }
+    zz1 /=4;
+    zz2 /=4;
+    return zz2-zz1;
+}
+
+template<class ElementMapper, class GridView, class Scalar>
+std::optional<std::function<std::vector<int> (const Dune::CpGrid&)>>
+EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::externalLoadBalancer_;
+
+#if HAVE_DUNE_FEM
+template class EclGenericCpGridVanguard<Dune::MultipleCodimMultipleGeomTypeMapper<
+                                            Dune::GridView<Dune::Fem::GridPart2GridViewTraits<Dune::Fem::AdaptiveLeafGridPart<Dune::CpGrid, Dune::PartitionIteratorType(4), false>>>>,
+                                        Dune::GridView<Dune::Fem::GridPart2GridViewTraits<Dune::Fem::AdaptiveLeafGridPart<Dune::CpGrid, Dune::PartitionIteratorType(4), false>>>,
+                                        double>;
+#else
+template class EclGenericCpGridVanguard<Dune::MultipleCodimMultipleGeomTypeMapper<Dune::GridView<Dune::DefaultLeafGridViewTraits<Dune::CpGrid>>,Dune::Impl::MCMGFailLayout>,
+                                        Dune::GridView<Dune::DefaultLeafGridViewTraits<Dune::CpGrid>>,
+                                        double>;
+#endif
+} // namespace Opm
ebos/eclgenericcpgridvanguard.hh (new file, 137 lines)
@@ -0,0 +1,137 @@
+// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+// vi: set et ts=4 sw=4 sts=4:
+/*
+  This file is part of the Open Porous Media project (OPM).
+
+  OPM is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 2 of the License, or
+  (at your option) any later version.
+
+  OPM is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with OPM. If not, see <http://www.gnu.org/licenses/>.
+
+  Consult the COPYING file in the top-level source directory of this
+  module for the precise wording of the license and the list of
+  copyright holders.
+*/
+/*!
+ * \file
+ * \copydoc Opm::EclCpGridVanguard
+ */
+#ifndef EWOMS_ECL_CP_GRID_GENERIC_VANGUARD_HH
+#define EWOMS_ECL_CP_GRID_GENERIC_VANGUARD_HH
+
+#include <ebos/eclgenericvanguard.hh>
+#include <opm/grid/CpGrid.hpp>
+
+#include <functional>
+
+namespace Opm {
+
+template<class ElementMapper, class GridView, class Scalar>
+class EclGenericCpGridVanguard {
+protected:
+    using CartesianIndexMapper = Dune::CartesianIndexMapper<Dune::CpGrid>;
+    using Element = typename GridView::template Codim<0>::Entity;
+
+public:
+    EclGenericCpGridVanguard();
+
+    /*!
+     * \brief Return a reference to the simulation grid.
+     */
+    Dune::CpGrid& grid()
+    { return *grid_; }
+
+    /*!
+     * \brief Return a reference to the simulation grid.
+     */
+    const Dune::CpGrid& grid() const
+    { return *grid_; }
+
+    /*!
+     * \brief Returns a refefence to the grid which should be used by the EQUIL
+     *        initialization code.
+     *
+     * The EQUIL keyword is used to specify the initial condition of the reservoir in
+     * hydrostatic equilibrium. Since the code which does this is not accepting arbitrary
+     * DUNE grids (the code is part of the opm-core module), this is not necessarily the
+     * same as the grid which is used for the actual simulation.
+     */
+    const Dune::CpGrid& equilGrid() const;
+
+    /*!
+     * \brief Indicates that the initial condition has been computed and the memory used
+     *        by the EQUIL grid can be released.
+     *
+     * Depending on the implementation, subsequent accesses to the EQUIL grid lead to
+     * crashes.
+     */
+    void releaseEquilGrid();
+
+    /// \brief Sets a function that returns external load balancing information when passed the grid
+    ///
+    /// The information is a vector of integers indication the partition index for each cell id.
+    static void setExternalLoadBalancer(const std::function<std::vector<int> (const Dune::CpGrid&)>& loadBalancer)
+    {
+        externalLoadBalancer_ = loadBalancer;
+    }
+
+    /*!
+     * \brief Returns the object which maps a global element index of the simulation grid
+     *        to the corresponding element index of the logically Cartesian index.
+     */
+    const CartesianIndexMapper& cartesianIndexMapper() const;
+
+    /*!
+     * \brief Returns mapper from compressed to cartesian indices for the EQUIL grid
+     */
+    const CartesianIndexMapper& equilCartesianIndexMapper() const;
+
+protected:
+    /*!
+     * \brief Distribute the simulation grid over multiple processes
+     *
+     * (For parallel simulation runs.)
+     */
+    void doLoadBalance_(Dune::EdgeWeightMethod edgeWeightsMethod,
+                        bool ownersFirst, bool serialPartitioning,
+                        bool enableDistributedWells, double zoltanImbalanceTol,
+                        const GridView& gridv, const Schedule& schedule,
+                        std::vector<double>& centroids,
+                        EclipseState& eclState,
+                        EclGenericVanguard::ParallelWellStruct& parallelWells);
+
+    void doCreateGrids_(EclipseState& eclState);
+
+    void distributeFieldProps_(EclipseState& eclState);
+
+    virtual void allocTrans() = 0;
+    virtual double getTransmissibility(unsigned I, unsigned J) = 0;
+
+    // removing some connection located in inactive grid cells
+    void doFilterConnections_(Schedule& schedule);
+
+    Scalar computeCellThickness(const Element& element) const;
+
+    std::unique_ptr<Dune::CpGrid> grid_;
+    std::unique_ptr<Dune::CpGrid> equilGrid_;
+    std::unique_ptr<CartesianIndexMapper> cartesianIndexMapper_;
+    std::unique_ptr<CartesianIndexMapper> equilCartesianIndexMapper_;
+
+    /// \brief optional functor returning external load balancing information
+    ///
+    /// If it is set then this will be used during loadbalance.
+    static std::optional<std::function<std::vector<int> (const Dune::CpGrid&)>> externalLoadBalancer_;
+    int mpiRank;
+};
+
+} // namespace Opm
+
+#endif
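A side note on the external load-balancer hook visible in the new header: it is now
keyed on Dune::CpGrid directly instead of the typetag-derived Grid type, so a caller
can register it without knowing the type tag. A hedged usage sketch (trivialPartition
is a made-up placeholder; a real caller would supply output from Zoltan or a custom
partitioner):

    // Returns one entry per global cell: the MPI rank that should own it.
    std::vector<int> trivialPartition(const Dune::CpGrid& grid)
    {
        // Keep every cell on rank 0 -- only useful as an illustration.
        return std::vector<int>(grid.size(/*codim=*/0), 0);
    }

    // During simulator setup, before loadBalance() runs (template arguments
    // abbreviated; they match the instantiations at the end of the .cc file):
    //     EclGenericCpGridVanguard<ElementMapper, GridView, double>::
    //         setExternalLoadBalancer(trivialPartition);

doLoadBalance_() broadcasts whether the functor is set; if so, rank 0 evaluates it
and the resulting partition vector is handed to CpGrid::loadBalance() in place of
the transmissibility-based edge weights.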