mirror of https://github.com/OPM/opm-simulators.git
synced 2025-02-25 18:55:30 -06:00

Merge pull request #5968 from bska/tidy-up-grid-proc-impl

Tidy Up Parts of Grid Processing Implementation
commit 5f40515cec

@@ -113,30 +113,34 @@ FlowGenericVanguard::FlowGenericVanguard(SimulationModelParams&& params)
     edgeWeightsMethod_ = Dune::EdgeWeightMethod(Parameters::Get<Parameters::EdgeWeightsMethod>());

 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
     numJacobiBlocks_ = Parameters::Get<Parameters::NumJacobiBlocks>();
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA

     ownersFirst_ = Parameters::Get<Parameters::OwnerCellsFirst>();
 #if HAVE_MPI
     numOverlap_ = Parameters::Get<Parameters::NumOverlap>();
     addCorners_ = Parameters::Get<Parameters::AddCorners>();
     partitionMethod_ = Dune::PartitionMethod(Parameters::Get<Parameters::PartitionMethod>());
     serialPartitioning_ = Parameters::Get<Parameters::SerialPartitioning>();
     zoltanParams_ = Parameters::Get<Parameters::ZoltanParams>();
+
     metisParams_ = Parameters::Get<Parameters::MetisParams>();

     externalPartitionFile_ = Parameters::Get<Parameters::ExternalPartition>();
-#endif
+#endif // HAVE_MPI
+
     enableDistributedWells_ = Parameters::Get<Parameters::AllowDistributedWells>();
     enableEclOutput_ = Parameters::Get<Parameters::EnableEclOutput>();
     allow_splitting_inactive_wells_ = Parameters::Get<Parameters::AllowSplittingInactiveWells>();
     ignoredKeywords_ = Parameters::Get<Parameters::IgnoreKeywords>();
-    int output_param = Parameters::Get<Parameters::EclOutputInterval>();
+
+    const int output_param = Parameters::Get<Parameters::EclOutputInterval>();
     if (output_param >= 0) {
         outputInterval_ = output_param;
     }
+
     useMultisegmentWell_ = Parameters::Get<Parameters::UseMultisegmentWell>();
 }

 FlowGenericVanguard::SimulationModelParams
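
Note: the pattern above — tagging each closing #endif with its guard expression and reading a tunable into a const local — is easier to see in isolation. A compilable sketch of both conventions; HAVE_FEATURE_A/HAVE_FEATURE_B are made-up macros, not the OPM build flags:

#include <iostream>

#define HAVE_FEATURE_A 1
#define HAVE_FEATURE_B 0

int main()
{
    // With several #endif lines in view, the trailing comments make it
    // obvious which condition each directive closes.
#if HAVE_FEATURE_A
    std::cout << "feature A enabled\n";
#if HAVE_FEATURE_B
    std::cout << "feature B enabled\n";
#endif // HAVE_FEATURE_B
#endif // HAVE_FEATURE_A

    // Reading a tunable into a const local, as the constructor now does
    // with output_param, documents that the value is fixed after the read.
    const int output_interval = 10; // stand-in for Parameters::Get<...>()
    if (output_interval >= 0) {
        std::cout << "output every " << output_interval << " steps\n";
    }
}
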
@@ -449,7 +453,7 @@ void FlowGenericVanguard::registerParameters_()
 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
     Parameters::Register<Parameters::NumJacobiBlocks>
         ("Number of blocks to be created for the Block-Jacobi preconditioner.");
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA

     Parameters::Register<Parameters::OwnerCellsFirst>
         ("Order cells owned by rank before ghost/overlap cells.");

@@ -489,7 +493,8 @@ void FlowGenericVanguard::registerParameters_()

     Parameters::Hide<Parameters::ZoltanImbalanceTol<Scalar>>();
     Parameters::Hide<Parameters::ZoltanParams>();
-#endif
+#endif // HAVE_MPI
+
     Parameters::Register<Parameters::AllowDistributedWells>
         ("Allow the perforations of a well to be distributed to interior of multiple processes");
     Parameters::Register<Parameters::AllowSplittingInactiveWells>

@@ -503,6 +508,6 @@ template void FlowGenericVanguard::registerParameters_<double>();

 #if FLOW_INSTANTIATE_FLOAT
 template void FlowGenericVanguard::registerParameters_<float>();
-#endif
+#endif // FLOW_INSTANTIATE_FLOAT

 } // namespace Opm
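
Note: the Parameters::Register/Parameters::Hide calls in these hunks follow a common registry pattern — every tunable is registered with a help string, and expert-only tunables are registered but hidden from the default help listing. A self-contained sketch of that pattern; the ParamRegistry class is hypothetical, not the OPM parameter system:

#include <iostream>
#include <map>
#include <string>

class ParamRegistry
{
public:
    // Register a tunable together with its user-facing help text.
    void registerParam(const std::string& name, const std::string& help)
    {
        params_[name] = Entry{help, /*hidden=*/false};
    }

    // Keep the parameter usable, but drop it from the default --help output.
    void hide(const std::string& name) { params_.at(name).hidden = true; }

    void printHelp() const
    {
        for (const auto& [name, e] : params_) {
            if (!e.hidden) {
                std::cout << "  --" << name << ": " << e.help << '\n';
            }
        }
    }

private:
    struct Entry { std::string help; bool hidden = false; };
    std::map<std::string, Entry> params_;
};

int main()
{
    ParamRegistry reg;
    reg.registerParam("owner-cells-first",
                      "Order cells owned by rank before ghost/overlap cells.");
    reg.registerParam("zoltan-params", "Expert partitioner settings.");
    reg.hide("zoltan-params"); // registered, but not advertised

    reg.printHelp(); // lists only owner-cells-first
}
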
@@ -66,7 +66,7 @@ struct MetisParams { static constexpr auto value = "default"; };

 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
 struct NumJacobiBlocks { static constexpr int value = 0; };
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA

 struct OwnerCellsFirst { static constexpr bool value = true; };
 struct ParsingStrictness { static constexpr auto value = "normal"; };

@@ -248,7 +248,7 @@ public:
         return numJacobiBlocks_;
 #else
         return 0;
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
     }

     /*!
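
Note: this getter pairs with a data member that only exists when one of the GPU backends is compiled in; callers always get a usable answer (0 blocks) without writing #ifdefs themselves. A compilable sketch of the idiom, using a made-up HAVE_GPU_BACKEND macro in place of the real build flags:

#include <iostream>

#define HAVE_GPU_BACKEND 0 // toggle to emulate a build without GPU support

// Hypothetical class mirroring the numJacobiBlocks() pattern: the data
// member only exists when the feature is compiled in, and the accessor
// degrades to a neutral value otherwise.
class Vanguard
{
public:
    int numJacobiBlocks() const
    {
#if HAVE_GPU_BACKEND
        return numJacobiBlocks_;
#else
        return 0;
#endif // HAVE_GPU_BACKEND
    }

private:
#if HAVE_GPU_BACKEND
    int numJacobiBlocks_{0};
#endif // HAVE_GPU_BACKEND
};

int main()
{
    Vanguard v;
    std::cout << "Jacobi blocks: " << v.numJacobiBlocks() << '\n';
}
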
@@ -297,7 +297,7 @@ public:
     {
         return this->externalPartitionFile_;
     }
-#endif
+#endif // HAVE_MPI

     /*!
      * \brief Whether perforations of a well might be distributed.
@@ -367,12 +367,13 @@ protected:

 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
     int numJacobiBlocks_{0};
-#endif
+#endif // HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA

     bool ownersFirst_;
 #if HAVE_MPI
     bool addCorners_;
     int numOverlap_;
+
     Dune::PartitionMethod partitionMethod_;
     bool serialPartitioning_;
     double imbalanceTol_;

@@ -384,7 +385,8 @@ protected:
     std::string metisParams_;

     std::string externalPartitionFile_{};
-#endif
+#endif // HAVE_MPI
+
     bool enableDistributedWells_;
     bool enableEclOutput_;
     bool allow_splitting_inactive_wells_;

@@ -428,12 +428,14 @@ distributeGrid(const Dune::EdgeWeightMethod edgeWeights

 template<class ElementMapper, class GridView, class Scalar>
 void GenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(EclipseState& eclState)
 {
+    const auto isRoot = this->mpiRank == 0;
+
+    // At this stage the ParallelEclipseState instance is still in global
+    // view; on rank 0 we have undistributed data for the entire grid, on
+    // the other ranks the EclipseState is empty.
     const EclipseGrid* input_grid = nullptr;
     std::vector<double> global_porv;
-    if (mpiRank == 0) {
+    if (isRoot) {
         input_grid = &eclState.getInputGrid();
         global_porv = eclState.fieldProps().porv(true);
         OpmLog::info("\nProcessing grid");
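
Note: the new isRoot flag names a predicate that doCreateGrids_() tests repeatedly, and the new comment spells out the root-holds-everything starting state. A minimal MPI program (illustrative only, not OPM code) showing the same root-reads-then-broadcast shape:

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Compute the rank test once and give it a name, instead of
    // repeating `rank == 0` comparisons throughout the function.
    const bool isRoot = (rank == 0);

    // Only the root process holds the full input; the others start empty.
    double globalValue = 0.0;
    if (isRoot) {
        globalValue = 42.0; // stand-in for reading the input deck
    }

    // Everyone receives the root's value.
    MPI_Bcast(&globalValue, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
    std::printf("rank %d sees %f\n", rank, globalValue);

    MPI_Finalize();
}
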
@@ -441,6 +443,7 @@ void GenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Eclips

     // --- Create grid ---
     OPM_TIMEBLOCK(createGrids);
+
 #if HAVE_MPI
     this->grid_ = std::make_unique<Dune::CpGrid>(FlowGenericVanguard::comm());
 #else
@@ -448,14 +451,14 @@ void GenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Eclips
 #endif

     // Note: removed_cells is guaranteed to be empty on ranks other than 0.
-    auto removed_cells =
-        this->grid_->processEclipseFormat(input_grid,
-                                          &eclState,
-                                          /*isPeriodic=*/false,
-                                          /*flipNormals=*/false,
-                                          /*clipZ=*/false);
+    auto removed_cells = this->grid_
+        ->processEclipseFormat(input_grid,
+                               &eclState,
+                               /* isPeriodic = */ false,
+                               /* flipNormals = */ false,
+                               /* clipZ = */ false);

-    if (mpiRank == 0) {
+    if (isRoot) {
         const auto& active_porv = eclState.fieldProps().porv(false);
         const auto& unit_system = eclState.getUnits();
         const auto& volume_unit = unit_system.name( UnitSystem::measure::volume);
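
Note: the reflowed processEclipseFormat() call keeps the /* isPeriodic = */ false style of labelling boolean arguments at the call site. A small stand-alone illustration of that style, with a hypothetical processGrid() function:

#include <iostream>

// Hypothetical function with several easily confused boolean flags.
void processGrid(bool isPeriodic, bool flipNormals, bool clipZ)
{
    std::cout << std::boolalpha
              << "periodic=" << isPeriodic
              << " flip=" << flipNormals
              << " clip=" << clipZ << '\n';
}

int main()
{
    // Without the comments, three bare `false` arguments would be opaque.
    processGrid(/* isPeriodic = */ false,
                /* flipNormals = */ false,
                /* clipZ = */ false);
}
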
@@ -480,45 +483,38 @@ void GenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Eclips

     // --- Add LGRs and update Leaf Grid View ---
-    // Check if input file contains Lgrs.
-    const auto& lgrs = eclState.getLgrs();
-    const auto lgrsSize = lgrs.size();
-    //
-    // If there are lgrs, create the grid with them, and update the leaf grid view.
-    if (lgrsSize)
-    {
+    if (const auto& lgrs = eclState.getLgrs(); lgrs.size() > 0) {
         OpmLog::info("\nAdding LGRs to the grid and updating its leaf grid view");
-        this->addLgrsUpdateLeafView(lgrs, lgrsSize, *(this->grid_));
+        this->addLgrsUpdateLeafView(lgrs, lgrs.size(), *this->grid_);
     }

 #if HAVE_MPI
     {
-        const bool has_numerical_aquifer = eclState.aquifer().hasNumericalAquifer();
-        int mpiSize = 1;
-        MPI_Comm_size(grid_->comm(), &mpiSize);
-
-        // when there is numerical aquifers, new NNC are generated during
-        // grid processing we need to pass the NNC from root process to
-        // other processes
-        if ( mpiSize > 1)
-        {
-            if (has_numerical_aquifer) {
+        if (this->grid_->comm().size() > 1) {
+            // Numerical aquifers generate new NNCs during grid processing. We
+            // need to distribute these NNCs from the root process to all other
+            // MPI processes.
+            if (eclState.aquifer().hasNumericalAquifer()) {
                 auto nnc_input = eclState.getInputNNC();
-                Parallel::MpiSerializer ser(grid_->comm());
+                Parallel::MpiSerializer ser(this->grid_->comm());
                 ser.broadcast(nnc_input);
-                if (mpiRank > 0) {
+
+                if (! isRoot) {
                     eclState.setInputNNC(nnc_input);
                 }
             }
+
             bool hasPinchNnc = eclState.hasPinchNNC();
             grid_->comm().broadcast(&hasPinchNnc, 1, 0);

-            if(hasPinchNnc)
-            {
+            if (hasPinchNnc) {
                 auto pinch_nnc = eclState.getPinchNNC();
-                Parallel::MpiSerializer ser(grid_->comm());
+                Parallel::MpiSerializer ser(this->grid_->comm());
                 ser.broadcast(pinch_nnc);
-                if (mpiRank > 0) {
+
+                if (! isRoot) {
                     eclState.setPinchNNC(std::move(pinch_nnc));
                 }
             }
         }
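
Note: the LGR block above now uses a C++17 if-statement with initializer, which scopes lgrs to the statement itself and removes the separate lgrsSize local of the old code. A stand-alone sketch with a stand-in getLgrs():

#include <iostream>
#include <string>
#include <vector>

// Stand-in for eclState.getLgrs(); returns the local grid refinements.
std::vector<std::string> getLgrs()
{
    return {"LGR1", "LGR2"};
}

int main()
{
    // The initializer binds `lgrs` for the duration of the if statement
    // only, so there is no stale variable left over afterwards.
    if (const auto& lgrs = getLgrs(); lgrs.size() > 0) {
        std::cout << "Adding " << lgrs.size() << " LGRs to the grid\n";
    }
    // `lgrs` is out of scope here.
}
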
@@ -533,21 +529,20 @@ void GenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(Eclips
     //
     // After loadbalance, grid_ will contain a global and distribute view.
     // equilGrid_ being a shallow copy only the global view.
-    if (mpiRank == 0)
-    {
-        equilGrid_.reset(new Dune::CpGrid(*grid_));
-        equilCartesianIndexMapper_ = std::make_unique<CartesianIndexMapper>(*equilGrid_);
+    if (isRoot) {
+        this->equilGrid_ = std::make_unique<Dune::CpGrid>(*this->grid_);
+        this->equilCartesianIndexMapper_ =
+            std::make_unique<CartesianIndexMapper>(*this->equilGrid_);

-        eclState.reset_actnum(UgGridHelpers::createACTNUM(*grid_));
+        eclState.reset_actnum(UgGridHelpers::createACTNUM(*this->grid_));
         eclState.set_active_indices(this->grid_->globalCell());
     }

     {
         auto size = removed_cells.size();

         this->grid_->comm().broadcast(&size, 1, 0);

-        if (mpiRank != 0) {
+        if (! isRoot) {
             removed_cells.resize(size);
         }
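
Note: the removed_cells block above broadcasts the element count first so that non-root ranks can size their buffers before receiving the data itself. A minimal MPI sketch (hypothetical, not OPM code) of that two-step shape:

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    const bool isRoot = (rank == 0);

    std::vector<int> removed;
    if (isRoot) {
        removed = {12, 57, 104}; // only the root knows the real data
    }

    // Step 1: agree on the size, then resize on non-root ranks.
    unsigned long size = removed.size();
    MPI_Bcast(&size, 1, MPI_UNSIGNED_LONG, 0, MPI_COMM_WORLD);
    if (!isRoot) {
        removed.resize(size);
    }

    // Step 2: ship the payload into the correctly sized buffers.
    MPI_Bcast(removed.data(), static_cast<int>(size), MPI_INT, 0, MPI_COMM_WORLD);
    std::printf("rank %d received %lu removed cells\n", rank, size);

    MPI_Finalize();
}
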
@@ -236,11 +236,17 @@ public:
         // not required for this type of grid yet (only from bdaBridge??)
         return {};
     }

 protected:
     void createGrids_()
     {
-        grid_ = std::make_unique<Grid>(this->eclState().getInputGrid(), this->eclState().fieldProps().porv(true));
-        cartesianIndexMapper_ = std::make_unique<CartesianIndexMapper>(*grid_);
+        this->grid_ = std::make_unique<Grid>
+            (this->eclState().getInputGrid(),
+             this->eclState().fieldProps().porv(true));
+
+        this->cartesianIndexMapper_ =
+            std::make_unique<CartesianIndexMapper>(*this->grid_);
+
         this->updateGridView_();
         this->updateCartesianToCompressedMapping_();
         this->updateCellDepths_();
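
Note: the equilGrid_ change a few hunks above swaps equilGrid_.reset(new ...) for an assignment from std::make_unique, the same smart-pointer style used in this hunk's this->grid_ assignment. A minimal sketch of the two spellings; the Grid struct here is hypothetical:

#include <iostream>
#include <memory>

struct Grid
{
    explicit Grid(int cells) : numCells(cells) {}
    Grid(const Grid&) = default; // equilGrid_ is copy-constructed from grid_
    int numCells;
};

int main()
{
    auto grid = std::make_unique<Grid>(1000);

    std::unique_ptr<Grid> equilGrid;
    equilGrid = std::make_unique<Grid>(*grid); // preferred: no naked new
    // equilGrid.reset(new Grid(*grid));       // the older spelling

    std::cout << "equil grid has " << equilGrid->numCells << " cells\n";
}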