Merge pull request #5505 from lisajulia/fix/COMPDAT-NLDD

Fix the use of the COMPDAT keyword inside an ACTIONX block when running the NLDD solver sequentially
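What the diff below does: Schedule::getPossibleFutureConnections() provides a map from well name to the global indices of cells that a COMPDAT queued inside an ACTIONX may still open (an std::unordered_map<std::string, std::set<int>>; the std::set<std::array<int,3>> spelling in the removed lines is the old IJK-based representation). That map is now threaded through the NLDD partitioning chain (partitionCells -> buildLocalGraph -> connectWells) and the CpGrid load-balancing calls, so cells of possible future connections stay in the same domain as their well. Below is a minimal sketch of the merging step, assuming the connections map and a global-to-local index map g2l as in the diff; the helper name wellCellsWithFutureConnections is hypothetical, but the lookup mirrors the connectWells() change further down:

    // Sketch only (not the commit's code): merge a well's possible future
    // connections into its cell list before the partition graph is built.
    // possibleFutureConnections maps well name -> global cell indices;
    // g2l maps global indices to local, on-rank cell IDs. Cells owned by
    // other ranks are simply skipped in this simplified version.
    #include <set>
    #include <string>
    #include <unordered_map>
    #include <vector>

    std::vector<int>
    wellCellsWithFutureConnections(const std::string& wellName,
                                   std::vector<int> wellCells,
                                   const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
                                   const std::unordered_map<int, int>& g2l)
    {
        const auto it = possibleFutureConnections.find(wellName);
        if (it != possibleFutureConnections.end()) {
            for (const int globalIndex : it->second) {
                const auto locPos = g2l.find(globalIndex);
                if (locPos != g2l.end()) {      // keep only cells on this rank
                    wellCells.push_back(locPos->second);
                }
            }
        }
        return wellCells;
    }

The real connectWells() in the diff does the same walk, but additionally counts off-rank indices in otherProc to detect wells that would straddle domains.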
Markus Blatt 2024-08-08 15:35:00 +02:00 committed by GitHub
commit a6006b0c93
10 changed files with 142 additions and 98 deletions

View File

@@ -953,9 +953,10 @@ private:
                 ? param.num_local_domains_
                 : num_cells / default_cells_per_domain;
+        const auto& possibleFutureConnectionSet = this->model_.simulator().vanguard().schedule().getPossibleFutureConnections();
         return ::Opm::partitionCells(param.local_domain_partition_method_,
                                      num_domains,
-                                     grid.leafGridView(), wells, zoltan_ctrl);
+                                     grid.leafGridView(), wells, possibleFutureConnectionSet, zoltan_ctrl);
     }

     std::vector<int> reconstitutePartitionVector() const

View File

@@ -216,7 +216,7 @@ doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
 #if HAVE_OPENCL || HAVE_ROCSPARSE || HAVE_CUDA
         if (partitionJacobiBlocks) {
             this->cell_part_ = this->grid_->
-                zoltanPartitionWithoutScatter(&wells, &possibleFutureConnections, faceTrans.data(),
+                zoltanPartitionWithoutScatter(&wells, possibleFutureConnections, faceTrans.data(),
                                               numJacobiBlocks,
                                               imbalanceTol);
         }
@@ -283,18 +283,18 @@ extractFaceTrans(const GridView& gridView) const
 template <class ElementMapper, class GridView, class Scalar>
 void
 GenericCpGridVanguard<ElementMapper, GridView, Scalar>::
-distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
-               const bool ownersFirst,
-               const Dune::PartitionMethod partitionMethod,
-               const bool serialPartitioning,
-               const bool enableDistributedWells,
-               const double imbalanceTol,
-               const bool loadBalancerSet,
-               const std::vector<double>& faceTrans,
-               const std::vector<Well>& wells,
-               const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
-               EclipseState& eclState1,
-               FlowGenericVanguard::ParallelWellStruct& parallelWells)
+distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+               const bool ownersFirst,
+               const Dune::PartitionMethod partitionMethod,
+               const bool serialPartitioning,
+               const bool enableDistributedWells,
+               const double imbalanceTol,
+               const bool loadBalancerSet,
+               const std::vector<double>& faceTrans,
+               const std::vector<Well>& wells,
+               const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+               EclipseState& eclState1,
+               FlowGenericVanguard::ParallelWellStruct& parallelWells)
 {
     if (auto* eclState = dynamic_cast<ParallelEclipseState*>(&eclState1);
         eclState != nullptr)
@@ -321,18 +321,18 @@ distributeGrid(const Dune::EdgeWeightMethod
 template <class ElementMapper, class GridView, class Scalar>
 void
 GenericCpGridVanguard<ElementMapper, GridView, Scalar>::
-distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
-               const bool ownersFirst,
-               const Dune::PartitionMethod partitionMethod,
-               const bool serialPartitioning,
-               const bool enableDistributedWells,
-               const double imbalanceTol,
-               const bool loadBalancerSet,
-               const std::vector<double>& faceTrans,
-               const std::vector<Well>& wells,
-               const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
-               ParallelEclipseState* eclState,
-               FlowGenericVanguard::ParallelWellStruct& parallelWells)
+distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+               const bool ownersFirst,
+               const Dune::PartitionMethod partitionMethod,
+               const bool serialPartitioning,
+               const bool enableDistributedWells,
+               const double imbalanceTol,
+               const bool loadBalancerSet,
+               const std::vector<double>& faceTrans,
+               const std::vector<Well>& wells,
+               const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+               ParallelEclipseState* eclState,
+               FlowGenericVanguard::ParallelWellStruct& parallelWells)
 {
     OPM_TIMEBLOCK(gridDistribute);
     const auto isIORank = this->grid_->comm().rank() == 0;
@@ -350,13 +350,13 @@ distributeGrid(const Dune::EdgeWeightMethod
                 : std::vector<int>{};
         //For this case, simple partitioning is selected automatically
         parallelWells =
-            std::get<1>(this->grid_->loadBalance(handle, parts, &wells, &possibleFutureConnections, ownersFirst,
+            std::get<1>(this->grid_->loadBalance(handle, parts, &wells, possibleFutureConnections, ownersFirst,
                                                  addCornerCells, overlapLayers));
     }
     else {
         parallelWells =
             std::get<1>(this->grid_->loadBalance(handle, edgeWeightsMethod,
-                                                 &wells, &possibleFutureConnections,
+                                                 &wells, possibleFutureConnections,
                                                  serialPartitioning,
                                                  faceTrans.data(), ownersFirst,
                                                  addCornerCells, overlapLayers,

View File

@@ -165,31 +165,31 @@ protected:
 private:
     std::vector<double> extractFaceTrans(const GridView& gridView) const;

-    void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
-                        const bool ownersFirst,
-                        const Dune::PartitionMethod partitionMethod,
-                        const bool serialPartitioning,
-                        const bool enableDistributedWells,
-                        const double imbalanceTol,
-                        const bool loadBalancerSet,
-                        const std::vector<double>& faceTrans,
-                        const std::vector<Well>& wells,
-                        const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
-                        EclipseState& eclState,
-                        FlowGenericVanguard::ParallelWellStruct& parallelWells);
+    void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                        const bool ownersFirst,
+                        const Dune::PartitionMethod partitionMethod,
+                        const bool serialPartitioning,
+                        const bool enableDistributedWells,
+                        const double imbalanceTol,
+                        const bool loadBalancerSet,
+                        const std::vector<double>& faceTrans,
+                        const std::vector<Well>& wells,
+                        const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+                        EclipseState& eclState,
+                        FlowGenericVanguard::ParallelWellStruct& parallelWells);

-    void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
-                        const bool ownersFirst,
-                        const Dune::PartitionMethod partitionMethod,
-                        const bool serialPartitioning,
-                        const bool enableDistributedWells,
-                        const double imbalanceTol,
-                        const bool loadBalancerSet,
-                        const std::vector<double>& faceTrans,
-                        const std::vector<Well>& wells,
-                        const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
-                        ParallelEclipseState* eclState,
-                        FlowGenericVanguard::ParallelWellStruct& parallelWells);
+    void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                        const bool ownersFirst,
+                        const Dune::PartitionMethod partitionMethod,
+                        const bool serialPartitioning,
+                        const bool enableDistributedWells,
+                        const double imbalanceTol,
+                        const bool loadBalancerSet,
+                        const std::vector<double>& faceTrans,
+                        const std::vector<Well>& wells,
+                        const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+                        ParallelEclipseState* eclState,
+                        FlowGenericVanguard::ParallelWellStruct& parallelWells);

 protected:
     virtual const std::string& zoltanParams() const = 0;

View File

@@ -106,9 +106,10 @@ public:
     /// \param[in] zoltan_ctrl Control parameters for on-rank subdomain
     ///    partitioning.
     template <class GridView, class Element>
-    void buildLocalGraph(const GridView& grid_view,
-                         const std::vector<Opm::Well>& wells,
-                         const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl);
+    void buildLocalGraph(const GridView& grid_view,
+                         const std::vector<Opm::Well>& wells,
+                         const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+                         const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl);

     /// Partition rank's interior cells into non-overlapping domains using
     /// the Zoltan graph partitioning software package.
@@ -178,9 +179,10 @@ private:
     /// \param[in] g2l Mapping from globally unique cell IDs to local,
     ///    on-rank active cell IDs. Return value from \c connectElements().
     template <typename Comm>
-    void connectWells(const Comm comm,
-                      const std::vector<Opm::Well>& wells,
-                      const std::unordered_map<int, int>& g2l);
+    void connectWells(const Comm comm,
+                      const std::vector<Opm::Well>& wells,
+                      const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+                      const std::unordered_map<int, int>& g2l);
 };

 // Note: "grid_view.size(0)" is intentional here. It is not an error. The
@@ -194,11 +196,12 @@ ZoltanPartitioner::ZoltanPartitioner(const GridView&
 {}

 template <class GridView, class Element>
-void ZoltanPartitioner::buildLocalGraph(const GridView& grid_view,
-                                        const std::vector<Opm::Well>& wells,
-                                        const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
+void ZoltanPartitioner::buildLocalGraph(const GridView& grid_view,
+                                        const std::vector<Opm::Well>& wells,
+                                        const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+                                        const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
 {
-    this->connectWells(grid_view.comm(), wells, this->connectElements(grid_view, zoltan_ctrl));
+    this->connectWells(grid_view.comm(), wells, possibleFutureConnections, this->connectElements(grid_view, zoltan_ctrl));
 }

 template <class GridView, class Element>
@@ -282,9 +285,10 @@ ZoltanPartitioner::connectElements(const GridView&
 }

 template <typename Comm>
-void ZoltanPartitioner::connectWells(const Comm comm,
-                                     const std::vector<Opm::Well>& wells,
-                                     const std::unordered_map<int, int>& g2l)
+void ZoltanPartitioner::connectWells(const Comm comm,
+                                     const std::vector<Opm::Well>& wells,
+                                     const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+                                     const std::unordered_map<int, int>& g2l)
 {
     auto distributedWells = 0;
@@ -301,6 +305,17 @@ void ZoltanPartitioner::connectWells(const Comm comm,
             cellIx.push_back(locPos->second);
         }

+        const auto possibleFutureConnectionSetIt = possibleFutureConnections.find(well.name());
+        if (possibleFutureConnectionSetIt != possibleFutureConnections.end()) {
+            for (auto& global_index : possibleFutureConnectionSetIt->second) {
+                auto locPos = g2l.find(global_index);
+                if (locPos == g2l.end()) {
+                    ++otherProc;
+                    continue;
+                }
+                cellIx.push_back(locPos->second);
+            }
+        }
+
         if ((otherProc > 0) && !cellIx.empty()) {
             ++distributedWells;
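In the added block above, every rank-local cell that a possible future COMPDAT could open is appended to the well's vertex list (cellIx), so the partition graph gains edges tying those cells to the rest of the well; global indices found on other ranks only increment otherProc, which feeds the distributed-well check that follows.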
@@ -338,10 +353,11 @@ This is not supported in the current NLDD implementation.)",
 template <class GridView, class Element>
 std::pair<std::vector<int>, int>
-partitionCellsZoltan(const int num_domains,
-                     const GridView& grid_view,
-                     const std::vector<Opm::Well>& wells,
-                     const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
+partitionCellsZoltan(const int num_domains,
+                     const GridView& grid_view,
+                     const std::vector<Opm::Well>& wells,
+                     const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+                     const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
 {
     if (num_domains <= 1) { // No partitioning => every cell in domain zero.
         const auto num_interior_cells =
@@ -355,7 +371,7 @@ partitionCellsZoltan(const int num_domains,
     }

     auto partitioner = ZoltanPartitioner { grid_view, zoltan_ctrl.local_to_global };
-    partitioner.buildLocalGraph(grid_view, wells, zoltan_ctrl);
+    partitioner.buildLocalGraph(grid_view, wells, possibleFutureConnections, zoltan_ctrl);

     return partitioner.partition(num_domains, grid_view, zoltan_ctrl);
 }
@@ -574,13 +590,14 @@ std::pair<std::vector<int>, int>
 Opm::partitionCells(const std::string& method,
                     const int num_local_domains,
                     const GridView& grid_view,
-                    [[maybe_unused]] const std::vector<Well>& wells,
-                    [[maybe_unused]] const ZoltanPartitioningControl<Element>& zoltan_ctrl)
+                    [[maybe_unused]] const std::vector<Well>& wells,
+                    [[maybe_unused]] const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+                    [[maybe_unused]] const ZoltanPartitioningControl<Element>& zoltan_ctrl)
 {
     if (method == "zoltan") {
 #if HAVE_MPI && HAVE_ZOLTAN
-        return partitionCellsZoltan(num_local_domains, grid_view, wells, zoltan_ctrl);
+        return partitionCellsZoltan(num_local_domains, grid_view, wells, possibleFutureConnections, zoltan_ctrl);
 #else // !HAVE_MPI || !HAVE_ZOLTAN
@@ -665,15 +682,16 @@ Opm::partitionCellsSimple(const int num_cells, const int num_domains)
 // Deliberately placed at end of file. No other code beyond this separator.
 // ---------------------------------------------------------------------------

-#define InstantiatePartitionCells(Grid)                                        \
-    template std::pair<std::vector<int>, int>                                  \
-    Opm::partitionCells(const std::string&,                                    \
-                        const int,                                             \
-                        const std::remove_cv_t<std::remove_reference_t<        \
-                            decltype(std::declval<Grid>().leafGridView())>>&,  \
-                        const std::vector<Opm::Well>&,                         \
-                        const Opm::ZoltanPartitioningControl<                  \
-                            typename std::remove_cv_t<std::remove_reference_t< \
+#define InstantiatePartitionCells(Grid)                                        \
+    template std::pair<std::vector<int>, int>                                  \
+    Opm::partitionCells(const std::string&,                                    \
+                        const int,                                             \
+                        const std::remove_cv_t<std::remove_reference_t<        \
+                            decltype(std::declval<Grid>().leafGridView())>>&,  \
+                        const std::vector<Opm::Well>&,                         \
+                        const std::unordered_map<std::string, std::set<int>>&, \
+                        const Opm::ZoltanPartitioningControl<                  \
+                            typename std::remove_cv_t<std::remove_reference_t< \
                             decltype(std::declval<Grid>().leafGridView())>>::template Codim<0>::Entity>&)

 // ---------------------------------------------------------------------------

View File

@@ -21,6 +21,7 @@
 #define OPM_ASPINPARTITION_HEADER_INCLUDED

 #include <functional>
+#include <set>
 #include <string>
 #include <utility>
 #include <vector>
@@ -88,11 +89,12 @@ struct ZoltanPartitioningControl
 ///    on current rank.
 template <class GridView, class Element>
 std::pair<std::vector<int>, int>
-partitionCells(const std::string& method,
-               const int num_local_domains,
-               const GridView& grid_view,
-               const std::vector<Well>& wells,
-               const ZoltanPartitioningControl<Element>& zoltan_ctrl);
+partitionCells(const std::string& method,
+               const int num_local_domains,
+               const GridView& grid_view,
+               const std::vector<Well>& wells,
+               const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
+               const ZoltanPartitioningControl<Element>& zoltan_ctrl);

 /// Read a partitioning from file, assumed to contain one number per cell, its partition number.
 /// \return pair containing a partition vector (partition number for each cell), and the number of partitions.

View File

@@ -78,7 +78,7 @@ void BdaSolverInfo<Matrix,Vector>::
 prepare(const Grid& grid,
         const Dune::CartesianIndexMapper<Grid>& cartMapper,
         const std::vector<Well>& wellsForConn,
-        const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
+        const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
         const std::vector<int>& cellPartition,
         const std::size_t nonzeroes,
         const bool useWellConn)
@@ -241,13 +241,13 @@ using BM = Dune::BCRSMatrix<MatrixBlock<Scalar,Dim,Dim>>;
 template<class Scalar, int Dim>
 using BV = Dune::BlockVector<Dune::FieldVector<Scalar,Dim>>;

-#define INSTANTIATE_GRID(T, Dim, Grid)                                            \
-    template void BdaSolverInfo<BM<T,Dim>,BV<T,Dim>>::                            \
-    prepare(const Grid&,                                                          \
-            const Dune::CartesianIndexMapper<Grid>&,                              \
-            const std::vector<Well>&,                                             \
-            const std::unordered_map<std::string, std::set<std::array<int,3>>>&, \
-            const std::vector<int>&,                                              \
+#define INSTANTIATE_GRID(T, Dim, Grid)                                 \
+    template void BdaSolverInfo<BM<T,Dim>,BV<T,Dim>>::                 \
+    prepare(const Grid&,                                               \
+            const Dune::CartesianIndexMapper<Grid>&,                   \
+            const std::vector<Well>&,                                  \
+            const std::unordered_map<std::string, std::set<int>>&,     \
+            const std::vector<int>&,                                   \
             const std::size_t, const bool);

 using PolyHedralGrid3D = Dune::PolyhedralGrid<3, 3>;
 #if HAVE_DUNE_ALUGRID

View File

@@ -60,7 +60,7 @@ struct BdaSolverInfo
     void prepare(const Grid& grid,
                  const Dune::CartesianIndexMapper<Grid>& cartMapper,
                  const std::vector<Well>& wellsForConn,
-                 const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections,
+                 const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
                  const std::vector<int>& cellPartition,
                  const std::size_t nonzeroes,
                  const bool useWellConn);

View File

@@ -46,7 +46,7 @@ namespace detail
 /// \param useWellConn   Boolean that is true when UseWellContribusion is true
 /// \param wellGraph     Cell IDs of well cells stored in a graph.
 template<class Grid, class CartMapper, class W>
-void setWellConnections(const Grid& grid, const CartMapper& cartMapper, const W& wells, const std::unordered_map<std::string, std::set<std::array<int,3>>>& possibleFutureConnections, bool useWellConn, std::vector<std::set<int>>& wellGraph, int numJacobiBlocks)
+void setWellConnections(const Grid& grid, const CartMapper& cartMapper, const W& wells, const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections, bool useWellConn, std::vector<std::set<int>>& wellGraph, int numJacobiBlocks)
 {
     if ( grid.comm().size() > 1 || numJacobiBlocks > 1)
     {
@@ -62,7 +62,7 @@ namespace detail
             cart[ cartMapper.cartesianIndex( i ) ] = i;

         Dune::cpgrid::WellConnections well_indices;
-        well_indices.init(wells, &possibleFutureConnections, cpgdim, cart);
+        well_indices.init(wells, possibleFutureConnections, cpgdim, cart);

         for (auto& well : well_indices)
         {

View File

@@ -194,11 +194,22 @@ namespace Opm {
         // Create cartesian to compressed mapping
         const auto& schedule_wells = this->schedule().getWellsatEnd();
+        const auto& possibleFutureConnections = this->schedule().getPossibleFutureConnections();

         // initialize the additional cell connections introduced by wells.
         for (const auto& well : schedule_wells)
         {
             std::vector<int> wellCells = this->getCellsForConnections(well);
+            // Now add the cells of the possible future connections
+            const auto possibleFutureConnectionSetIt = possibleFutureConnections.find(well.name());
+            if (possibleFutureConnectionSetIt != possibleFutureConnections.end()) {
+                for (auto& global_index : possibleFutureConnectionSetIt->second) {
+                    int compressed_idx = compressedIndexForInterior(global_index);
+                    if (compressed_idx >= 0) { // Ignore connections in inactive/remote cells.
+                        wellCells.push_back(compressed_idx);
+                    }
+                }
+            }
             for (int cellIdx : wellCells) {
                 neighbors[cellIdx].insert(wellCells.begin(),
                                           wellCells.end());
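The hunk above applies the same idea to the matrix sparsity pattern: each well's cell list is extended before the neighbors sets are filled, so rows and columns for connections that an ACTIONX may open later exist up front; compressedIndexForInterior() returns a negative value for cells that are inactive or owned by another rank, and those are skipped.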

View File

@@ -236,3 +236,15 @@ add_test_compareSeparateECLFiles(CASENAME actionx_compdat_8_procs
                                  REL_TOL ${rel_tol}
                                  IGNORE_EXTRA_KW BOTH
                                  MPI_PROCS 8)
+
+add_test_compareSeparateECLFiles(CASENAME actionx_compdat_nldd_1_proc
+                                 DIR1 actionx
+                                 FILENAME1 COMPDAT_SHORT
+                                 DIR2 actionx
+                                 FILENAME2 ACTIONX_COMPDAT_SHORT
+                                 SIMULATOR flow
+                                 ABS_TOL ${abs_tol}
+                                 REL_TOL ${rel_tol}
+                                 IGNORE_EXTRA_KW BOTH
+                                 MPI_PROCS 1
+                                 TEST_ARGS --nonlinear-solver=nldd --matrix-add-well-contributions=true)
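With this registration the new case can be run on its own through CTest from the build directory, for example via ctest -R actionx_compdat_nldd_1_proc (assuming the generated test name contains the CASENAME); it runs flow with --nonlinear-solver=nldd and --matrix-add-well-contributions=true on both decks, one using a plain COMPDAT and one applying it through ACTIONX, and compares the resulting ECL output files.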