Merge pull request #4864 from bska/parallel-nldd-partitioning-zoltan

Make Zoltan-Based NLDD Domain Partitioning MPI Aware
Atgeirr Flø Rasmussen, 2023-11-15 15:34:36 +01:00 (committed by GitHub)
commit 6fa81d54a1
7 changed files with 1239 additions and 110 deletions

CMakeLists.txt

@ -235,6 +235,7 @@ endif()
if(MPI_FOUND)
list(APPEND MAIN_SOURCE_FILES opm/simulators/utils/MPIPacker.cpp
opm/simulators/utils/ParallelEclipseState.cpp
opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp
opm/simulators/utils/ParallelSerialization.cpp
opm/simulators/utils/SetupZoltanParams.cpp)
endif()
@ -528,6 +529,7 @@ list (APPEND PUBLIC_HEADER_FILES
opm/simulators/utils/gatherDeferredLogger.hpp
opm/simulators/utils/moduleVersion.hpp
opm/simulators/utils/ParallelEclipseState.hpp
opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp
opm/simulators/utils/ParallelRestart.hpp
opm/simulators/utils/PropsDataHandle.hpp
opm/simulators/utils/SerializationPackers.hpp

opm/simulators/flow/BlackoilModelEbosNldd.hpp

@ -54,12 +54,14 @@
#include <cassert>
#include <cmath>
#include <cstddef>
#include <functional>
#include <iomanip>
#include <ios>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
@ -93,16 +95,8 @@ public:
BlackoilModelEbosNldd(BlackoilModelEbos<TypeTag>& model)
: model_(model)
{
const auto& grid = model_.ebosSimulator().vanguard().grid();
const auto& schedule = model_.ebosSimulator().vanguard().schedule();
// Create partitions.
const auto& [partition_vector, num_domains] =
partitionCells(grid,
schedule.getWellsatEnd(),
model_.param().local_domain_partition_method_,
model_.param().num_local_domains_,
model_.param().local_domain_partition_imbalance_);
const auto& [partition_vector, num_domains] = this->partitionCells();
// Scan through partitioning to get correct size for each.
std::vector<int> sizes(num_domains, 0);
@ -121,6 +115,8 @@ public:
// Iterate through grid once, setting the seeds of all partitions.
// Note: owned cells only!
const auto& grid = model_.ebosSimulator().vanguard().grid();
std::vector<int> count(num_domains, 0);
const auto& gridView = grid.leafGridView();
const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
@ -835,6 +831,41 @@ private:
return errorPV;
}
decltype(auto) partitionCells() const
{
const auto& grid = this->model_.ebosSimulator().vanguard().grid();
using GridView = std::remove_cv_t<std::remove_reference_t<decltype(grid.leafGridView())>>;
using Element = std::remove_cv_t<std::remove_reference_t<typename GridView::template Codim<0>::Entity>>;
const auto& param = this->model_.param();
auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};
zoltan_ctrl.domain_imbalance = param.local_domain_partition_imbalance_;
zoltan_ctrl.index =
[elementMapper = &this->model_.ebosSimulator().model().elementMapper()]
(const Element& element)
{
return elementMapper->index(element);
};
zoltan_ctrl.local_to_global =
[cartMapper = &this->model_.ebosSimulator().vanguard().cartesianIndexMapper()]
(const int elemIdx)
{
return cartMapper->cartesianIndex(elemIdx);
};
// Forming the list of wells is expensive, so do this only if needed.
const auto need_wells = param.local_domain_partition_method_ == "zoltan";
const auto wells = need_wells
? this->model_.ebosSimulator().vanguard().schedule().getWellsatEnd()
: std::vector<Well>{};
return ::Opm::partitionCells(param.local_domain_partition_method_,
param.num_local_domains_,
grid.leafGridView(), wells, zoltan_ctrl);
}
BlackoilModelEbos<TypeTag>& model_; //!< Reference to model
std::vector<Domain> domains_; //!< Vector of subdomains
std::vector<std::unique_ptr<Mat>> domain_matrices_; //!< Vector of matrix operator for each subdomain

opm/simulators/flow/partitionCells.cpp

@ -18,15 +18,26 @@
*/
#include <config.h>
#include <opm/simulators/flow/partitionCells.hpp>
#include <opm/simulators/flow/countGlobalCells.hpp>
#if HAVE_MPI && HAVE_ZOLTAN
#include <opm/simulators/utils/DeferredLogger.hpp>
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <opm/simulators/utils/gatherDeferredLogger.hpp>
#include <opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp>
#include <opm/simulators/utils/SetupZoltanParams.hpp>
#include <opm/input/eclipse/Schedule/Well/Connection.hpp>
#include <opm/input/eclipse/Schedule/Well/Well.hpp>
#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
#endif // HAVE_MPI && HAVE_ZOLTAN
#include <opm/grid/CpGrid.hpp>
#include <opm/grid/polyhedralgrid.hh>
#include <opm/input/eclipse/Schedule/Well/Well.hpp>
#include <opm/simulators/flow/countGlobalCells.hpp>
#if HAVE_DUNE_ALUGRID
#include <dune/alugrid/grid.hh>
#endif // HAVE_DUNE_ALUGRID
@ -40,68 +51,351 @@
#include <numeric>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
// ===========================================================================
// Zoltan-based partitioning if available in this build configuration.
// ---------------------------------------------------------------------------
#if HAVE_MPI && HAVE_ZOLTAN
namespace {
std::pair<std::vector<int>, int>
countDomains(std::vector<int> partition_vector)
/// Partition rank's interior cells into non-overlapping domains using the
/// Zoltan graph partitioning software package.
class ZoltanPartitioner
{
auto maxPos = std::max_element(partition_vector.begin(),
partition_vector.end());
public:
/// Constructor.
///
/// \tparam GridView DUNE grid view type
///
/// \param[in] grid_view Current rank's reachable cells.
///
/// \param[in] local_to_global Callback for mapping rank's local cells
/// to globally unique cell IDs. May for instance return the
/// Cartesian cell ID of each local cell.
template <class GridView>
explicit ZoltanPartitioner(const GridView& grid_view,
const Opm::ParallelNLDDPartitioningZoltan::GlobalCellID& local_to_global);
const auto num_domains = (maxPos == partition_vector.end())
? 0 : *maxPos + 1;
/// Form internal connectivity graph between rank's reachable, interior
/// cells.
///
/// \tparam GridView DUNE grid view type
///
/// \tparam Element Grid view's representation of a cell.
///
/// \param[in] grid_view Current rank's reachable cells.
///
/// \param[in] wells Collection of wells to which this rank's partition
/// vector must be adapted. Wells must not intersect multiple local
/// domains/blocks.
///
/// \param[in] zoltan_ctrl Control parameters for on-rank subdomain
/// partitioning.
template <class GridView, class Element>
void buildLocalGraph(const GridView& grid_view,
const std::vector<Opm::Well>& wells,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl);
return { std::move(partition_vector), num_domains };
/// Partition rank's interior cells into non-overlapping domains using
/// the Zoltan graph partitioning software package.
///
/// \tparam GridView DUNE grid view type
///
/// \tparam Element Grid view's representation of a cell.
///
/// \param[in] num_local_domains Target number of domains into which the
/// full model will be partitioned. Note: This number is treated as a
/// global parameter across all MPI ranks and is not used as a number
/// of domains per rank. The final number of model subdomains may
/// differ from this number when adapting to well constraints and if,
/// for whatever reason, Zoltan happens to generate a global
/// partitioning for which one domain/block happens to cross process
/// boundaries.
///
/// \param[in] grid_view Current rank's reachable cells.
///
/// \param[in] zoltan_ctrl Control parameters for on-rank subdomain
/// partitioning.
///
/// \return Partition vector for rank's interior cells and number of
/// unique domains across those interior cells.
template <class GridView, class Element>
std::pair<std::vector<int>, int>
partition(const int num_local_domains,
const GridView& grid_view,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl) const;
private:
/// Zoltan partitioning backend.
Opm::ParallelNLDDPartitioningZoltan partitioner_;
/// Form internal connectivity graph between rank's reachable, interior
/// cells.
///
/// \tparam GridView DUNE grid view type
///
/// \tparam Element Grid view's representation of a cell.
///
/// \param[in] grid_view Current rank's reachable cells.
///
/// \param[in] zoltan_ctrl Control parameters for on-rank subdomain
/// partitioning.
///
/// \return Mapping from globally unique cell IDs to local, on-rank
/// active cell IDs. Effectively the cached result of \code
/// zoltan_ctrl.index() \endcode for each globally unique cell ID.
template <class GridView, class Element>
std::unordered_map<int, int>
connectElements(const GridView& grid_view,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl);
/// Adapt internal connectivity graph to account for connections induced
/// by well reservoir connections.
///
/// \tparam Comm MPI communication type.
///
/// \param[in] comm MPI communication object. Needed to get a global
/// view of any exceptions thrown in the implementation.
///
/// \param[in] wells Collection of wells to which this rank's partition
/// vector must be adapted. Wells must not intersect multiple local
/// domains/blocks.
///
/// \param[in] g2l Mapping from globally unique cell IDs to local,
/// on-rank active cell IDs. Return value from \c connectElements().
template <typename Comm>
void connectWells(const Comm comm,
const std::vector<Opm::Well>& wells,
const std::unordered_map<int, int>& g2l);
};
// Note: "grid_view.size(0)" is intentional here. It is not an error. The
// ParallelNLDDPartitioningZoltan type will internally enumerate interior
// cells in a consecutive sequence, regardless of the ordering in the
// grid_view object.
template <class GridView>
ZoltanPartitioner::ZoltanPartitioner(const GridView& grid_view,
const Opm::ParallelNLDDPartitioningZoltan::GlobalCellID& local_to_global)
: partitioner_(grid_view.comm(), grid_view.size(0), local_to_global)
{}
template <class GridView, class Element>
void ZoltanPartitioner::buildLocalGraph(const GridView& grid_view,
const std::vector<Opm::Well>& wells,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
{
this->connectWells(grid_view.comm(), wells, this->connectElements(grid_view, zoltan_ctrl));
}
template <typename, class = void>
struct HasZoltanPartitioning : public std::false_type {};
template <class GridView, class Element>
std::pair<std::vector<int>, int>
ZoltanPartitioner::partition(const int num_local_domains,
const GridView& grid_view,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl) const
{
auto partition = std::pair<std::vector<int>, int>{};
template <typename GridType>
struct HasZoltanPartitioning<
GridType,
std::void_t<decltype(std::declval<const GridType&>().zoltanPartitionWithoutScatter
(std::declval<const std::vector<Opm::Well>*>(),
std::declval<const double*>(),
std::declval<const int>(),
std::declval<const double>()))>
> : public std::true_type {};
auto param = Opm::setupZoltanParams("graph");
param.insert_or_assign("NUM_GLOBAL_PARTS", fmt::format("{}", num_local_domains));
param.insert_or_assign("IMBALANCE_TOL" , fmt::format("{}", zoltan_ctrl.domain_imbalance));
} // anonymous namespace
const auto domainsAll = this->partitioner_.partitionElements(param);
namespace Opm {
auto& [domains, num_domains] = partition;
domains.reserve(domainsAll.size());
num_domains = 0;
template<class Grid>
std::pair<std::vector<int>, int> partitionCells(const Grid& grid,
const std::vector<Well>& wells,
const std::string& method,
const int num_local_domains,
const double partition_imbalance)
for (const auto& cell : elements(grid_view, Dune::Partitions::interior)) {
const auto domainId = domains.emplace_back(domainsAll[zoltan_ctrl.index(cell)]);
if (domainId >= num_domains) {
num_domains = domainId + 1;
}
}
// Fix-up for an extreme case: Interior cells whose neighbours are all
// on a different rank--i.e., interior cells which do not have on-rank
// neighbours. Make each such cell be a separate domain on this rank.
// Don't remove this code unless you've taken remedial action elsewhere
// to ensure the situation doesn't happen. For what it's worth, we've
// seen this case occur in practice when testing on field cases.
for (auto& domainId : domains) {
if (domainId < 0) {
domainId = num_domains++;
}
}
return partition;
}
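The fix-up above is easiest to see on a tiny, purely illustrative example (the values are made up; isolated interior cells arrive from the partitioner backend with the block ID -1):
// Five interior cells; the cell at position 2 has no on-rank neighbours,
// so the Zoltan backend marked it as unreachable (-1):
//
//   domains = { 0, 1, -1, 1, 2 },  num_domains = 3
//
// The fix-up loop promotes the isolated cell to a domain of its own:
//
//   domains = { 0, 1,  3, 1, 2 },  num_domains = 4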
template <class GridView, class Element>
std::unordered_map<int, int>
ZoltanPartitioner::connectElements(const GridView& grid_view,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
{
auto g2l = std::unordered_map<int, int>{};
for (const auto& element : elements(grid_view, Dune::Partitions::interior)) {
{
const auto c = zoltan_ctrl.index(element);
g2l.insert_or_assign(zoltan_ctrl.local_to_global(c), c);
}
for (const auto& is : intersections(grid_view, element)) {
if (! is.neighbor()) {
continue;
}
const auto& in = is.inside();
const auto& out = is.outside();
if ((in .partitionType() != Dune::InteriorEntity) ||
(out.partitionType() != Dune::InteriorEntity))
{
// Connect cells in interior only.
continue;
}
const auto c1 = zoltan_ctrl.index(in);
const auto c2 = zoltan_ctrl.index(out);
this->partitioner_.registerConnection(c1, c2);
this->partitioner_.registerConnection(c2, c1);
}
}
return g2l;
}
template <typename Comm>
void ZoltanPartitioner::connectWells(const Comm comm,
const std::vector<Opm::Well>& wells,
const std::unordered_map<int, int>& g2l)
{
auto distributedWells = 0;
for (const auto& well : wells) {
auto cellIx = std::vector<int>{};
auto otherProc = 0;
for (const auto& conn : well.getConnections()) {
auto locPos = g2l.find(conn.global_index());
if (locPos == g2l.end()) {
++otherProc;
continue;
}
cellIx.push_back(locPos->second);
}
if ((otherProc > 0) && !cellIx.empty()) {
++distributedWells;
continue;
}
const auto nc = cellIx.size();
for (auto ic1 = 0*nc; ic1 < nc; ++ic1) {
for (auto ic2 = ic1 + 1; ic2 < nc; ++ic2) {
this->partitioner_.registerConnection(cellIx[ic1], cellIx[ic2]);
this->partitioner_.registerConnection(cellIx[ic2], cellIx[ic1]);
}
}
if (! cellIx.empty()) {
// All cells intersected by a single well go in the same domain.
this->partitioner_.forceSameDomain(std::move(cellIx));
}
}
OPM_BEGIN_PARALLEL_TRY_CATCH()
if (distributedWells > 0) {
const auto* pl = (distributedWells == 1) ? "" : "s";
throw std::invalid_argument {
fmt::format(R"({} distributed well{} detected on rank {}."
This is not supported in the current NLDD implementation.)",
distributedWells, pl, comm.rank())
};
}
OPM_END_PARALLEL_TRY_CATCH(std::string { "ZoltanPartitioner::connectWells()" }, comm)
}
template <class GridView, class Element>
std::pair<std::vector<int>, int>
partitionCellsZoltan(const int num_domains,
const GridView& grid_view,
const std::vector<Opm::Well>& wells,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
{
if (num_domains <= 1) { // No partitioning => every cell in domain zero.
const auto num_interior_cells =
Opm::detail::countLocalInteriorCellsGridView(grid_view);
return {
std::piecewise_construct,
std::forward_as_tuple(num_interior_cells, 0),
std::forward_as_tuple(1)
};
}
auto partitioner = ZoltanPartitioner { grid_view, zoltan_ctrl.local_to_global };
partitioner.buildLocalGraph(grid_view, wells, zoltan_ctrl);
return partitioner.partition(num_domains, grid_view, zoltan_ctrl);
}
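The piecewise-constructed return value in the single-domain branch above is simply a vector of zeros together with a domain count of one; for example:
//   100 interior cells and num_domains <= 1
//     => { std::vector<int>(100, 0), 1 }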
} // Anonymous namespace
#endif // HAVE_MPI && HAVE_ZOLTAN
// ===========================================================================
template <class GridView, class Element>
std::pair<std::vector<int>, int>
Opm::partitionCells(const std::string& method,
const int num_local_domains,
const GridView& grid_view,
[[maybe_unused]] const std::vector<Well>& wells,
[[maybe_unused]] const ZoltanPartitioningControl<Element>& zoltan_ctrl)
{
if (method == "zoltan") {
if constexpr (HasZoltanPartitioning<Grid>::value) {
return partitionCellsZoltan(grid, wells, num_local_domains, partition_imbalance);
} else {
OPM_THROW(std::runtime_error, "Zoltan requested for local domain partitioning, "
"but is not available for the current grid type.");
}
} else if (method == "simple") {
const int num_cells = detail::countLocalInteriorCells(grid);
#if HAVE_MPI && HAVE_ZOLTAN
return partitionCellsZoltan(num_local_domains, grid_view, wells, zoltan_ctrl);
#else // !HAVE_MPI || !HAVE_ZOLTAN
OPM_THROW(std::runtime_error, "Zoltan requested for local domain partitioning, "
"but is not available in the current build configuration.");
#endif // HAVE_MPI && HAVE_ZOLTAN
}
else if (method == "simple") {
const int num_cells = detail::countLocalInteriorCellsGridView(grid_view);
return partitionCellsSimple(num_cells, num_local_domains);
} else if (method.size() > 10 && method.substr(method.size() - 10, 10) == ".partition") {
}
else if (method.size() > 10 && method.substr(method.size() - 10, 10) == ".partition") {
// Method string ends with ".partition", interpret as filename for partitioning.
const int num_cells = detail::countLocalInteriorCells(grid);
const int num_cells = detail::countLocalInteriorCellsGridView(grid_view);
return partitionCellsFromFile(method, num_cells);
} else {
}
else {
OPM_THROW(std::runtime_error, "Unknown local domain partitioning method requested: " + method);
}
}
// Read from file, containing one number per cell.
std::pair<std::vector<int>, int> partitionCellsFromFile(const std::string& filename, const int num_cells)
std::pair<std::vector<int>, int>
Opm::partitionCellsFromFile(const std::string& filename, const int num_cells)
{
// Read file into single vector.
std::ifstream is(filename);
@ -119,7 +413,8 @@ std::pair<std::vector<int>, int> partitionCellsFromFile(const std::string& filen
// Trivially simple partitioner
std::pair<std::vector<int>, int> partitionCellsSimple(const int num_cells, const int num_domains)
std::pair<std::vector<int>, int>
Opm::partitionCellsSimple(const int num_cells, const int num_domains)
{
// Build the partitions.
const int dom_sz = num_cells / num_domains;
@ -137,46 +432,53 @@ std::pair<std::vector<int>, int> partitionCellsSimple(const int num_cells, const
return { part, num_domains };
}
// ===========================================================================
template<class Grid>
std::pair<std::vector<int>, int> partitionCellsZoltan(const Grid& grid,
const std::vector<Well>& wells,
const int num_domains,
const double domain_imbalance)
{
auto partition_vector = grid.zoltanPartitionWithoutScatter
(&wells, nullptr, num_domains, domain_imbalance);
// ---------------------------------------------------------------------------
// Explicit specialisations/instantiations of partitionCells template.
// Deliberately placed at end of file. No other code beyond this separator.
// ---------------------------------------------------------------------------
return countDomains(std::move(partition_vector));
}
#define InstantiatePartitionCells(Grid) \
template std::pair<std::vector<int>, int> \
Opm::partitionCells(const std::string&, \
const int, \
const std::remove_cv_t<std::remove_reference_t< \
decltype(std::declval<Grid>().leafGridView())>>&, \
const std::vector<Opm::Well>&, \
const Opm::ZoltanPartitioningControl< \
typename std::remove_cv_t<std::remove_reference_t< \
decltype(std::declval<Grid>().leafGridView())>>::template Codim<0>::Entity>&)
template std::pair<std::vector<int>,int>
partitionCells<Dune::CpGrid>(const Dune::CpGrid&,
const std::vector<Well>&,
const std::string&,
const int,
const double);
// ---------------------------------------------------------------------------
// Grid types built into Flow.
// ---------------------------------------------------------------------------
InstantiatePartitionCells(Dune::CpGrid);
template std::pair<std::vector<int>,int>
partitionCells<Dune::PolyhedralGrid<3,3,double>>(const Dune::PolyhedralGrid<3,3,double>&,
const std::vector<Well>&,
const std::string&,
const int,
const double);
using PolyGrid = Dune::PolyhedralGrid<3, 3, double>;
InstantiatePartitionCells(PolyGrid);
// ---------------------------------------------------------------------------
// ALUGrid, if available.
// ---------------------------------------------------------------------------
#if HAVE_DUNE_ALUGRID
#if HAVE_MPI
using ALUGrid3CN = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, Dune::ALUGridMPIComm>;
using ALUGridComm = Dune::ALUGridMPIComm;
#else
using ALUGrid3CN = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, Dune::ALUGridNoComm>;
#endif //HAVE_MPI
using ALUGridComm = Dune::ALUGridNoComm;
#endif // HAVE_MPI
template std::pair<std::vector<int>,int>
partitionCells<ALUGrid3CN>(const ALUGrid3CN&,
const std::vector<Well>&,
const std::string&,
const int,
const double);
#endif
// ---------------------------------------------------------------------------
// Base case.
// ---------------------------------------------------------------------------
using BaseALUGrid = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, ALUGridComm>;
InstantiatePartitionCells(BaseALUGrid);
#endif // HAVE_DUNE_ALUGRID
} // namespace Opm
// ===========================================================================
#undef InstantiatePartitionCells
// ---------------------------------------------------------------------------
// End of file. No statements below this separator.
// ---------------------------------------------------------------------------

opm/simulators/flow/partitionCells.hpp

@ -20,6 +20,7 @@
#ifndef OPM_ASPINPARTITION_HEADER_INCLUDED
#define OPM_ASPINPARTITION_HEADER_INCLUDED
#include <functional>
#include <string>
#include <utility>
#include <vector>
@ -28,14 +29,70 @@ namespace Opm {
class Well;
/// Partitions the grid using the specified method.
/// \return pair containing a partition vector (partition number for each cell), and the number of partitions.
template<class Grid>
std::pair<std::vector<int>, int> partitionCells(const Grid& grid,
const std::vector<Well>& wells,
const std::string& method,
const int num_local_domains,
const double partition_imbalance);
} // namespace Opm
namespace Opm {
/// Control parameters for on-rank subdomain partitioning using Zoltan library.
///
/// \tparam Element Grid view entity type
template <typename Element>
struct ZoltanPartitioningControl
{
/// Partition imbalance, percentage units. Assumed to be the run's
/// value of ZoltanImbalanceTol.
double domain_imbalance{1.03};
/// Compute a locally unique, for this MPI process, ID of a local
/// cell/element/entity.
std::function<int(const Element&)> index;
/// Compute a globally unique, across all MPI processes, ID for a local
/// cell/element/entity. Might for instance return the cell's
/// linearised index
///
/// i + nx*(j + ny*k)
///
/// of the Cartesian cell (i,j,k).
std::function<int(int)> local_to_global;
};
/// Partition rank's interior cells.
///
/// \param[in] method Partitioning method. Supported values are \c
/// "zoltan", \c "simple", or a filename with the extension \c
/// ".partition". The \c "zoltan" method invokes the Zoltan graph
/// partitioning package and requires both MPI and an active Zoltan
/// library. The \c "simple" method uses a one-dimensional load-balanced
/// approach, and the filename method will read a precomputed partition
/// vector from the named file.
///
/// \param[in] num_local_domains Number of subdomains. Not used when
/// explicit partitioning is input from a file.
///
/// \param[in] comm MPI Communication object for exchanging globally unique
/// cell IDs and for communication within the Zoltan library. Not used
/// unless \code method == "zoltan" \endcode.
///
/// \param[in] grid_view View of rank's cells, both interior and overlap
/// cells. Not used unless \code method == "zoltan" \endcode.
///
/// \param[in] wells Collection of simulation model wells. Not used unless
/// \code method == "zoltan" \endcode.
///
/// \param[in] zoltan_ctrl Control parameters for local Zoltan-based
/// partitioning. Not used unless \code method == "zoltan" \endcode.
///
/// \return Partition vector--subdomain ID for each cell in \p grid_view
/// traversal order for its interior cells--and the number of subdomains
/// on current rank.
template <class GridView, class Element>
std::pair<std::vector<int>, int>
partitionCells(const std::string& method,
const int num_local_domains,
const GridView& grid_view,
const std::vector<Well>& wells,
const ZoltanPartitioningControl<Element>& zoltan_ctrl);
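A minimal calling sketch, assuming a grid object, a well list, a DUNE element mapper and a Cartesian index mapper are already at hand. The names grid, wells, elementMapper and cartMapper are placeholders; they mirror how the caller added to BlackoilModelEbosNldd in this changeset wires these objects up from the simulator.

using GridView = std::remove_cv_t<std::remove_reference_t<decltype(grid.leafGridView())>>;
using Element  = std::remove_cv_t<std::remove_reference_t<typename GridView::template Codim<0>::Entity>>;

auto zoltan_ctrl = Opm::ZoltanPartitioningControl<Element>{};
zoltan_ctrl.domain_imbalance = 1.03;
zoltan_ctrl.index = [&elementMapper](const Element& element)
{
    return elementMapper.index(element);
};
zoltan_ctrl.local_to_global = [&cartMapper](const int elemIdx)
{
    return cartMapper.cartesianIndex(elemIdx);
};

const auto& [partition_vector, num_domains] =
    Opm::partitionCells("zoltan", /* num_local_domains = */ 16,
                        grid.leafGridView(), wells, zoltan_ctrl);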
/// Read a partitioning from file, assumed to contain one number per cell, its partition number.
/// \return pair containing a partition vector (partition number for each cell), and the number of partitions.
@ -45,13 +102,6 @@ std::pair<std::vector<int>, int> partitionCellsFromFile(const std::string& filen
/// \return pair containing a partition vector (partition number for each cell), and the number of partitions.
std::pair<std::vector<int>, int> partitionCellsSimple(const int num_cells, const int num_domains);
/// Partitions the grid using the Zoltan graph partitioner.
/// \return pair containing a partition vector (partition number for each cell), and the number of partitions.
template<class Grid>
std::pair<std::vector<int>, int> partitionCellsZoltan(const Grid& grid,
const std::vector<Well>& wells,
const int num_domains,
const double domain_imbalance);
} // namespace Opm
#endif // OPM_ASPINPARTITION_HEADER_INCLUDED

opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp

@ -0,0 +1,607 @@
/*
Copyright 2023 Equinor ASA
This file is part of the Open Porous Media Project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp>
#include <opm/simulators/utils/ParallelCommunication.hpp>
#include <opm/common/ErrorMacros.hpp>
#include <opm/common/utility/CSRGraphFromCoordinates.hpp>
// Note: The build system guarantees that we're only built in configurations
// that have both MPI and Zoltan. Zoltan.h redefines 'HAVE_MPI', so let it
// do so, but restore a simple definition afterwards.
#undef HAVE_MPI
#include <zoltan.h>
#undef HAVE_MPI
#define HAVE_MPI
#include <algorithm>
#include <cstddef>
#include <functional>
#include <numeric>
#include <stdexcept>
#include <utility>
#include <vector>
namespace {
/// Assign Zoltan control parameters.
///
/// Built-in defaults possibly overridden by user-selected values.
///
/// \param[in] params User-selected Zoltan control parameters.
///
/// \param[in,out] zz Opaque Zoltan context.
void setZoltanParameters(const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params,
Zoltan_Struct* zz)
{
Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0");
Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH");
Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION");
Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1");
Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1");
Zoltan_Set_Param(zz, "RETURN_LISTS", "EXPORT"); // Self to "other"
Zoltan_Set_Param(zz, "CHECK_GRAPH", "2");
Zoltan_Set_Param(zz, "EDGE_WEIGHT_DIM", "0");
Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "0");
Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", "0.35"); // 0-remove all, 1-remove none
for (const auto& [ param, value ] : params) {
Zoltan_Set_Param(zz, param.c_str(), value.c_str());
}
}
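For reference, the caller in partitionCells.cpp seeds this map via setupZoltanParams("graph") and then overrides NUM_GLOBAL_PARTS and IMBALANCE_TOL per partitioning request (see ZoltanPartitioner::partition() above). A map arriving in params could therefore look like the following sketch; the concrete values are illustrative only.

Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap params {
    { "NUM_GLOBAL_PARTS", "16"   },  // target number of domains across all ranks (example value)
    { "IMBALANCE_TOL"   , "1.03" },  // the run's ZoltanImbalanceTol (example value)
};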
/// Create consecutive numbering of reachable vertices
class EnumerateSeenVertices
{
public:
/// Constructor.
///
/// \tparam Edge Type for representing a directed edge between a
/// pair of vertices. Must support \code std::get<>() \endcode
/// protocol. Typically a \code std::pair<> \endcode of integral
/// types.
///
/// \param[in] numVertices Maximum number of vertices in graph.
///
/// \param[in] edges Edge representation of connectivity graph.
template <typename Edge>
explicit EnumerateSeenVertices(const std::size_t numVertices,
const std::vector<Edge>& edges)
: index_(numVertices, -1)
{
auto seen = std::vector<bool>(numVertices, false);
for (const auto& [v1, v2] : edges) {
seen[v1] = true;
seen[v2] = true;
}
for (auto vertex = 0*numVertices; vertex < numVertices; ++vertex) {
if (seen[vertex]) {
this->index_[vertex] = this->numSeenVertices_++;
}
}
}
/// Retrieve number of reachable vertices. Less than or equal to \p
/// numVertices.
std::size_t numVertices() const
{
return this->numSeenVertices_;
}
/// Retrieve reachable index of vertex.
///
/// \param[in] vertex Vertex ID in original numbering
///
/// \return Reachable index of \p vertex. -1 if \p vertex is not
/// reachable.
int operator[](const std::size_t vertex) const
{
return this->index_[vertex];
}
private:
/// Indices of reachable vertices.
std::vector<int> index_{};
/// Number of reachable vertices. One more than the maximum value
/// in \c index_.
int numSeenVertices_{0};
};
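A small worked example of the enumeration, following directly from the constructor above; the asserts are purely illustrative and would additionally require <cassert>.

const auto edges = std::vector<std::pair<int, int>> { {0, 2}, {2, 4} };
const auto vertexId = EnumerateSeenVertices { 5, edges };

// Vertices 0, 2 and 4 appear in an edge and are therefore "seen";
// vertices 1 and 3 are not and keep the sentinel index -1.
assert(vertexId.numVertices() == 3);
assert((vertexId[0] == 0) && (vertexId[1] == -1) && (vertexId[2] == 1));
assert((vertexId[3] == -1) && (vertexId[4] == 2));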
/// Unstructured graph of reachable vertices
class VertexGraph
{
public:
/// Constructor.
///
/// \tparam Edge Type for representing a directed edge between a
/// pair of vertices. Must support \code std::get<>() \endcode
/// protocol. Typically a \code std::pair<> \endcode of integral
/// types.
///
/// \tparam GlobalCellID Callback type for mapping a vertex ID to a
/// globally unique ID. Typically the same as
/// \code ParallelNLDDPartitioningZoltan::GlobalCellID \endcode.
///
/// \param[in] myRank Current rank in MPI communicator. Needed by
/// Zoltan.
///
/// \param[in] numVertices Maximum number of vertices in graph.
///
/// \param[in] edges Edge representation of connectivity graph.
///
/// \param[in] vertexId Enumeration of reachable vertices.
///
/// \param[in] globalCell Callback for mapping (local) vertex IDs to
/// globally unique vertex IDs.
template <typename Edge, typename GlobalCellID>
explicit VertexGraph(const int myRank,
const std::size_t numVertices,
const std::vector<Edge>& edges,
const EnumerateSeenVertices& vertexId,
GlobalCellID&& globalCell)
: myRank_ { myRank }
{
// Form undirected connectivity graph.
for (const auto& [v1, v2] : edges) {
this->graph_.addConnection(vertexId[v1], vertexId[v2]);
this->graph_.addConnection(vertexId[v2], vertexId[v1]);
}
this->graph_.compress(vertexId.numVertices());
// Form local-to-global vertex ID mapping for reachable vertices.
this->globalCell_.resize(vertexId.numVertices());
for (auto vertex = 0*numVertices; vertex < numVertices; ++vertex) {
if (const auto localIx = vertexId[vertex]; localIx >= 0) {
this->globalCell_[localIx] = globalCell(vertex);
}
}
}
/// Retrieve my rank in current MPI communicator.
///
/// Needed by Zoltan.
int myRank() const
{
return this->myRank_;
}
/// Retrieve number of vertices in connectivity graph.
int numVertices() const
{
return static_cast<int>(this->graph_.numVertices());
}
/// Retrieve globally unique ID of reachable vertex.
///
/// \param[in] localCell Index of locally reachable cell/vertex.
int globalId(const int localCell) const
{
return this->globalCell_[localCell];
}
/// Get read-only access to start pointers of graph's CSR representation.
///
/// \return Reference to start pointers (IA array).
decltype(auto) startPointers() const
{
return this->graph_.startPointers();
}
/// Get read-only access to column indices of graph's CSR representation.
///
/// \return Reference to column indices (JA array).
decltype(auto) columnIndices() const
{
return this->graph_.columnIndices();
}
private:
// VertexID = int, TrackCompressedIdx = false
using Backend = Opm::utility::CSRGraphFromCoordinates<>;
/// My rank in current MPI communicator.
int myRank_{};
/// Globally unique vertex ID of each locally reachable vertex.
std::vector<int> globalCell_{};
/// Vertex connectivity graph.
Backend graph_{};
};
// Use C linkage for Zoltan interface/query functions. Ensures maximum compatibility.
extern "C" {
/// Compute number of vertices in connectivity graph.
///
/// Callback for Zoltan_Set_Num_Obj_Fn.
///
/// \param[in] graphPtr Opaque user data. Assumed to point to a
/// \c VertexGraph instance.
///
/// \param[out] ierr Error code for Zoltan consumption. Single \c int.
int numVertices(void* graphPtr, int* ierr)
{
*ierr = ZOLTAN_OK;
return static_cast<VertexGraph*>(graphPtr)->numVertices();
}
/// Compute number of neighbours for each vertex in connectivity graph.
///
/// Callback for Zoltan_Set_Num_Edges_Multi_Fn.
///
/// \param[in] graphPtr Opaque user data. Assumed to point to a
/// \c VertexGraph instance.
///
/// \param[in] numCells Number of cells/vertices/objects for which to
/// compute number of neighbours.
///
/// \param[in] localID Local IDs of those cells/vertices/objects for
/// which to compute the respective number of neighbours.
///
/// \param[in,out] numEdges Number of neighbours (in/out edges) for each
/// cell/vertex/object. Size equal to \code numVertices(graphPtr)
/// \endcode. Populated by this function. Allocated by Zoltan.
///
/// \param[out] ierr Error code for Zoltan consumption. Single \c int.
void numEdges(void* graphPtr,
const int /* sizeGID */,
const int /* sizeLID */,
const int numCells,
ZOLTAN_ID_PTR /* globalID */,
ZOLTAN_ID_PTR localID,
int* numEdges,
int* ierr)
{
const auto& ia = static_cast<VertexGraph*>(graphPtr)->startPointers();
for (auto cell = 0*numCells; cell < numCells; ++cell) {
const auto localCell = localID[cell];
numEdges[cell] = ia[localCell + 1] - ia[localCell + 0];
}
*ierr = ZOLTAN_OK;
}
/// Identify objects/vertices in connectivity graph.
///
/// Callback for Zoltan_Set_Obj_List_Fn.
///
/// \param[in] graphPtr Opaque user data. Assumed to point to a
/// \c VertexGraph instance.
///
/// \param[in] numElmsPerGid Number of ZOLTAN_ID_TYPE elements needed to
/// describe a single globally unique object/vertex ID. Must be one
/// (1) in this implementation.
///
/// \param[in] numElmsPerLid Number of ZOLTAN_ID_TYPE elements needed to
/// describe a single local object/vertex ID. Must be one (1) in this
/// implementation.
///
/// \param[in,out] globalIds Globally unique object/vertex IDs. Size
/// equal to \code numElmsPerGid * numVertices(graphPtr) \endcode.
/// Populated by this function. Allocated by Zoltan.
///
/// \param[in,out] localIds Local object/vertex IDs. Size equal to
/// \code numElmsPerLid * numVertices(graphPtr) \endcode. Populated
/// by this function. Allocated by Zoltan.
///
/// \param[out] ierr Error code for Zoltan consumption. Single \c int.
void vertexList(void* graphPtr,
const int numElmsPerGid,
const int numElmsPerLid,
ZOLTAN_ID_PTR globalIds,
ZOLTAN_ID_PTR localIds,
const int /* wgtDim */,
float* /* objWgts */,
int* ierr)
{
if ((numElmsPerGid != numElmsPerLid) || (numElmsPerLid != 1)) {
*ierr = ZOLTAN_FATAL;
}
const auto* graph = static_cast<VertexGraph*>(graphPtr);
if (graph->numVertices() == 0) {
*ierr = ZOLTAN_OK;
return;
}
std::iota(&localIds[0], &localIds[0] + graph->numVertices(), ZOLTAN_ID_TYPE{0});
std::transform(&localIds[0],
&localIds[0] + graph->numVertices(),
&globalIds[0],
[graph](const int localCell) -> ZOLTAN_ID_TYPE
{
return graph->globalId(localCell);
});
*ierr = ZOLTAN_OK;
}
/// Identify neighbours in connectivity graph.
///
/// Callback for Zoltan_Set_Edge_List_Multi_Fn
///
/// \param[in] graphPtr Opaque user data. Assumed to point to a
/// \c VertexGraph instance.
///
/// \param[in] numElmsPerGid Number of ZOLTAN_ID_TYPE elements needed to
/// describe a single globally unique object/vertex ID. Must be one
/// (1) in this implementation.
///
/// \param[in] numElmsPerLid Number of ZOLTAN_ID_TYPE elements needed to
/// describe a single local object/vertex ID. Must be one (1) in this
/// implementation.
///
/// \param[in] numCells Number of cells/vertices/objects for which to
/// identify the neighbouring cells/vertices/objects.
///
/// \param[in] localIds Local object/vertex IDs. Size equal to \code
/// numElmsPerLid * numVertices(graphPtr) \endcode.
///
/// \param[in,out] neighbourGid Globally unique object ID of each
/// neighbour cell/vertex/object. Populated by this function.
/// Allocated by Zoltan.
///
/// \param[in,out] neighbourProc Owner of each neighbouring
/// cell/vertex/object. Populated by this function. Allocated by
/// Zoltan.
///
/// \param[out] ierr Error code for Zoltan consumption. Single \c int.
void edgeList(void* graphPtr,
const int numElmsPerGid,
const int numElmsPerLid,
const int numCells,
ZOLTAN_ID_PTR /* globalIds */,
ZOLTAN_ID_PTR localIds,
int* /* numEdges */,
ZOLTAN_ID_PTR neighbourGid,
int* neighbourProc,
int /* weightDim */,
float* /* edgeWeights */,
int* ierr)
{
const auto* graph = static_cast<VertexGraph*>(graphPtr);
if ((numElmsPerGid != numElmsPerLid) ||
(numElmsPerLid != 1) ||
(numCells != graph->numVertices()))
{
*ierr = ZOLTAN_FATAL;
}
if (graph->numVertices() == 0) {
*ierr = ZOLTAN_OK;
return;
}
const auto& ia = graph->startPointers();
const auto& ja = graph->columnIndices();
for (auto cell = 0*numCells, ix = 0; cell < numCells; ++cell) {
const auto localCell = localIds[cell];
for (auto neighIx = ia[localCell], end = ia[localCell + 1];
neighIx != end; ++neighIx, ++ix)
{
neighbourGid [ix] = static_cast<ZOLTAN_ID_TYPE>(graph->globalId(ja[neighIx]));
neighbourProc[ix] = graph->myRank();
}
}
}
} // extern "C"
/// Partition VertexGraph objects using Zoltan graph partitioning software.
class Partitioner
{
public:
/// Constructor.
///
/// \param[in] comm MPI communicator.
///
/// \param[in] params Control parameters for Zoltan graph partitioning procedure.
explicit Partitioner(const Opm::Parallel::Communication comm,
const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params);
/// Destructor.
///
/// Destroys Zoltan context.
~Partitioner();
/// Partition VertexGraph instance.
///
/// \param[in] graphPtr Graph being partitioned. Assumed to point
/// to a \c VertexGraph instance that is fully populated by caller.
///
/// \param[in] numCells Number of vertices in graph pointed to by \p
/// graphPtr. Used to size the resulting partition vector.
///
/// \return Partition vector. Non-negative block IDs for each of
/// the \p numCells cells/vertices/objects in \p graphPtr.
std::vector<int> operator()(void* graphPtr, const std::size_t numCells);
private:
/// Helper RAII type to manage partition ("part") memory allocated
/// by Zoltan.
struct ZoltanPart
{
/// Globally unique cell/vertex/object IDs.
ZOLTAN_ID_PTR globalId{nullptr};
/// Local cell/vertex/object IDs.
ZOLTAN_ID_PTR localId{nullptr};
/// Owning process for each cell/vertex/object.
int* process{nullptr};
/// Block/domain to which each cell/vertex/object is assigned.
int* block{nullptr};
/// Destructor.
///
/// Releases partition ("part") memory acquired by Zoltan.
~ZoltanPart();
};
/// Zoltan library context.
Zoltan_Struct* zz_{nullptr};
};
Partitioner::ZoltanPart::~ZoltanPart()
{
Zoltan_LB_Free_Part(&this->globalId,
&this->localId,
&this->process,
&this->block);
}
Partitioner::Partitioner(const Opm::Parallel::Communication comm,
const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params)
{
const auto argc = 0;
char* argv[] = { nullptr };
auto ver = 0.0f;
const auto rc = Zoltan_Initialize(argc, argv, &ver);
if (rc != ZOLTAN_OK) {
OPM_THROW(std::runtime_error, "Unable to Initialise Zoltan");
}
this->zz_ = Zoltan_Create(comm);
setZoltanParameters(params, this->zz_);
}
Partitioner::~Partitioner()
{
Zoltan_Destroy(&this->zz_);
}
std::vector<int>
Partitioner::operator()(void* graphPtr, const std::size_t numCells)
{
Zoltan_Set_Num_Obj_Fn (this->zz_, &numVertices, graphPtr);
Zoltan_Set_Num_Edges_Multi_Fn(this->zz_, &numEdges , graphPtr);
Zoltan_Set_Obj_List_Fn (this->zz_, &vertexList, graphPtr);
Zoltan_Set_Edge_List_Multi_Fn(this->zz_, &edgeList , graphPtr);
auto send = ZoltanPart{}; // Objects to export/send to "other" processes/parts
auto recv = ZoltanPart{}; // Objects to import/receive from "other" processes/parts
auto partitionChanged = 0;
auto numElmPerGid = 1;
auto numElmPerLid = 1;
auto numRecv = 0;
auto numSend = 0;
const auto rc = Zoltan_LB_Partition
(this->zz_,
&partitionChanged,
&numElmPerGid, &numElmPerLid,
&numRecv, &recv.globalId, &recv.localId, &recv.process, &recv.block,
&numSend, &send.globalId, &send.localId, &send.process, &send.block);
if (rc != ZOLTAN_OK) {
OPM_THROW(std::runtime_error, "Failed to Partition Domain Graph");
}
auto blocks = std::vector<int>(numCells, 0);
for (auto e = 0*numSend; e < numSend; ++e) {
blocks[send.localId[e]] = send.block[e];
}
return blocks;
}
void forceSameDomain(const std::vector<int>& cells,
std::vector<int>& parts)
{
if (cells.empty()) { return; }
const auto first = parts[cells.front()];
for (auto& cell : cells) {
parts[cell] = first;
}
}
std::vector<int> compressPartitionIDs(std::vector<int>&& parts0)
{
auto partition = std::move(parts0);
if (! partition.empty()) {
auto mmPos = std::minmax_element(partition.begin(), partition.end());
auto seen = std::vector<bool>(*mmPos.second - *mmPos.first + 1, false);
for (const auto& domain : partition) {
seen[domain - *mmPos.first] = domain >= 0;
}
auto num_domains = 0;
auto compressed = std::vector<int>(*mmPos.second - *mmPos.first + 1, -1);
for (auto i = 0*compressed.size(); i < compressed.size(); ++i) {
if (seen[i]) {
compressed[i] = num_domains++;
}
}
for (auto& domain : partition) {
if (domain >= 0) {
domain = compressed[domain - *mmPos.first];
}
}
}
return partition;
}
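A short worked example of the two helpers above, reading the results straight off the code:
//   parts = { 0, 1, 2, 3 };
//   forceSameDomain({1, 3}, parts)        =>  parts == { 0, 1, 2, 1 }
//
//   compressPartitionIDs({ 3, -1, 5, 3 })  ==  { 0, -1, 1, 0 }
//   (block IDs renumbered consecutively; unreachable cells keep -1)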
} // Anonymous namespace
std::vector<int>
Opm::ParallelNLDDPartitioningZoltan::partitionElements(const ZoltanParamMap& params) const
{
const auto vertexId = EnumerateSeenVertices { this->numElements_, this->conns_ };
auto graph = VertexGraph {
this->comm_.rank(), this->numElements_,
this->conns_, vertexId, this->globalCell_
};
const auto partsForReachableCells = Partitioner {
this->comm_, params
}(static_cast<void*>(&graph), graph.numVertices());
// Map reachable cells back to full cell numbering.
auto parts = std::vector<int>(this->numElements_, -1);
for (auto elm = 0*this->numElements_; elm < this->numElements_; ++elm) {
if (const auto reachableElmIx = vertexId[elm]; reachableElmIx >= 0) {
parts[elm] = partsForReachableCells[reachableElmIx];
}
}
for (const auto& cells : this->sameDomain_) {
::forceSameDomain(cells, parts);
}
return compressPartitionIDs(std::move(parts));
}

opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp

@ -0,0 +1,129 @@
/*
Copyright 2023 Equinor ASA
This file is part of the Open Porous Media Project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_UTILITY_PARALLEL_NLDD_PARTITIONING_ZOLTAN_HPP
#define OPM_UTILITY_PARALLEL_NLDD_PARTITIONING_ZOLTAN_HPP
#include <opm/simulators/utils/ParallelCommunication.hpp>
#include <cstddef>
#include <functional>
#include <map>
#include <string>
#include <utility>
#include <vector>
namespace Opm {
/// Partition connectivity graph into non-overlapping domains using the
/// Zoltan graph partitioning software package. Primarily intended for
/// use in NLDD-based non-linear solves.
class ParallelNLDDPartitioningZoltan
{
public:
/// Callback type for mapping a vertex/cell ID to a globally unique
/// ID.
using GlobalCellID = std::function<int(int)>;
/// Collection of parameters to control the partitioning procedure.
using ZoltanParamMap = std::map<std::string, std::string>;
/// Constructor.
///
/// \param[in] comm MPI communication object. Needed by Zoltan.
///
/// \param[in] numElements Number of potential vertices in
/// connectivity graph. Typically the total number of cells on
/// the current rank, i.e., both owned cells and overlap cells.
///
/// \param[in] globalCell Callback for mapping (local) vertex IDs to
/// globally unique vertex IDs.
explicit ParallelNLDDPartitioningZoltan(const Parallel::Communication comm,
const std::size_t numElements,
const GlobalCellID& globalCell)
: comm_ { comm }
, numElements_ { numElements }
, globalCell_ { globalCell }
{}
/// Insert directed graph edge between two vertices.
///
/// \param[in] c1 Source vertex.
///
/// \param[in] c2 Sink/destination vertex.
void registerConnection(std::size_t c1, std::size_t c2)
{
this->conns_.emplace_back(c1, c2);
}
/// Force collection of cells to be in same result domain.
///
/// Mostly a means of ensuring that wells do not intersect multiple
/// domains/blocks.
///
/// \param[in] cells Cell collection. Typically those cells which
/// are intersected by a single well.
void forceSameDomain(std::vector<int>&& cells)
{
this->sameDomain_.emplace_back(std::move(cells));
}
/// Partition connectivity graph using Zoltan graph partitioning
/// package.
///
/// Honours any prescribed requirement that certain cells be placed
/// in a single domain/block.
///
/// \param[in] params Parameters for Zoltan. Override default
/// settings.
///
/// \return Partition vector of size \p numElements. Reachable
/// vertices/cells are partitioned into N blocks numbered 0..N-1.
/// Unreachable vertices get a block ID of -1.
std::vector<int>
partitionElements(const ZoltanParamMap& params) const;
private:
/// Connection/graph edge.
using Connection = std::pair<std::size_t, std::size_t>;
/// MPI communication object. Needed by Zoltan.
Parallel::Communication comm_{};
/// Maximum number of vertices in connectivity graph. Needed to
/// size partition vector return value.
std::size_t numElements_{};
/// Callback for mapping (local) vertex/cell IDs to globally unique
/// vertex/cell IDs.
GlobalCellID globalCell_{};
/// Connectivity graph edges.
std::vector<Connection> conns_{};
/// Collections of vertices/cells which must be coalesced to the
/// same domain/block. All vertices within a single collection will
/// be placed on the same domain, but cells from different
/// collections may be placed on different domains.
std::vector<std::vector<int>> sameDomain_{};
};
} // namespace Opm
#endif // OPM_UTILITY_PARALLEL_NLDD_PARTITIONING_ZOLTAN_HPP
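A compact usage sketch of the class, assuming the header above is included and that comm is an existing Opm::Parallel::Communication object; the four-cell chain, the identity global-ID mapping, and the parameter values are illustrative only.

// Four local cells numbered 0..3, identity mapping to global IDs.
auto partitioner = Opm::ParallelNLDDPartitioningZoltan {
    comm, 4, [](const int cell) { return cell; }
};

// Chain of cell neighbours 0 -- 1 -- 2 -- 3 (register both directions).
for (const auto& [c1, c2] : { std::pair{0, 1}, std::pair{1, 2}, std::pair{2, 3} }) {
    partitioner.registerConnection(c1, c2);
    partitioner.registerConnection(c2, c1);
}

// Keep the cells of one (hypothetical) well in the same block.
partitioner.forceSameDomain({ 2, 3 });

const auto blocks = partitioner.partitionElements({
    { "NUM_GLOBAL_PARTS", "2"    },
    { "IMBALANCE_TOL"   , "1.03" },
});
// blocks.size() == 4, and blocks[2] == blocks[3] is guaranteed by forceSameDomain().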

opm/simulators/wells/BlackoilWellModel_impl.hpp

@ -2612,22 +2612,30 @@ namespace Opm {
setupDomains(const std::vector<Domain>& domains)
{
OPM_BEGIN_PARALLEL_TRY_CATCH();
// TODO: This loop nest may be slow for very large numbers
// of domains and wells, but that has not been observed on
// tests so far. Using the partition vector instead would
// be faster if we need to change.
// TODO: This loop nest may be slow for very large numbers of
// domains and wells, but that has not been observed on tests so
// far. Using the partition vector instead would be faster if we
// need to change.
for (const auto& wellPtr : this->well_container_) {
const int first_well_cell = wellPtr->cells()[0];
const int first_well_cell = wellPtr->cells().front();
for (const auto& domain : domains) {
const bool found = std::binary_search(domain.cells.begin(), domain.cells.end(), first_well_cell);
if (found) {
auto cell_present = [&domain](const auto cell)
{
return std::binary_search(domain.cells.begin(),
domain.cells.end(), cell);
};
if (cell_present(first_well_cell)) {
// Assuming that if the first well cell is found in a domain,
// then all of that well's cells are in that same domain.
well_domain_[wellPtr->name()] = domain.index;
// Verify that all of that well's cells are in that same domain.
for (int well_cell : wellPtr->cells()) {
if (!std::binary_search(domain.cells.begin(), domain.cells.end(), well_cell)) {
OPM_THROW(std::runtime_error, "Well found on multiple domains.");
if (! cell_present(well_cell)) {
OPM_THROW(std::runtime_error,
fmt::format("Well '{}' found on multiple domains.",
wellPtr->name()));
}
}
}