Make Zoltan-Based Domain Partitioning MPI Aware

This commit switches the current implementation of
'partitionCellsZoltan()', i.e., 'partitionCells("zoltan", ...)', to
using the MPI-aware ParallelNLDDPartitioningZoltan utility.  In
doing so we make 'partitionCellsZoltan()' private since its
availability is not guaranteed.  We also slightly reorder the
parameters and switch from passing a "Grid" to passing a
"GridView" as an argument to partitionCells(), and specialise this
function for the known grid views in OPM Flow.

We extract the Zoltan-related parameters into an Entity-dependent
helper structure and move the complexity of forming this type into
a new helper function, BlackoilModelEbosNldd::partitionCells().
Bård Skaflestad 2023-09-12 16:52:37 +02:00
parent 27aaa28c71
commit ed77c90238
5 changed files with 487 additions and 104 deletions
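
As a quick orientation before the diff itself: the Entity-dependent
helper structure mentioned above bundles an imbalance tolerance with
two callbacks, one mapping a grid entity to its local on-rank index
and one mapping that local index to a globally unique cell ID.  The
following standalone sketch mirrors that shape with a toy element
type; the real type is Opm::ZoltanPartitioningControl<Element>,
declared in the partitionCells header changed below.

#include <functional>
#include <iostream>

// Toy stand-in for a grid view entity; in Flow this would be
// GridView::Codim<0>::Entity.
struct ToyElement { int localIndex; };

// Same shape as Opm::ZoltanPartitioningControl<Element>: an imbalance
// tolerance plus the two index-mapping callbacks used by the partitioner.
template <typename Element>
struct PartitioningControlSketch
{
    double domain_imbalance{1.03};
    std::function<int(const Element&)> index;
    std::function<int(int)> local_to_global;
};

int main()
{
    auto ctrl = PartitioningControlSketch<ToyElement>{};
    ctrl.index = [](const ToyElement& e) { return e.localIndex; };

    // Hypothetical mapping to a globally unique (e.g. Cartesian) cell ID.
    ctrl.local_to_global = [](const int localIdx) { return 100 + localIdx; };

    const auto cell = ToyElement{7};
    std::cout << "local " << ctrl.index(cell)
              << " -> global " << ctrl.local_to_global(ctrl.index(cell)) << '\n';
}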

View File

@@ -54,12 +54,14 @@
#include <cassert>
#include <cmath>
#include <cstddef>
#include <functional>
#include <iomanip>
#include <ios>
#include <memory>
#include <numeric>
#include <sstream>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
@@ -93,16 +95,8 @@ public:
BlackoilModelEbosNldd(BlackoilModelEbos<TypeTag>& model)
: model_(model)
{
const auto& grid = model_.ebosSimulator().vanguard().grid();
const auto& schedule = model_.ebosSimulator().vanguard().schedule();
// Create partitions.
const auto& [partition_vector, num_domains] =
partitionCells(grid,
schedule.getWellsatEnd(),
model_.param().local_domain_partition_method_,
model_.param().num_local_domains_,
model_.param().local_domain_partition_imbalance_);
const auto& [partition_vector, num_domains] = this->partitionCells();
// Scan through partitioning to get correct size for each.
std::vector<int> sizes(num_domains, 0);
@@ -121,6 +115,8 @@ public:
// Iterate through grid once, setting the seeds of all partitions.
// Note: owned cells only!
const auto& grid = model_.ebosSimulator().vanguard().grid();
std::vector<int> count(num_domains, 0);
const auto& gridView = grid.leafGridView();
const auto beg = gridView.template begin<0, Dune::Interior_Partition>();
@@ -835,6 +831,41 @@ private:
return errorPV;
}
decltype(auto) partitionCells() const
{
const auto& grid = this->model_.ebosSimulator().vanguard().grid();
using GridView = std::remove_cv_t<std::remove_reference_t<decltype(grid.leafGridView())>>;
using Element = std::remove_cv_t<std::remove_reference_t<typename GridView::template Codim<0>::Entity>>;
const auto& param = this->model_.param();
auto zoltan_ctrl = ZoltanPartitioningControl<Element>{};
zoltan_ctrl.domain_imbalance = param.local_domain_partition_imbalance_;
zoltan_ctrl.index =
[elementMapper = &this->model_.ebosSimulator().model().elementMapper()]
(const Element& element)
{
return elementMapper->index(element);
};
zoltan_ctrl.local_to_global =
[cartMapper = &this->model_.ebosSimulator().vanguard().cartesianIndexMapper()]
(const int elemIdx)
{
return cartMapper->cartesianIndex(elemIdx);
};
// Forming the list of wells is expensive, so do this only if needed.
const auto need_wells = param.local_domain_partition_method_ == "zoltan";
const auto wells = need_wells
? this->model_.ebosSimulator().vanguard().schedule().getWellsatEnd()
: std::vector<Well>{};
return ::Opm::partitionCells(param.local_domain_partition_method_,
param.num_local_domains_,
grid.leafGridView(), wells, zoltan_ctrl);
}
BlackoilModelEbos<TypeTag>& model_; //!< Reference to model
std::vector<Domain> domains_; //!< Vector of subdomains
std::vector<std::unique_ptr<Mat>> domain_matrices_; //!< Vector of matrix operator for each subdomain
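
The constructor above scans the partition vector once to determine
how many interior cells each subdomain receives.  A standalone sketch
of that counting step, using a made-up partition vector in place of
the result of this->partitionCells():

#include <iostream>
#include <vector>

int main()
{
    // Example result of a partitioning: subdomain ID per interior cell.
    const std::vector<int> partition_vector {0, 0, 1, 2, 1, 0, 2, 2};
    const int num_domains = 3;

    // Scan through the partitioning to get the correct size of each domain.
    std::vector<int> sizes(num_domains, 0);
    for (const int domain : partition_vector) {
        ++sizes[domain];
    }

    for (int d = 0; d < num_domains; ++d) {
        std::cout << "domain " << d << ": " << sizes[d] << " cells\n";
    }
    // Prints 3, 2, and 3 cells for domains 0, 1, and 2, respectively.
}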

View File

@@ -18,15 +18,26 @@
*/
#include <config.h>
#include <opm/simulators/flow/partitionCells.hpp>
#include <opm/simulators/flow/countGlobalCells.hpp>
#if HAVE_MPI && HAVE_ZOLTAN
#include <opm/simulators/utils/DeferredLogger.hpp>
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <opm/simulators/utils/gatherDeferredLogger.hpp>
#include <opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp>
#include <opm/simulators/utils/SetupZoltanParams.hpp>
#include <opm/input/eclipse/Schedule/Well/Connection.hpp>
#include <opm/input/eclipse/Schedule/Well/Well.hpp>
#include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
#endif // HAVE_MPI && HAVE_ZOLTAN
#include <opm/grid/CpGrid.hpp>
#include <opm/grid/polyhedralgrid.hh>
#include <opm/input/eclipse/Schedule/Well/Well.hpp>
#include <opm/simulators/flow/countGlobalCells.hpp>
#if HAVE_DUNE_ALUGRID
#include <dune/alugrid/grid.hh>
#endif // HAVE_DUNE_ALUGRID
@@ -40,68 +51,351 @@
#include <numeric>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
// ===========================================================================
// Zoltan-based partitioning if available in this build configuration.
// ---------------------------------------------------------------------------
#if HAVE_MPI && HAVE_ZOLTAN
namespace {
std::pair<std::vector<int>, int>
countDomains(std::vector<int> partition_vector)
/// Partition rank's interior cells into non-overlapping domains using the
/// Zoltan graph partitioning software package.
class ZoltanPartitioner
{
auto maxPos = std::max_element(partition_vector.begin(),
partition_vector.end());
public:
/// Constructor.
///
/// \tparam GridView DUNE grid view type
///
/// \param[in] grid_view Current rank's reachable cells.
///
/// \param[in] local_to_global Callback for mapping rank's local cells
/// to globally unique cell IDs. May for instance return the
/// Cartesian cell ID of each local cell.
template <class GridView>
explicit ZoltanPartitioner(const GridView& grid_view,
const Opm::ParallelNLDDPartitioningZoltan::GlobalCellID& local_to_global);
const auto num_domains = (maxPos == partition_vector.end())
? 0 : *maxPos + 1;
/// Form internal connectivity graph between rank's reachable, interior
/// cells.
///
/// \tparam GridView DUNE grid view type
///
/// \tparam Element Grid view's representation of a cell.
///
/// \param[in] grid_view Current rank's reachable cells.
///
/// \param[in] wells Collection of wells to which this rank's partition
/// vector must be adapted. Wells must not intersect multiple local
/// domains/blocks.
///
/// \param[in] zoltan_ctrl Control parameters for on-rank subdomain
/// partitioning.
template <class GridView, class Element>
void buildLocalGraph(const GridView& grid_view,
const std::vector<Opm::Well>& wells,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl);
return { std::move(partition_vector), num_domains };
/// Partition rank's interior cells into non-overlapping domains using
/// the Zoltan graph partitioning software package.
///
/// \tparam GridView DUNE grid view type
///
/// \tparam Element Grid view's representation of a cell.
///
/// \param[in] num_local_domains Target number of domains into which the
/// full model will be partitioned. Note: This number is treated as a
/// global parameter across all MPI ranks and is not used as a number
/// of domains per rank. The final number of model subdomains may
/// differ from this number when adapting to well constraints and if,
/// for whatever reason, Zoltan happens to generate a global
/// partitioning for which one domain/block happens to cross process
/// boundaries.
///
/// \param[in] grid_view Current rank's reachable cells.
///
/// \param[in] zoltan_ctrl Control parameters for on-rank subdomain
/// partitioning.
///
/// \return Partition vector for rank's interior cells and number of
/// unique domains across those interior cells.
template <class GridView, class Element>
std::pair<std::vector<int>, int>
partition(const int num_local_domains,
const GridView& grid_view,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl) const;
private:
/// Zoltan partitioning backend.
Opm::ParallelNLDDPartitioningZoltan partitioner_;
/// Form internal connectivity graph between rank's reachable, interior
/// cells.
///
/// \tparam GridView DUNE grid view type
///
/// \tparam Element Grid view's representation of a cell.
///
/// \param[in] grid_view Current rank's reachable cells.
///
/// \param[in] zoltan_ctrl Control parameters for on-rank subdomain
/// partitioning.
///
/// \return Mapping from globally unique cell IDs to local, on-rank
/// active cell IDs. Effectively the cached result of \code
/// zoltan_ctrl.index() \endcode for each globally unique cell ID.
template <class GridView, class Element>
std::unordered_map<int, int>
connectElements(const GridView& grid_view,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl);
/// Adapt internal connectivity graph to account for connections induced
/// by well reservoir connections.
///
/// \tparam Comm MPI communication type.
///
/// \param[in] comm MPI communication object. Needed to get a global
/// view of any exceptions thrown in the implementation.
///
/// \param[in] wells Collection of wells to which this rank's partition
/// vector must be adapted. Wells must not intersect multiple local
/// domains/blocks.
///
/// \param[in] g2l Mapping from globally unique cell IDs to local,
/// on-rank active cell IDs. Return value from \c connectElements().
template <typename Comm>
void connectWells(const Comm comm,
const std::vector<Opm::Well>& wells,
const std::unordered_map<int, int>& g2l);
};
// Note: "grid_view.size(0)" is intentional here. It is not an error. The
// ParallelNLDDPartitioningZoltan type will internally enumerate interior
// cells in a consecutive sequence, regardless of the ordering in the
// grid_view object.
template <class GridView>
ZoltanPartitioner::ZoltanPartitioner(const GridView& grid_view,
const Opm::ParallelNLDDPartitioningZoltan::GlobalCellID& local_to_global)
: partitioner_(grid_view.comm(), grid_view.size(0), local_to_global)
{}
template <class GridView, class Element>
void ZoltanPartitioner::buildLocalGraph(const GridView& grid_view,
const std::vector<Opm::Well>& wells,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
{
this->connectWells(grid_view.comm(), wells, this->connectElements(grid_view, zoltan_ctrl));
}
template <typename, class = void>
struct HasZoltanPartitioning : public std::false_type {};
template <class GridView, class Element>
std::pair<std::vector<int>, int>
ZoltanPartitioner::partition(const int num_local_domains,
const GridView& grid_view,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl) const
{
auto partition = std::pair<std::vector<int>, int>{};
template <typename GridType>
struct HasZoltanPartitioning<
GridType,
std::void_t<decltype(std::declval<const GridType&>().zoltanPartitionWithoutScatter
(std::declval<const std::vector<Opm::Well>*>(),
std::declval<const double*>(),
std::declval<const int>(),
std::declval<const double>()))>
> : public std::true_type {};
auto param = Opm::setupZoltanParams("graph");
param.insert_or_assign("NUM_GLOBAL_PARTS", fmt::format("{}", num_local_domains));
param.insert_or_assign("IMBALANCE_TOL" , fmt::format("{}", zoltan_ctrl.domain_imbalance));
} // anonymous namespace
const auto domainsAll = this->partitioner_.partitionElements(param);
namespace Opm {
auto& [domains, num_domains] = partition;
domains.reserve(domainsAll.size());
num_domains = 0;
template<class Grid>
std::pair<std::vector<int>, int> partitionCells(const Grid& grid,
const std::vector<Well>& wells,
const std::string& method,
for (const auto& cell : elements(grid_view, Dune::Partitions::interior)) {
const auto domainId = domains.emplace_back(domainsAll[zoltan_ctrl.index(cell)]);
if (domainId >= num_domains) {
num_domains = domainId + 1;
}
}
// Fix-up for an extreme case: Interior cells whose neighbours are all
// on a different rank--i.e., interior cells which do not have on-rank
// neighbours. Make each such cell be a separate domain on this rank.
// Don't remove this code unless you've taken remedial action elsewhere
// to ensure the situation doesn't happen. For what it's worth, we've
// seen this case occur in practice when testing on field cases.
for (auto& domainId : domains) {
if (domainId < 0) {
domainId = num_domains++;
}
}
return partition;
}
template <class GridView, class Element>
std::unordered_map<int, int>
ZoltanPartitioner::connectElements(const GridView& grid_view,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
{
auto g2l = std::unordered_map<int, int>{};
for (const auto& element : elements(grid_view, Dune::Partitions::interior)) {
{
const auto c = zoltan_ctrl.index(element);
g2l.insert_or_assign(zoltan_ctrl.local_to_global(c), c);
}
for (const auto& is : intersections(grid_view, element)) {
if (! is.neighbor()) {
continue;
}
const auto& in = is.inside();
const auto& out = is.outside();
if ((in .partitionType() != Dune::InteriorEntity) ||
(out.partitionType() != Dune::InteriorEntity))
{
// Connect cells in interior only.
continue;
}
const auto c1 = zoltan_ctrl.index(in);
const auto c2 = zoltan_ctrl.index(out);
this->partitioner_.registerConnection(c1, c2);
this->partitioner_.registerConnection(c2, c1);
}
}
return g2l;
}
template <typename Comm>
void ZoltanPartitioner::connectWells(const Comm comm,
const std::vector<Opm::Well>& wells,
const std::unordered_map<int, int>& g2l)
{
auto distributedWells = 0;
for (const auto& well : wells) {
auto cellIx = std::vector<int>{};
auto otherProc = 0;
for (const auto& conn : well.getConnections()) {
auto locPos = g2l.find(conn.global_index());
if (locPos == g2l.end()) {
++otherProc;
continue;
}
cellIx.push_back(locPos->second);
}
if ((otherProc > 0) && !cellIx.empty()) {
++distributedWells;
continue;
}
const auto nc = cellIx.size();
for (auto ic1 = 0*nc; ic1 < nc; ++ic1) {
for (auto ic2 = ic1 + 1; ic2 < nc; ++ic2) {
this->partitioner_.registerConnection(cellIx[ic1], cellIx[ic2]);
this->partitioner_.registerConnection(cellIx[ic2], cellIx[ic1]);
}
}
if (! cellIx.empty()) {
// All cells intersected by a single well go in the same domain.
this->partitioner_.forceSameDomain(std::move(cellIx));
}
}
OPM_BEGIN_PARALLEL_TRY_CATCH()
if (distributedWells > 0) {
const auto* pl = (distributedWells == 1) ? "" : "s";
throw std::invalid_argument {
fmt::format(R"({} distributed well{} detected on rank {}."
This is not supported in the current NLDD implementation.)",
distributedWells, pl, comm.rank())
};
}
OPM_END_PARALLEL_TRY_CATCH(std::string { "ZoltanPartitioner::connectWells()" }, comm)
}
template <class GridView, class Element>
std::pair<std::vector<int>, int>
partitionCellsZoltan(const int num_domains,
const GridView& grid_view,
const std::vector<Opm::Well>& wells,
const Opm::ZoltanPartitioningControl<Element>& zoltan_ctrl)
{
if (num_domains <= 1) { // No partitioning => every cell in domain zero.
const auto num_interior_cells =
Opm::detail::countLocalInteriorCellsGridView(grid_view);
return {
std::piecewise_construct,
std::forward_as_tuple(num_interior_cells, 0),
std::forward_as_tuple(1)
};
}
auto partitioner = ZoltanPartitioner { grid_view, zoltan_ctrl.local_to_global };
partitioner.buildLocalGraph(grid_view, wells, zoltan_ctrl);
return partitioner.partition(num_domains, grid_view, zoltan_ctrl);
}
} // Anonymous namespace
#endif // HAVE_MPI && HAVE_ZOLTAN
// ===========================================================================
template <class GridView, class Element>
std::pair<std::vector<int>, int>
Opm::partitionCells(const std::string& method,
const int num_local_domains,
const double partition_imbalance)
const GridView& grid_view,
[[maybe_unused]] const std::vector<Well>& wells,
[[maybe_unused]] const ZoltanPartitioningControl<Element>& zoltan_ctrl)
{
if (method == "zoltan") {
if constexpr (HasZoltanPartitioning<Grid>::value) {
return partitionCellsZoltan(grid, wells, num_local_domains, partition_imbalance);
} else {
#if HAVE_MPI && HAVE_ZOLTAN
return partitionCellsZoltan(num_local_domains, grid_view, wells, zoltan_ctrl);
#else // !HAVE_MPI || !HAVE_ZOLTAN
OPM_THROW(std::runtime_error, "Zoltan requested for local domain partitioning, "
"but is not available for the current grid type.");
"but is not available in the current build configuration.");
#endif // HAVE_MPI && HAVE_ZOLTAN
}
} else if (method == "simple") {
const int num_cells = detail::countLocalInteriorCells(grid);
else if (method == "simple") {
const int num_cells = detail::countLocalInteriorCellsGridView(grid_view);
return partitionCellsSimple(num_cells, num_local_domains);
} else if (method.size() > 10 && method.substr(method.size() - 10, 10) == ".partition") {
}
else if (method.size() > 10 && method.substr(method.size() - 10, 10) == ".partition") {
// Method string ends with ".partition", interpret as filename for partitioning.
const int num_cells = detail::countLocalInteriorCells(grid);
const int num_cells = detail::countLocalInteriorCellsGridView(grid_view);
return partitionCellsFromFile(method, num_cells);
} else {
}
else {
OPM_THROW(std::runtime_error, "Unknown local domain partitioning method requested: " + method);
}
}
// Read from file, containing one number per cell.
std::pair<std::vector<int>, int> partitionCellsFromFile(const std::string& filename, const int num_cells)
std::pair<std::vector<int>, int>
Opm::partitionCellsFromFile(const std::string& filename, const int num_cells)
{
// Read file into single vector.
std::ifstream is(filename);
@@ -119,7 +413,8 @@ std::pair<std::vector<int>, int> partitionCellsFromFile(const std::string& filen
// Trivially simple partitioner
std::pair<std::vector<int>, int> partitionCellsSimple(const int num_cells, const int num_domains)
std::pair<std::vector<int>, int>
Opm::partitionCellsSimple(const int num_cells, const int num_domains)
{
// Build the partitions.
const int dom_sz = num_cells / num_domains;
@@ -137,46 +432,53 @@ std::pair<std::vector<int>, int> partitionCellsSimple(const int num_cells, const
return { part, num_domains };
}
// ===========================================================================
template<class Grid>
std::pair<std::vector<int>, int> partitionCellsZoltan(const Grid& grid,
const std::vector<Well>& wells,
const int num_domains,
const double domain_imbalance)
{
auto partition_vector = grid.zoltanPartitionWithoutScatter
(&wells, nullptr, num_domains, domain_imbalance);
// ---------------------------------------------------------------------------
// Explicit specialisations/instantiations of partitionCells template.
// Deliberately placed at end of file. No other code beyond this separator.
// ---------------------------------------------------------------------------
return countDomains(std::move(partition_vector));
}
#define InstantiatePartitionCells(Grid) \
template std::pair<std::vector<int>, int> \
Opm::partitionCells(const std::string&, \
const int, \
const std::remove_cv_t<std::remove_reference_t< \
decltype(std::declval<Grid>().leafGridView())>>&, \
const std::vector<Opm::Well>&, \
const Opm::ZoltanPartitioningControl< \
typename std::remove_cv_t<std::remove_reference_t< \
decltype(std::declval<Grid>().leafGridView())>>::template Codim<0>::Entity>&)
template std::pair<std::vector<int>,int>
partitionCells<Dune::CpGrid>(const Dune::CpGrid&,
const std::vector<Well>&,
const std::string&,
const int,
const double);
// ---------------------------------------------------------------------------
// Grid types built into Flow.
// ---------------------------------------------------------------------------
InstantiatePartitionCells(Dune::CpGrid);
template std::pair<std::vector<int>,int>
partitionCells<Dune::PolyhedralGrid<3,3,double>>(const Dune::PolyhedralGrid<3,3,double>&,
const std::vector<Well>&,
const std::string&,
const int,
const double);
using PolyGrid = Dune::PolyhedralGrid<3, 3, double>;
InstantiatePartitionCells(PolyGrid);
// ---------------------------------------------------------------------------
// ALUGrid, if available.
// ---------------------------------------------------------------------------
#if HAVE_DUNE_ALUGRID
#if HAVE_MPI
using ALUGrid3CN = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, Dune::ALUGridMPIComm>;
using ALUGridComm = Dune::ALUGridMPIComm;
#else
using ALUGrid3CN = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, Dune::ALUGridNoComm>;
#endif //HAVE_MPI
using ALUGridComm = Dune::ALUGridNoComm;
#endif // HAVE_MPI
template std::pair<std::vector<int>,int>
partitionCells<ALUGrid3CN>(const ALUGrid3CN&,
const std::vector<Well>&,
const std::string&,
const int,
const double);
#endif
// ---------------------------------------------------------------------------
// Base case.
// ---------------------------------------------------------------------------
using BaseALUGrid = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, ALUGridComm>;
InstantiatePartitionCells(BaseALUGrid);
#endif // HAVE_DUNE_ALUGRID
} // namespace Opm
// ===========================================================================
#undef InstantiatePartitionCells
// ---------------------------------------------------------------------------
// End of file. No statements below this separator.
// ---------------------------------------------------------------------------
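
ZoltanPartitioner::connectWells() above keeps a well's on-rank cells
together by registering a graph connection between every pair of
perforated cells and then forcing those cells into the same domain.
The sketch below reproduces just the pairwise registration loop;
GraphSketch and its registerConnection() method are stand-ins assumed
here in place of ParallelNLDDPartitioningZoltan.

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

// Stand-in for the partitioning backend: records directed graph edges.
struct GraphSketch
{
    std::vector<std::pair<int, int>> edges{};

    void registerConnection(const int c1, const int c2)
    {
        this->edges.emplace_back(c1, c2);
    }
};

int main()
{
    auto graph = GraphSketch{};

    // Local (on-rank) active indices of the cells perforated by one well.
    const std::vector<int> cellIx {4, 9, 17};

    // Connect every pair of well cells, in both directions, so the graph
    // partitioner strongly prefers keeping them together.
    const auto nc = cellIx.size();
    for (auto ic1 = 0*nc; ic1 < nc; ++ic1) {
        for (auto ic2 = ic1 + 1; ic2 < nc; ++ic2) {
            graph.registerConnection(cellIx[ic1], cellIx[ic2]);
            graph.registerConnection(cellIx[ic2], cellIx[ic1]);
        }
    }

    std::cout << "registered " << graph.edges.size()
              << " directed connections\n";   // 3 pairs -> 6 directed edges
}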

View File

@@ -20,6 +20,7 @@
#ifndef OPM_ASPINPARTITION_HEADER_INCLUDED
#define OPM_ASPINPARTITION_HEADER_INCLUDED
#include <functional>
#include <string>
#include <utility>
#include <vector>
@@ -28,14 +29,70 @@ namespace Opm {
class Well;
/// Partitions the grid using the specified method.
/// \return pair containing a partition vector (partition number for each cell), and the number of partitions.
template<class Grid>
std::pair<std::vector<int>, int> partitionCells(const Grid& grid,
const std::vector<Well>& wells,
const std::string& method,
} // namespace Opm
namespace Opm {
/// Control parameters for on-rank subdomain partitioning using Zoltan library.
///
/// \tparam Element Grid view entity type
template <typename Element>
struct ZoltanPartitioningControl
{
/// Partition imbalance, percentage units. Assumed to be the run's
/// value of ZoltanImbalanceTol.
double domain_imbalance{1.03};
/// Compute a locally unique, for this MPI process, ID of a local
/// cell/element/entity.
std::function<int(const Element&)> index;
/// Compute a globally unique, across all MPI processes, ID for a local
/// cell/element/entity. Might for instance return the cell's
/// linearised index
///
/// i + nx*(j + ny*k)
///
/// of the Cartesian cell (i,j,k).
std::function<int(int)> local_to_global;
};
/// Partition rank's interior cells.
///
/// \param[in] method Partitioning method. Supported values are \c
/// "zoltan", \c "simple", or a filename with the extension \c
/// ".partition". The \c "zoltan" method invokes the Zoltan graph
/// partitioning package and requires both MPI and an active Zoltan
/// library. The \c "simple" method uses a one-dimensional load-balanced
/// approach, and the filename method will read a precomputed partition
/// vector from the named file.
///
/// \param[in] num_local_domains Number of subdomains. Not used when
/// explicit partitioning is input from a file.
///
/// \param[in] comm MPI Communication object for exchanging globally unique
/// cell IDs and for communication within the Zoltan library. Not used
/// unless \code method == "zoltan" \endcode.
///
/// \param[in] grid_view View of rank's cells, both interior and overlap
/// cells. Not used unless \code method == "zoltan" \endcode.
///
/// \param[in] wells Collection of simulation model wells. Not used unless
/// \code method == "zoltan" \endcode.
///
/// \param[in] zoltan_ctrl Control parameters for local Zoltan-based
/// partitioning. Not used unless \code method == "zoltan" \endcode.
///
/// \return Partition vector--subdomain ID for each cell in \p grid_view
/// traversal order for its interior cells--and the number of subdomains
/// on current rank.
template <class GridView, class Element>
std::pair<std::vector<int>, int>
partitionCells(const std::string& method,
const int num_local_domains,
const double partition_imbalance);
const GridView& grid_view,
const std::vector<Well>& wells,
const ZoltanPartitioningControl<Element>& zoltan_ctrl);
/// Read a partitioning from file, assumed to contain one number per cell, its partition number.
/// \return pair containing a partition vector (partition number for each cell), and the number of partitions.
@@ -45,13 +102,6 @@ std::pair<std::vector<int>, int> partitionCellsFromFile(const std::string& filen
/// \return pair containing a partition vector (partition number for each cell), and the number of partitions.
std::pair<std::vector<int>, int> partitionCellsSimple(const int num_cells, const int num_domains);
/// Partitions the grid using the Zoltan graph partitioner.
/// \return pair containing a partition vector (partition number for each cell), and the number of partitions.
template<class Grid>
std::pair<std::vector<int>, int> partitionCellsZoltan(const Grid& grid,
const std::vector<Well>& wells,
const int num_domains,
const double domain_imbalance);
} // namespace Opm
#endif // OPM_ASPINPARTITION_HEADER_INCLUDED
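
The header above documents local_to_global as possibly returning the
linearised Cartesian index i + nx*(j + ny*k); in Flow that role is
played by CartesianIndexMapper::cartesianIndex().  A small
self-contained illustration of such a callback, where the nx/ny
dimensions and the per-cell (i,j,k) arrays are assumptions made so
the example runs on its own:

#include <functional>
#include <iostream>
#include <vector>

int main()
{
    // Assumed Cartesian dimensions and (i,j,k) coordinates of three
    // local cells.
    const int nx = 4, ny = 3;
    const std::vector<int> cellI {0, 1, 2};
    const std::vector<int> cellJ {0, 0, 1};
    const std::vector<int> cellK {0, 0, 0};

    // local_to_global callback in the spirit of the documentation above:
    // globally unique ID = i + nx*(j + ny*k).
    const std::function<int(int)> local_to_global =
        [&](const int localIdx)
    {
        return cellI[localIdx] + nx*(cellJ[localIdx] + ny*cellK[localIdx]);
    };

    for (int c = 0; c < 3; ++c) {
        std::cout << "local " << c << " -> global " << local_to_global(c) << '\n';
    }
    // Prints 0, 1, and 6.
}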

View File

@@ -195,7 +195,7 @@ namespace {
return static_cast<int>(this->graph_.numVertices());
}
/// Retrieve globally unqique ID of reachable vertex.
/// Retrieve globally unique ID of reachable vertex.
///
/// \param[in] localCell Index of locally reachable cell/vertex.
int globalId(const int localCell) const
@@ -422,7 +422,7 @@ extern "C" {
///
/// \param[in] comm MPI communicator.
///
/// \param[in] params Control parameters for Zoltan graph partiitioning procedure.
/// \param[in] params Control parameters for Zoltan graph partitioning procedure.
explicit Partitioner(const Opm::Parallel::Communication comm,
const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params);
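
The Partitioner above is configured through a ZoltanParamMap of
string key/value pairs; in partitionCells.cpp this map is seeded by
setupZoltanParams("graph") and then NUM_GLOBAL_PARTS and
IMBALANCE_TOL are overridden.  A sketch of that configuration step,
with std::map<std::string, std::string> assumed as a stand-in for the
map type and std::to_string used instead of fmt::format for
simplicity:

#include <iostream>
#include <map>
#include <string>

int main()
{
    const int    num_local_domains = 8;
    const double domain_imbalance  = 1.03;

    // Stand-in for ParallelNLDDPartitioningZoltan::ZoltanParamMap; in the
    // library the map is first seeded by setupZoltanParams("graph").
    auto params = std::map<std::string, std::string>{};

    params.insert_or_assign("NUM_GLOBAL_PARTS", std::to_string(num_local_domains));
    params.insert_or_assign("IMBALANCE_TOL",    std::to_string(domain_imbalance));

    for (const auto& [key, value] : params) {
        std::cout << key << " = " << value << '\n';
    }
}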

View File

@@ -74,7 +74,7 @@ namespace Opm {
/// Force collection of cells to be in same result domain.
///
/// Mostly as a way to ensuring wells do not intersect multiple
/// Mostly as a means to ensuring wells do not intersect multiple
/// domains/blocks.
///
/// \param[in] cells Cell collection. Typically those cells which