From ed77c90238677c803ce14d1a9181d8702ac0b9b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A5rd=20Skaflestad?= Date: Tue, 12 Sep 2023 16:52:37 +0200 Subject: [PATCH] Make Zoltan-Based Domain Partitioning MPI Aware This commit switches the current implementation of 'partitionCellsZoltan()', i.e., 'partitionCells("zoltan", ...)' into using the MPI-aware ParallelNLDDPartitioningZoltan utility. In doing so we make 'partitionCellsZoltan()' private since its availability is not guaranteed. We also slightly reorder the parameters and switch from passing a "Grid" into passing a "GridView" as an argument to partitionCells(), and specialise this function for the known grid views in OPM Flow. We extract the Zoltan-related parameters out to an Entity-dependent helper structure and move the complexity of forming this type to a new helper function, BlackoilModelEbosNldd::partitionCells(). --- opm/simulators/flow/BlackoilModelEbosNldd.hpp | 49 +- opm/simulators/flow/partitionCells.cpp | 456 +++++++++++++++--- opm/simulators/flow/partitionCells.hpp | 80 ++- .../utils/ParallelNLDDPartitioningZoltan.cpp | 4 +- .../utils/ParallelNLDDPartitioningZoltan.hpp | 2 +- 5 files changed, 487 insertions(+), 104 deletions(-) diff --git a/opm/simulators/flow/BlackoilModelEbosNldd.hpp b/opm/simulators/flow/BlackoilModelEbosNldd.hpp index 2f7383f29..ea82c60cc 100644 --- a/opm/simulators/flow/BlackoilModelEbosNldd.hpp +++ b/opm/simulators/flow/BlackoilModelEbosNldd.hpp @@ -54,12 +54,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include @@ -93,16 +95,8 @@ public: BlackoilModelEbosNldd(BlackoilModelEbos& model) : model_(model) { - const auto& grid = model_.ebosSimulator().vanguard().grid(); - const auto& schedule = model_.ebosSimulator().vanguard().schedule(); - // Create partitions. - const auto& [partition_vector, num_domains] = - partitionCells(grid, - schedule.getWellsatEnd(), - model_.param().local_domain_partition_method_, - model_.param().num_local_domains_, - model_.param().local_domain_partition_imbalance_); + const auto& [partition_vector, num_domains] = this->partitionCells(); // Scan through partitioning to get correct size for each. std::vector sizes(num_domains, 0); @@ -121,6 +115,8 @@ public: // Iterate through grid once, setting the seeds of all partitions. // Note: owned cells only! + const auto& grid = model_.ebosSimulator().vanguard().grid(); + std::vector count(num_domains, 0); const auto& gridView = grid.leafGridView(); const auto beg = gridView.template begin<0, Dune::Interior_Partition>(); @@ -835,6 +831,41 @@ private: return errorPV; } + decltype(auto) partitionCells() const + { + const auto& grid = this->model_.ebosSimulator().vanguard().grid(); + + using GridView = std::remove_cv_t>; + using Element = std::remove_cv_t::Entity>>; + + const auto& param = this->model_.param(); + + auto zoltan_ctrl = ZoltanPartitioningControl{}; + zoltan_ctrl.domain_imbalance = param.local_domain_partition_imbalance_; + zoltan_ctrl.index = + [elementMapper = &this->model_.ebosSimulator().model().elementMapper()] + (const Element& element) + { + return elementMapper->index(element); + }; + zoltan_ctrl.local_to_global = + [cartMapper = &this->model_.ebosSimulator().vanguard().cartesianIndexMapper()] + (const int elemIdx) + { + return cartMapper->cartesianIndex(elemIdx); + }; + + // Forming the list of wells is expensive, so do this only if needed. 
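+        // Only the Zoltan-based method adapts the partition to the well
+        // topology; the "simple" and file-based methods never inspect the
+        // well list, so an empty vector is passed in those cases.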
+ const auto need_wells = param.local_domain_partition_method_ == "zoltan"; + const auto wells = need_wells + ? this->model_.ebosSimulator().vanguard().schedule().getWellsatEnd() + : std::vector{}; + + return ::Opm::partitionCells(param.local_domain_partition_method_, + param.num_local_domains_, + grid.leafGridView(), wells, zoltan_ctrl); + } + BlackoilModelEbos& model_; //!< Reference to model std::vector domains_; //!< Vector of subdomains std::vector> domain_matrices_; //!< Vector of matrix operator for each subdomain diff --git a/opm/simulators/flow/partitionCells.cpp b/opm/simulators/flow/partitionCells.cpp index 07845c5de..41ae00dec 100644 --- a/opm/simulators/flow/partitionCells.cpp +++ b/opm/simulators/flow/partitionCells.cpp @@ -18,15 +18,26 @@ */ #include + #include +#include + +#if HAVE_MPI && HAVE_ZOLTAN +#include +#include +#include +#include +#include + +#include +#include +#include +#endif // HAVE_MPI && HAVE_ZOLTAN + #include #include -#include - -#include - #if HAVE_DUNE_ALUGRID #include #endif // HAVE_DUNE_ALUGRID @@ -40,68 +51,351 @@ #include #include #include +#include #include +#include +#include +#include + +// =========================================================================== +// Zoltan-based partitioning if available in this build configuration. +// --------------------------------------------------------------------------- + +#if HAVE_MPI && HAVE_ZOLTAN namespace { -std::pair, int> -countDomains(std::vector partition_vector) +/// Partition rank's interior cells into non-overlapping domains using the +/// Zoltan graph partitioning software package. +class ZoltanPartitioner { - auto maxPos = std::max_element(partition_vector.begin(), - partition_vector.end()); +public: + /// Constructor. + /// + /// \tparam GridView DUNE grid view type + /// + /// \param[in] grid_view Current rank's reachable cells. + /// + /// \param[in] local_to_global Callback for mapping rank's local cells + /// to globally unique cell IDs. May for instance return the + /// Cartesian cell ID of each local cell. + template + explicit ZoltanPartitioner(const GridView& grid_view, + const Opm::ParallelNLDDPartitioningZoltan::GlobalCellID& local_to_global); - const auto num_domains = (maxPos == partition_vector.end()) - ? 0 : *maxPos + 1; + /// Form internal connectivity graph between rank's reachable, interior + /// cells. + /// + /// \tparam GridView DUNE grid view type + /// + /// \tparam Element Grid view's representation of a cell. + /// + /// \param[in] grid_view Current rank's reachable cells. + /// + /// \param[in] wells Collection of wells to which this rank's partition + /// vector must be adapted. Wells must not intersect multiple local + /// domains/blocks. + /// + /// \param[in] zoltan_ctrl Control parameters for on-rank subdomain + /// partitioning. + template + void buildLocalGraph(const GridView& grid_view, + const std::vector& wells, + const Opm::ZoltanPartitioningControl& zoltan_ctrl); - return { std::move(partition_vector), num_domains }; + /// Partition rank's interior cells into non-overlapping domains using + /// the Zoltan graph partitioning software package. + /// + /// \tparam GridView DUNE grid view type + /// + /// \tparam Element Grid view's representation of a cell. + /// + /// \param[in] num_local_domains Target number of domains into which the + /// full model will be partitioned. Note: This number is treated as a + /// global parameter across all MPI ranks and is not used as a number + /// of domains per rank. 
The final number of model subdomains may + /// differ from this number when adapting to well constraints and if, + /// for whatever reason, Zoltan happens to generate a global + /// partitioning for which one domain/block happens to cross process + /// boundaries. + /// + /// \param[in] grid_view Current rank's reachable cells. + /// + /// \param[in] zoltan_ctrl Control parameters for on-rank subdomain + /// partitioning. + /// + /// \return Partition vector for rank's interior cells and number of + /// unique domains across those interior cells. + template + std::pair, int> + partition(const int num_local_domains, + const GridView& grid_view, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) const; + +private: + /// Zoltan partitioning backend. + Opm::ParallelNLDDPartitioningZoltan partitioner_; + + /// Form internal connectivity graph between rank's reachable, interior + /// cells. + /// + /// \tparam GridView DUNE grid view type + /// + /// \tparam Element Grid view's representation of a cell. + /// + /// \param[in] grid_view Current rank's reachable cells. + /// + /// \param[in] zoltan_ctrl Control parameters for on-rank subdomain + /// partitioning. + /// + /// \return Mapping from globally unique cell IDs to local, on-rank + /// active cell IDs. Effectively the cached result of \code + /// zoltan_ctrl.index() \endcode for each globally unique cell ID. + template + std::unordered_map + connectElements(const GridView& grid_view, + const Opm::ZoltanPartitioningControl& zoltan_ctrl); + + /// Adapt internal connectivity graph to account for connections induced + /// by well reservoir connections. + /// + /// \tparam Comm MPI communication type. + /// + /// \param[in] comm MPI communication object. Needed to get a global + /// view of any exceptions thrown in the implementation. + /// + /// \param[in] wells Collection of wells to which this rank's partition + /// vector must be adapted. Wells must not intersect multiple local + /// domains/blocks. + /// + /// \param[in] g2l Mapping from globally unique cell IDs to local, + /// on-rank active cell IDs. Return value from \c connectElements(). + template + void connectWells(const Comm comm, + const std::vector& wells, + const std::unordered_map& g2l); +}; + +// Note: "grid_view.size(0)" is intentional here. It is not an error. The +// ParallelNLDDPartitioningZoltan type will internally enumerate interior +// cells in a consecutive sequence, regardless of the ordering in the +// grid_view object. 
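+//
+// In particular, "grid_view.size(0)" counts every codim-0 entity reachable
+// on this rank (interior and overlap alike), whereas only the interior
+// cells are assigned to domains.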
+template +ZoltanPartitioner::ZoltanPartitioner(const GridView& grid_view, + const Opm::ParallelNLDDPartitioningZoltan::GlobalCellID& local_to_global) + : partitioner_(grid_view.comm(), grid_view.size(0), local_to_global) +{} + +template +void ZoltanPartitioner::buildLocalGraph(const GridView& grid_view, + const std::vector& wells, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) +{ + this->connectWells(grid_view.comm(), wells, this->connectElements(grid_view, zoltan_ctrl)); } -template -struct HasZoltanPartitioning : public std::false_type {}; +template +std::pair, int> +ZoltanPartitioner::partition(const int num_local_domains, + const GridView& grid_view, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) const +{ + auto partition = std::pair, int>{}; -template -struct HasZoltanPartitioning< - GridType, - std::void_t().zoltanPartitionWithoutScatter - (std::declval*>(), - std::declval(), - std::declval(), - std::declval()))> - > : public std::true_type {}; + auto param = Opm::setupZoltanParams("graph"); + param.insert_or_assign("NUM_GLOBAL_PARTS", fmt::format("{}", num_local_domains)); + param.insert_or_assign("IMBALANCE_TOL" , fmt::format("{}", zoltan_ctrl.domain_imbalance)); -} // anonymous namespace + const auto domainsAll = this->partitioner_.partitionElements(param); -namespace Opm { + auto& [domains, num_domains] = partition; + domains.reserve(domainsAll.size()); + num_domains = 0; -template -std::pair, int> partitionCells(const Grid& grid, - const std::vector& wells, - const std::string& method, - const int num_local_domains, - const double partition_imbalance) + for (const auto& cell : elements(grid_view, Dune::Partitions::interior)) { + const auto domainId = domains.emplace_back(domainsAll[zoltan_ctrl.index(cell)]); + + if (domainId >= num_domains) { + num_domains = domainId + 1; + } + } + + // Fix-up for an extreme case: Interior cells whose neighbours are all + // on a different rank--i.e., interior cells which do not have on-rank + // neighbours. Make each such cell be a separate domain on this rank. + // Don't remove this code unless you've taken remedial action elsewhere + // to ensure the situation doesn't happen. For what it's worth, we've + // seen this case occur in practice when testing on field cases. + for (auto& domainId : domains) { + if (domainId < 0) { + domainId = num_domains++; + } + } + + return partition; +} + +template +std::unordered_map +ZoltanPartitioner::connectElements(const GridView& grid_view, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) +{ + auto g2l = std::unordered_map{}; + + for (const auto& element : elements(grid_view, Dune::Partitions::interior)) { + { + const auto c = zoltan_ctrl.index(element); + g2l.insert_or_assign(zoltan_ctrl.local_to_global(c), c); + } + + for (const auto& is : intersections(grid_view, element)) { + if (! is.neighbor()) { + continue; + } + + const auto& in = is.inside(); + const auto& out = is.outside(); + + if ((in .partitionType() != Dune::InteriorEntity) || + (out.partitionType() != Dune::InteriorEntity)) + { + // Connect cells in interior only. 
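+                // Connections that straddle a process boundary are thus
+                // omitted from the local graph; each rank partitions its
+                // own interior cells independently.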
+ continue; + } + + const auto c1 = zoltan_ctrl.index(in); + const auto c2 = zoltan_ctrl.index(out); + + this->partitioner_.registerConnection(c1, c2); + this->partitioner_.registerConnection(c2, c1); + } + } + + return g2l; +} + +template +void ZoltanPartitioner::connectWells(const Comm comm, + const std::vector& wells, + const std::unordered_map& g2l) +{ + auto distributedWells = 0; + + for (const auto& well : wells) { + auto cellIx = std::vector{}; + auto otherProc = 0; + + for (const auto& conn : well.getConnections()) { + auto locPos = g2l.find(conn.global_index()); + if (locPos == g2l.end()) { + ++otherProc; + continue; + } + + cellIx.push_back(locPos->second); + } + + if ((otherProc > 0) && !cellIx.empty()) { + ++distributedWells; + continue; + } + + const auto nc = cellIx.size(); + for (auto ic1 = 0*nc; ic1 < nc; ++ic1) { + for (auto ic2 = ic1 + 1; ic2 < nc; ++ic2) { + this->partitioner_.registerConnection(cellIx[ic1], cellIx[ic2]); + this->partitioner_.registerConnection(cellIx[ic2], cellIx[ic1]); + } + } + + if (! cellIx.empty()) { + // All cells intersected by a single well go in the same domain. + this->partitioner_.forceSameDomain(std::move(cellIx)); + } + } + + OPM_BEGIN_PARALLEL_TRY_CATCH() + + if (distributedWells > 0) { + const auto* pl = (distributedWells == 1) ? "" : "s"; + + throw std::invalid_argument { + fmt::format(R"({} distributed well{} detected on rank {}." +This is not supported in the current NLDD implementation.)", + distributedWells, pl, comm.rank()) + }; + } + + OPM_END_PARALLEL_TRY_CATCH(std::string { "ZoltanPartitioner::connectWells()" }, comm) +} + +template +std::pair, int> +partitionCellsZoltan(const int num_domains, + const GridView& grid_view, + const std::vector& wells, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) +{ + if (num_domains <= 1) { // No partitioning => every cell in domain zero. 
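+        // Partition vector with one zero per interior cell, paired with a
+        // domain count of one.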
+ const auto num_interior_cells = + Opm::detail::countLocalInteriorCellsGridView(grid_view); + + return { + std::piecewise_construct, + std::forward_as_tuple(num_interior_cells, 0), + std::forward_as_tuple(1) + }; + } + + auto partitioner = ZoltanPartitioner { grid_view, zoltan_ctrl.local_to_global }; + partitioner.buildLocalGraph(grid_view, wells, zoltan_ctrl); + + return partitioner.partition(num_domains, grid_view, zoltan_ctrl); +} + +} // Anonymous namespace + +#endif // HAVE_MPI && HAVE_ZOLTAN + +// =========================================================================== + +template +std::pair, int> +Opm::partitionCells(const std::string& method, + const int num_local_domains, + const GridView& grid_view, + [[maybe_unused]] const std::vector& wells, + [[maybe_unused]] const ZoltanPartitioningControl& zoltan_ctrl) { if (method == "zoltan") { - if constexpr (HasZoltanPartitioning::value) { - return partitionCellsZoltan(grid, wells, num_local_domains, partition_imbalance); - } else { - OPM_THROW(std::runtime_error, "Zoltan requested for local domain partitioning, " - "but is not available for the current grid type."); - } - } else if (method == "simple") { - const int num_cells = detail::countLocalInteriorCells(grid); +#if HAVE_MPI && HAVE_ZOLTAN + + return partitionCellsZoltan(num_local_domains, grid_view, wells, zoltan_ctrl); + +#else // !HAVE_MPI || !HAVE_ZOLTAN + + OPM_THROW(std::runtime_error, "Zoltan requested for local domain partitioning, " + "but is not available in the current build configuration."); + +#endif // HAVE_MPI && HAVE_ZOLTAN + } + else if (method == "simple") { + const int num_cells = detail::countLocalInteriorCellsGridView(grid_view); return partitionCellsSimple(num_cells, num_local_domains); - } else if (method.size() > 10 && method.substr(method.size() - 10, 10) == ".partition") { + } + else if (method.size() > 10 && method.substr(method.size() - 10, 10) == ".partition") { // Method string ends with ".partition", interpret as filename for partitioning. - const int num_cells = detail::countLocalInteriorCells(grid); + const int num_cells = detail::countLocalInteriorCellsGridView(grid_view); return partitionCellsFromFile(method, num_cells); - } else { + } + else { OPM_THROW(std::runtime_error, "Unknown local domain partitioning method requested: " + method); } } - // Read from file, containing one number per cell. -std::pair, int> partitionCellsFromFile(const std::string& filename, const int num_cells) +std::pair, int> +Opm::partitionCellsFromFile(const std::string& filename, const int num_cells) { // Read file into single vector. std::ifstream is(filename); @@ -119,7 +413,8 @@ std::pair, int> partitionCellsFromFile(const std::string& filen // Trivially simple partitioner -std::pair, int> partitionCellsSimple(const int num_cells, const int num_domains) +std::pair, int> +Opm::partitionCellsSimple(const int num_cells, const int num_domains) { // Build the partitions. 
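    // (Each domain receives a contiguous block of roughly
    // num_cells / num_domains cells.)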
const int dom_sz = num_cells / num_domains; @@ -137,46 +432,53 @@ std::pair, int> partitionCellsSimple(const int num_cells, const return { part, num_domains }; } +// =========================================================================== -template -std::pair, int> partitionCellsZoltan(const Grid& grid, - const std::vector& wells, - const int num_domains, - const double domain_imbalance) -{ - auto partition_vector = grid.zoltanPartitionWithoutScatter - (&wells, nullptr, num_domains, domain_imbalance); +// --------------------------------------------------------------------------- +// Explicit specialisations/instantiations of partitionCells template. +// Deliberately placed at end of file. No other code beyond this separator. +// --------------------------------------------------------------------------- - return countDomains(std::move(partition_vector)); -} +#define InstantiatePartitionCells(Grid) \ + template std::pair, int> \ + Opm::partitionCells(const std::string&, \ + const int, \ + const std::remove_cv_t().leafGridView())>>&, \ + const std::vector&, \ + const Opm::ZoltanPartitioningControl< \ + typename std::remove_cv_t().leafGridView())>>::template Codim<0>::Entity>&) -template std::pair,int> -partitionCells(const Dune::CpGrid&, - const std::vector&, - const std::string&, - const int, - const double); +// --------------------------------------------------------------------------- +// Grid types built into Flow. +// --------------------------------------------------------------------------- +InstantiatePartitionCells(Dune::CpGrid); -template std::pair,int> -partitionCells>(const Dune::PolyhedralGrid<3,3,double>&, - const std::vector&, - const std::string&, - const int, - const double); +using PolyGrid = Dune::PolyhedralGrid<3, 3, double>; +InstantiatePartitionCells(PolyGrid); +// --------------------------------------------------------------------------- +// ALUGrid, if available. +// --------------------------------------------------------------------------- #if HAVE_DUNE_ALUGRID #if HAVE_MPI -using ALUGrid3CN = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, Dune::ALUGridMPIComm>; +using ALUGridComm = Dune::ALUGridMPIComm; #else -using ALUGrid3CN = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, Dune::ALUGridNoComm>; -#endif //HAVE_MPI +using ALUGridComm = Dune::ALUGridNoComm; +#endif // HAVE_MPI -template std::pair,int> -partitionCells(const ALUGrid3CN&, - const std::vector&, - const std::string&, - const int, - const double); -#endif +// --------------------------------------------------------------------------- +// Base case. +// --------------------------------------------------------------------------- +using BaseALUGrid = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, ALUGridComm>; +InstantiatePartitionCells(BaseALUGrid); +#endif // HAVE_DUNE_ALUGRID -} // namespace Opm +// =========================================================================== + +#undef InstantiatePartitionCells + +// --------------------------------------------------------------------------- +// End of file. No statements below this separator. 
+// --------------------------------------------------------------------------- diff --git a/opm/simulators/flow/partitionCells.hpp b/opm/simulators/flow/partitionCells.hpp index 5dfd9b84a..1fee4f90c 100644 --- a/opm/simulators/flow/partitionCells.hpp +++ b/opm/simulators/flow/partitionCells.hpp @@ -20,6 +20,7 @@ #ifndef OPM_ASPINPARTITION_HEADER_INCLUDED #define OPM_ASPINPARTITION_HEADER_INCLUDED +#include #include #include #include @@ -28,14 +29,70 @@ namespace Opm { class Well; -/// Partitions the grid using the specified method. -/// \return pair containing a partition vector (partition number for each cell), and the number of partitions. -template -std::pair, int> partitionCells(const Grid& grid, - const std::vector& wells, - const std::string& method, - const int num_local_domains, - const double partition_imbalance); +} // namespace Opm + +namespace Opm { + +/// Control parameters for on-rank subdomain partitioning using Zoltan library. +/// +/// \tparam Element Grid view entity type +template +struct ZoltanPartitioningControl +{ + /// Partition imbalance, percentage units. Assumed to be the run's + /// value of ZoltanImbalanceTol. + double domain_imbalance{1.03}; + + /// Compute a locally unique, for this MPI process, ID of a local + /// cell/element/entity. + std::function index; + + /// Compute a globally unique, across all MPI processes, ID for a local + /// cell/element/entity. Might for instance return the cell's + /// linearised index + /// + /// i + nx*(j + ny*k) + /// + /// of the Cartesian cell (i,j,k). + std::function local_to_global; +}; + +/// Partition rank's interior cells. +/// +/// \param[in] method Partitioning method. Supported values are \c +/// "zoltan", \c "simple", or a filename with the extension \c +/// ".partition". The \c "zoltan" method invokes the Zoltan graph +/// partitioning package and requires both MPI and an active Zoltan +/// library. The \c "simple" method uses a one-dimensional load-balanced +/// approach, and the filename method will read a precomputed partition +/// vector from the named file. +/// +/// \param[in] num_local_domains Number of subdomains. Not used when +/// explicit partitioning is input from a file. +/// +/// \param[in] comm MPI Communication object for exchanging globally unique +/// cell IDs and for communication within the Zoltan library. Not used +/// unless \code method == "zoltan" \endcode. +/// +/// \param[in] grid_view View of rank's cells, both interior and overlap +/// cells. Not used unless \code method == "zoltan" \endcode. +/// +/// \param[in] wells Collection of simulation model wells. Not used unless +/// \code method == "zoltan" \endcode. +/// +/// \param[in] zoltan_ctrl Control parameters for local Zoltan-based +/// partitioning. Not used unless \code method == "zoltan" \endcode. +/// +/// \return Partition vector--subdomain ID for each cell in \p grid_view +/// traversal order for its interior cells--and the number of subdomains +/// on current rank. +template +std::pair, int> +partitionCells(const std::string& method, + const int num_local_domains, + const GridView& grid_view, + const std::vector& wells, + const ZoltanPartitioningControl& zoltan_ctrl); /// Read a partitioning from file, assumed to contain one number per cell, its partition number. /// \return pair containing a partition vector (partition number for each cell), and the number of partitions. 
@@ -45,13 +102,6 @@ std::pair, int> partitionCellsFromFile(const std::string& filen /// \return pair containing a partition vector (partition number for each cell), and the number of partitions. std::pair, int> partitionCellsSimple(const int num_cells, const int num_domains); -/// Partitions the grid using the Zoltan graph partitioner. -/// \return pair containing a partition vector (partition number for each cell), and the number of partitions. -template -std::pair, int> partitionCellsZoltan(const Grid& grid, - const std::vector& wells, - const int num_domains, - const double domain_imbalance); } // namespace Opm #endif // OPM_ASPINPARTITION_HEADER_INCLUDED diff --git a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp index 133c4d28b..e897468eb 100644 --- a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp +++ b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp @@ -195,7 +195,7 @@ namespace { return static_cast(this->graph_.numVertices()); } - /// Retrieve globally unqique ID of reachable vertex. + /// Retrieve globally unique ID of reachable vertex. /// /// \param[in] localCell Index of locally reachable cell/vertex. int globalId(const int localCell) const @@ -422,7 +422,7 @@ extern "C" { /// /// \param[in] comm MPI communicator. /// - /// \param[in] params Control parameters for Zoltan graph partiitioning procedure. + /// \param[in] params Control parameters for Zoltan graph partitioning procedure. explicit Partitioner(const Opm::Parallel::Communication comm, const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params); diff --git a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp index 697a21e90..6cda24373 100644 --- a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp +++ b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp @@ -74,7 +74,7 @@ namespace Opm { /// Force collection of cells to be in same result domain. /// - /// Mostly as a way to ensuring wells do not intersect multiple + /// Mostly as a means to ensuring wells do not intersect multiple /// domains/blocks. /// /// \param[in] cells Cell collection. Typically those cells which