From f195f739492e951e6ead3e85c54e082a8d27d34a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?B=C3=A5rd=20Skaflestad?=
Date: Tue, 12 Sep 2023 12:41:19 +0200
Subject: [PATCH 1/3] Extract Binary_Search Out to Helper

Mostly for readability.
---
 .../wells/BlackoilWellModel_impl.hpp | 26 ++++++++++++-------
 1 file changed, 17 insertions(+), 9 deletions(-)

diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp
index 45a76f3b7..efd517048 100644
--- a/opm/simulators/wells/BlackoilWellModel_impl.hpp
+++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp
@@ -2612,22 +2612,30 @@ namespace Opm {
     setupDomains(const std::vector& domains)
     {
         OPM_BEGIN_PARALLEL_TRY_CATCH();
-        // TODO: This loop nest may be slow for very large numbers
-        // of domains and wells, but that has not been observed on
-        // tests so far. Using the partition vector instead would
-        // be faster if we need to change.
+        // TODO: This loop nest may be slow for very large numbers of
+        // domains and wells, but that has not been observed on tests so
+        // far. Using the partition vector instead would be faster if we
+        // need to change.
         for (const auto& wellPtr : this->well_container_) {
-            const int first_well_cell = wellPtr->cells()[0];
+            const int first_well_cell = wellPtr->cells().front();
             for (const auto& domain : domains) {
-                const bool found = std::binary_search(domain.cells.begin(), domain.cells.end(), first_well_cell);
-                if (found) {
+                auto cell_present = [&domain](const auto cell)
+                {
+                    return std::binary_search(domain.cells.begin(),
+                                              domain.cells.end(), cell);
+                };
+
+                if (cell_present(first_well_cell)) {
                     // Assuming that if the first well cell is found in a domain,
                     // then all of that well's cells are in that same domain.
                     well_domain_[wellPtr->name()] = domain.index;
+
+                    // Verify that all of that well's cells are in that same domain.
                     for (int well_cell : wellPtr->cells()) {
-                        if (!std::binary_search(domain.cells.begin(), domain.cells.end(), well_cell)) {
-                            OPM_THROW(std::runtime_error, "Well found on multiple domains.");
+                        if (! cell_present(well_cell)) {
+                            OPM_THROW(std::runtime_error,
+                                      fmt::format("Well '{}' found on multiple domains.",
+                                                  wellPtr->name()));
                         }
                     }
                 }

From 27aaa28c71c27a4f3611cafac41d86f01a799104 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?B=C3=A5rd=20Skaflestad?=
Date: Tue, 12 Sep 2023 15:43:16 +0200
Subject: [PATCH 2/3] Add Partitioner for Unstructured Connectivity Graph

Invokes Zoltan library and requires MPI.  Client code constructs an
abstract connectivity graph by defining connections/edges through the
'registerConnection()' member function.  May also impose a restriction
that certain cells/vertices be placed in the same domain/block in the
resulting partition.

Client code must supply a callback function that defines globally
unique cell/vertex/object IDs, across all MPI ranks, for each vertex in
the connectivity graph.

Member function 'partitionElements()' forms the resulting partition
vector, the size of which is the total number of objects visible to the
local rank, typically the number of cells owned by the rank plus the
number of overlap cells, i.e., the size of the local grid view.
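
The intended client-side call sequence is sketched below.  This is an
illustration only, not part of the patch: it assumes a single-rank
communicator, an identity global-ID callback, and a four-cell example
graph, and the function name 'examplePartition' is made up.

    #include <opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp>

    #include <cstddef>
    #include <vector>

    std::vector<int>
    examplePartition(const Opm::Parallel::Communication& comm)
    {
        // Four cells connected in a chain: 0 -- 1 -- 2 -- 3.
        const std::size_t numCells = 4;

        Opm::ParallelNLDDPartitioningZoltan partitioner {
            comm, numCells,
            [](const int cell) { return cell; }  // Identity IDs on one rank.
        };

        partitioner.registerConnection(0, 1);
        partitioner.registerConnection(1, 2);
        partitioner.registerConnection(2, 3);

        // Keep cells 0 and 1 in the same block, e.g., because a single
        // well intersects both of them.
        partitioner.forceSameDomain({0, 1});

        // Request two blocks from Zoltan.
        const auto params = Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap {
            { "NUM_GLOBAL_PARTS", "2" }
        };

        return partitioner.partitionElements(params);
    }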
--- CMakeLists_files.cmake | 2 + .../utils/ParallelNLDDPartitioningZoltan.cpp | 607 ++++++++++++++++++ .../utils/ParallelNLDDPartitioningZoltan.hpp | 129 ++++ 3 files changed, 738 insertions(+) create mode 100644 opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp create mode 100644 opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake index 3b8d4d7ab..340f56602 100644 --- a/CMakeLists_files.cmake +++ b/CMakeLists_files.cmake @@ -235,6 +235,7 @@ endif() if(MPI_FOUND) list(APPEND MAIN_SOURCE_FILES opm/simulators/utils/MPIPacker.cpp opm/simulators/utils/ParallelEclipseState.cpp + opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp opm/simulators/utils/ParallelSerialization.cpp opm/simulators/utils/SetupZoltanParams.cpp) endif() @@ -528,6 +529,7 @@ list (APPEND PUBLIC_HEADER_FILES opm/simulators/utils/gatherDeferredLogger.hpp opm/simulators/utils/moduleVersion.hpp opm/simulators/utils/ParallelEclipseState.hpp + opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp opm/simulators/utils/ParallelRestart.hpp opm/simulators/utils/PropsDataHandle.hpp opm/simulators/utils/SerializationPackers.hpp diff --git a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp new file mode 100644 index 000000000..133c4d28b --- /dev/null +++ b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp @@ -0,0 +1,607 @@ +/* + Copyright 2023 Equinor ASA + + This file is part of the Open Porous Media Project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . +*/ + +#include + +#include + +#include + +#include +#include + +// Note: The build system guarantees that we're only built in configurations +// that have both MPI and Zoltan. Zoltan.h redefines 'HAVE_MPI', so let it +// do so, but restore a simple definition afterwards. +#undef HAVE_MPI +#include +#undef HAVE_MPI +#define HAVE_MPI + +#include +#include +#include +#include +#include +#include +#include + +namespace { + + /// Assign Zoltan control parameters. + /// + /// Built-in defaults possibly overriden by user-selected values. + /// + /// \param[in] params User-selected Zoltan control parameters. + /// + /// \param[in,out] zz Opaque Zoltan context. 
+ void setZoltanParameters(const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params, + Zoltan_Struct* zz) + { + Zoltan_Set_Param(zz, "DEBUG_LEVEL", "0"); + Zoltan_Set_Param(zz, "LB_METHOD", "GRAPH"); + Zoltan_Set_Param(zz, "LB_APPROACH", "PARTITION"); + Zoltan_Set_Param(zz, "NUM_GID_ENTRIES", "1"); + Zoltan_Set_Param(zz, "NUM_LID_ENTRIES", "1"); + Zoltan_Set_Param(zz, "RETURN_LISTS", "EXPORT"); // Self to "other" + Zoltan_Set_Param(zz, "CHECK_GRAPH", "2"); + Zoltan_Set_Param(zz, "EDGE_WEIGHT_DIM", "0"); + Zoltan_Set_Param(zz, "OBJ_WEIGHT_DIM", "0"); + Zoltan_Set_Param(zz, "PHG_EDGE_SIZE_THRESHOLD", "0.35"); // 0-remove all, 1-remove none + + for (const auto& [ param, value ] : params) { + Zoltan_Set_Param(zz, param.c_str(), value.c_str()); + } + } + + /// Create consecutive numbering of reachable vertices + class EnumerateSeenVertices + { + public: + /// Constructor. + /// + /// \tparam Edge Type for representing a directed edge between a + /// pair of vertices. Must support \code std::get<>() \endcode + /// protocol. Typically a \code std::pair<> \endcode of integral + /// types. + /// + /// \param[in] numVertices Maximum number of vertices in graph. + /// + /// \param[in] edges Edge representation of connectivity graph. + template + explicit EnumerateSeenVertices(const std::size_t numVertices, + const std::vector& edges) + : index_(numVertices, -1) + { + auto seen = std::vector(numVertices, false); + + for (const auto& [v1, v2] : edges) { + seen[v1] = true; + seen[v2] = true; + } + + for (auto vertex = 0*numVertices; vertex < numVertices; ++vertex) { + if (seen[vertex]) { + this->index_[vertex] = this->numSeenVertices_++; + } + } + } + + /// Retrieve number of reachable vertices. Less than or equal to \p + /// numVertices. + std::size_t numVertices() const + { + return this->numSeenVertices_; + } + + /// Retrieve reachable index of vertex. + /// + /// \param[in] vertex Vertex ID in original numbering + /// + /// \return Reachable index of \p vertex. -1 if \p vertex is not + /// reachable. + int operator[](const std::size_t vertex) const + { + return this->index_[vertex]; + } + + private: + /// Indices of reachable vertices. + std::vector index_{}; + + /// Number of reachable vertices. One more than the maximum value + /// in \c index_. + int numSeenVertices_{0}; + }; + + /// Unstructured graph of reachable vertices + class VertexGraph + { + public: + /// Constructor. + /// + /// \tparam Edge Type for representing a directed edge between a + /// pair of vertices. Must support \code std::get<>() \endcode + /// protocol. Typically a \code std::pair<> \endcode of integral + /// types. + /// + /// \tparam GlobalCellID Callback type for mapping a vertex ID to a + /// globally unique ID. Typically the same as + /// \code ParallelNLDDPartitioningZoltan::GlobalCellID \endcode. + /// + /// \param[in] myRank Current rank in MPI communicator. Needed by + /// Zoltan. + /// + /// \param[in] numVertices Maximum number of vertices in graph. + /// + /// \param[in] edges Edge representation of connectivity graph. + /// + /// \param[in] vertexId Enumeration of reachable vertices. + /// + /// \param[in] globalCell Callback for mapping (local) vertex IDs to + /// globally unique vertex IDs. + template + explicit VertexGraph(const int myRank, + const std::size_t numVertices, + const std::vector& edges, + const EnumerateSeenVertices& vertexId, + GlobalCellID&& globalCell) + : myRank_ { myRank } + { + // Form undirected connectivity graph. 
+ for (const auto& [v1, v2] : edges) { + this->graph_.addConnection(vertexId[v1], vertexId[v2]); + this->graph_.addConnection(vertexId[v2], vertexId[v1]); + } + + this->graph_.compress(vertexId.numVertices()); + + // Form local-to-global vertex ID mapping for reachable vertices. + this->globalCell_.resize(vertexId.numVertices()); + for (auto vertex = 0*numVertices; vertex < numVertices; ++vertex) { + if (const auto localIx = vertexId[vertex]; localIx >= 0) { + this->globalCell_[localIx] = globalCell(vertex); + } + } + } + + /// Retrive my rank in current MPI communicator. + /// + /// Needed by Zoltan. + int myRank() const + { + return this->myRank_; + } + + /// Retrieve number of vertices in connectivity graph. + int numVertices() const + { + return static_cast(this->graph_.numVertices()); + } + + /// Retrieve globally unqique ID of reachable vertex. + /// + /// \param[in] localCell Index of locally reachable cell/vertex. + int globalId(const int localCell) const + { + return this->globalCell_[localCell]; + } + + /// Get read-only access to start pointers of graph's CSR representation. + /// + /// \return Reference to start pointers (IA array). + decltype(auto) startPointers() const + { + return this->graph_.startPointers(); + } + + /// Get read-only access to column indices of graph's CSR representation. + /// + /// \return Reference to column indices (JA array). + decltype(auto) columnIndices() const + { + return this->graph_.columnIndices(); + } + + private: + // VertexID = int, TrackCompressedIdx = false + using Backend = Opm::utility::CSRGraphFromCoordinates<>; + + /// My rank in current MPI communicator. + int myRank_{}; + + /// Globally unique vertex ID of each locally reachable vertex. + std::vector globalCell_{}; + + /// Vertex connectivity graph. + Backend graph_{}; + }; + +// Use C linkage for Zoltan interface/query functions. Ensures maximum compatibility. +extern "C" { + /// Compute number of vertices in connectivity graph. + /// + /// Callback for Zoltan_Set_Num_Obj_Fn. + /// + /// \param[in] graphPtr Opaque user data. Assumed to point to a + /// \c VertexGraph instance. + /// + /// \param[out] ierr Error code for Zoltan consumption. Single \c int. + int numVertices(void* graphPtr, int* ierr) + { + *ierr = ZOLTAN_OK; + return static_cast(graphPtr)->numVertices(); + } + + /// Compute number of neighbours for each vertex in connectivity graph. + /// + /// Callback for Zoltan_Set_Num_Edges_Multi_Fn. + /// + /// \param[in] graphPtr Opaque user data. Assumed to point to a + /// \c VertexGraph instance. + /// + /// \param[in] numCells Number of cells/vertices/objects for which to + /// compute number of neighbours. + /// + /// \param[in] localID Local IDs of those cells/vertices/objects for + /// which to compute the respective number of neighbours. + /// + /// \param[in,out] numEdges Number of neighbours (in/out edges) for each + /// cell/vertex/object. Size equal to \code numVertices(graphPtr) + /// \endcode. Populated by this function. Allocated by Zoltan. + /// + /// \param[out] ierr Error code for Zoltan consumption. Single \c int. 
+ void numEdges(void* graphPtr, + const int /* sizeGID */, + const int /* sizeLID */, + const int numCells, + ZOLTAN_ID_PTR /* globalID */, + ZOLTAN_ID_PTR localID, + int* numEdges, + int* ierr) + { + const auto& ia = static_cast(graphPtr)->startPointers(); + + for (auto cell = 0*numCells; cell < numCells; ++cell) { + const auto localCell = localID[cell]; + numEdges[cell] = ia[localCell + 1] - ia[localCell + 0]; + } + + *ierr = ZOLTAN_OK; + } + + /// Identify objects/vertices in connectivity graph. + /// + /// Callback for Zoltan_Set_Obj_List_Fn. + /// + /// \param[in] graphPtr Opaque user data. Assumed to point to a + /// \c VertexGraph instance. + /// + /// \param[in] numElmsPerGid Number of ZOLTAN_ID_TYPE elements needed to + /// describe a single globally unique object/vertex ID. Must be one + /// (1) in this implementation. + /// + /// \param[in] numElmsPerLid Number of ZOLTAN_ID_TYPE elements needed to + /// describe a single local object/vertex ID. Must be one (1) in this + /// implementation. + /// + /// \param[in,out] globalIds Globally unique object/vertex IDs. Size + /// equal to \code numElmsPerGid * numVertices(graphPtr) \endcode. + /// Populated by this function. Allocated by Zoltan. + /// + /// \param[in,out] localIds Local object/vertex IDs. Size equal to + /// \code numElmsPerLid * numVertices(graphPtr) \endcode. Populated + /// by this function. Allocated by Zoltan. + /// + /// \param[out] ierr Error code for Zoltan consumption. Single \c int. + void vertexList(void* graphPtr, + const int numElmsPerGid, + const int numElmsPerLid, + ZOLTAN_ID_PTR globalIds, + ZOLTAN_ID_PTR localIds, + const int /* wgtDim */, + float* /* objWgts */, + int* ierr) + { + if ((numElmsPerGid != numElmsPerLid) || (numElmsPerLid != 1)) { + *ierr = ZOLTAN_FATAL; + } + + const auto* graph = static_cast(graphPtr); + + if (graph->numVertices() == 0) { + *ierr = ZOLTAN_OK; + return; + } + + std::iota(&localIds[0], &localIds[0] + graph->numVertices(), ZOLTAN_ID_TYPE{0}); + std::transform(&localIds[0], + &localIds[0] + graph->numVertices(), + &globalIds[0], + [graph](const int localCell) -> ZOLTAN_ID_TYPE + { + return graph->globalId(localCell); + }); + + *ierr = ZOLTAN_OK; + } + + /// Identify neighbours in connectivity graph. + /// + /// Callback for Zoltan_Set_Edge_List_Multi_Fn + /// + /// \param[in] graphPtr Opaque user data. Assumed to point to a + /// \c VertexGraph instance. + /// + /// \param[in] numElmsPerGid Number of ZOLTAN_ID_TYPE elements needed to + /// describe a single globally unique object/vertex ID. Must be one + /// (1) in this implementation. + /// + /// \param[in] numElmsPerLid Number of ZOLTAN_ID_TYPE elements needed to + /// describe a single local object/vertex ID. Must be one (1) in this + /// implementation. + /// + /// \param[in] numCells Number of cells/vertices/objects for which to + /// identify the neighbouring cells/vertices/objects. + /// + /// \param[in] localIds Local object/vertex IDs. Size equal to \code + /// numElmsPerLid * numVertices(graphPtr) \endcode. + /// + /// \param[in,out] neighbourGid Globally unique object ID of each + /// neighbour cell/vertex/object. Populated by this function. + /// Allocated by Zoltan. + /// + /// \param[in,out] neighbourProc Owner of each neighbouring + /// cell/vertex/object. Populated by this function. Allocated by + /// Zoltan. + /// + /// \param[out] ierr Error code for Zoltan consumption. Single \c int. 
+ void edgeList(void* graphPtr, + const int numElmsPerGid, + const int numElmsPerLid, + const int numCells, + ZOLTAN_ID_PTR /* globalIds */, + ZOLTAN_ID_PTR localIds, + int* /* numEdges */, + ZOLTAN_ID_PTR neighbourGid, + int* neighbourProc, + int /* weightDim */, + float* /* edgeWeights */, + int* ierr) + { + const auto* graph = static_cast(graphPtr); + + if ((numElmsPerGid != numElmsPerLid) || + (numElmsPerLid != 1) || + (numCells != graph->numVertices())) + { + *ierr = ZOLTAN_FATAL; + } + + if (graph->numVertices() == 0) { + *ierr = ZOLTAN_OK; + return; + } + + const auto& ia = graph->startPointers(); + const auto& ja = graph->columnIndices(); + + for (auto cell = 0*numCells, ix = 0; cell < numCells; ++cell) { + const auto localCell = localIds[cell]; + for (auto neighIx = ia[localCell], end = ia[localCell + 1]; + neighIx != end; ++neighIx, ++ix) + { + neighbourGid [ix] = static_cast(graph->globalId(ja[neighIx])); + neighbourProc[ix] = graph->myRank(); + } + } + } +} // extern "C" + + /// Partition VertexGraph objects using Zoltan graph partitioning software. + class Partitioner + { + public: + /// Constructor. + /// + /// \param[in] comm MPI communicator. + /// + /// \param[in] params Control parameters for Zoltan graph partiitioning procedure. + explicit Partitioner(const Opm::Parallel::Communication comm, + const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params); + + /// Destructor. + /// + /// Destroys Zoltan context. + ~Partitioner(); + + /// Partition VertexGraph instance. + /// + /// \param[in] graphPtr Graph being partitioned. Assumed to point + /// to a \c VertexGraph instance that is fully populated by caller. + /// + /// \param[in] numCells Number of vertices in graph pointed to by \p + /// graphPtr. Used to size the resulting partition vector. + /// + /// \return Partition vector. Non-negative block IDs for each of + /// the \p numCells cells/vertices/objects in \p graphPtr. + std::vector operator()(void* graphPtr, const std::size_t numCells); + + private: + /// Helper RAII type to manage partition ("part") memory allocated + /// by Zoltan. + struct ZoltanPart + { + /// Globally unique cell/vertex/object IDs. + ZOLTAN_ID_PTR globalId{nullptr}; + + /// Local cell/vertex/object IDs. + ZOLTAN_ID_PTR localId{nullptr}; + + /// Owning process for each cell/vertex/object. + int* process{nullptr}; + + /// Block/domain to which each cell/vertex/object is assigned. + int* block{nullptr}; + + /// Destructor. + /// + /// Releases partition ("part") memory acquired by Zoltan. + ~ZoltanPart(); + }; + + /// Zoltan library context. 
+ Zoltan_Struct* zz_{nullptr}; + }; + + Partitioner::ZoltanPart::~ZoltanPart() + { + Zoltan_LB_Free_Part(&this->globalId, + &this->localId, + &this->process, + &this->block); + } + + Partitioner::Partitioner(const Opm::Parallel::Communication comm, + const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params) + { + const auto argc = 0; + char* argv[] = { nullptr }; + auto ver = 0.0f; + + const auto rc = Zoltan_Initialize(argc, argv, &ver); + if (rc != ZOLTAN_OK) { + OPM_THROW(std::runtime_error, "Unable to Initialise Zoltan"); + } + + this->zz_ = Zoltan_Create(comm); + setZoltanParameters(params, this->zz_); + } + + Partitioner::~Partitioner() + { + Zoltan_Destroy(&this->zz_); + } + + std::vector + Partitioner::operator()(void* graphPtr, const std::size_t numCells) + { + Zoltan_Set_Num_Obj_Fn (this->zz_, &numVertices, graphPtr); + Zoltan_Set_Num_Edges_Multi_Fn(this->zz_, &numEdges , graphPtr); + + Zoltan_Set_Obj_List_Fn (this->zz_, &vertexList, graphPtr); + Zoltan_Set_Edge_List_Multi_Fn(this->zz_, &edgeList , graphPtr); + + auto send = ZoltanPart{}; // Objects to export/send to "other" processes/parts + auto recv = ZoltanPart{}; // Objects to import/receive from "other" processes/parts + + auto partitionChanged = 0; + auto numElmPerGid = 1; + auto numElmPerLid = 1; + auto numRecv = 0; + auto numSend = 0; + const auto rc = Zoltan_LB_Partition + (this->zz_, + &partitionChanged, + &numElmPerGid, &numElmPerLid, + &numRecv, &recv.globalId, &recv.localId, &recv.process, &recv.block, + &numSend, &send.globalId, &send.localId, &send.process, &send.block); + + if (rc != ZOLTAN_OK) { + OPM_THROW(std::runtime_error, "Failed to Partition Domain Graph"); + } + + auto blocks = std::vector(numCells, 0); + for (auto e = 0*numSend; e < numSend; ++e) { + blocks[send.localId[e]] = send.block[e]; + } + + return blocks; + } + + void forceSameDomain(const std::vector& cells, + std::vector& parts) + { + if (cells.empty()) { return; } + + const auto first = parts[cells.front()]; + for (auto& cell : cells) { + parts[cell] = first; + } + } + + std::vector compressPartitionIDs(std::vector&& parts0) + { + auto partition = std::move(parts0); + + if (! partition.empty()) { + auto mmPos = std::minmax_element(partition.begin(), partition.end()); + auto seen = std::vector(*mmPos.second - *mmPos.first + 1, false); + for (const auto& domain : partition) { + seen[domain - *mmPos.first] = domain >= 0; + } + + auto num_domains = 0; + auto compressed = std::vector(*mmPos.second - *mmPos.first + 1, -1); + for (auto i = 0*compressed.size(); i < compressed.size(); ++i) { + if (seen[i]) { + compressed[i] = num_domains++; + } + } + + for (auto& domain : partition) { + if (domain >= 0) { + domain = compressed[domain - *mmPos.first]; + } + } + } + + return partition; + } + +} // Anonymous namespace + +std::vector +Opm::ParallelNLDDPartitioningZoltan::partitionElements(const ZoltanParamMap& params) const +{ + const auto vertexId = EnumerateSeenVertices { this->numElements_, this->conns_ }; + + auto graph = VertexGraph { + this->comm_.rank(), this->numElements_, + this->conns_, vertexId, this->globalCell_ + }; + + const auto partsForReachableCells = Partitioner { + this->comm_, params + }(static_cast(&graph), graph.numVertices()); + + // Map reachable cells back to full cell numbering. 
+ auto parts = std::vector(this->numElements_, -1); + for (auto elm = 0*this->numElements_; elm < this->numElements_; ++elm) { + if (const auto reachableElmIx = vertexId[elm]; reachableElmIx >= 0) { + parts[elm] = partsForReachableCells[reachableElmIx]; + } + } + + for (const auto& cells : this->sameDomain_) { + ::forceSameDomain(cells, parts); + } + + return compressPartitionIDs(std::move(parts)); +} diff --git a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp new file mode 100644 index 000000000..697a21e90 --- /dev/null +++ b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp @@ -0,0 +1,129 @@ +/* + Copyright 2023 Equinor ASA + + This file is part of the Open Porous Media Project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . +*/ + +#ifndef OPM_UTILITY_PARALLEL_NLDD_PARTITIONING_ZOLTAN_HPP +#define OPM_UTILITY_PARALLEL_NLDD_PARTITIONING_ZOLTAN_HPP + +#include + +#include +#include +#include +#include +#include +#include + +namespace Opm { + + /// Partition connectivity graph into non-overlapping domains using the + /// Zoltan graph partitioning software package. Primarily intended for + /// use in NLDD-based non-linear solves. + class ParallelNLDDPartitioningZoltan + { + public: + /// Callback type for mapping a vertex/cell ID to a globally unique + /// ID. + using GlobalCellID = std::function; + + /// Collection of parameters to control the partitioning procedure. + using ZoltanParamMap = std::map; + + /// Constructor. + /// + /// \param[in] comm MPI communication object. Needed by Zoltan. + /// + /// \param[in] numElements Number of potential vertices in + /// connectivity graph. Typically the total number of cells on + /// the current rank, i.e., both owned cells and overlap cells. + /// + /// \param[in] globalCell Callback for mapping (local) vertex IDs to + /// globally unique vertex IDs. + explicit ParallelNLDDPartitioningZoltan(const Parallel::Communication comm, + const std::size_t numElements, + const GlobalCellID& globalCell) + : comm_ { comm } + , numElements_ { numElements } + , globalCell_ { globalCell } + {} + + /// Insert directed graph edge between two vertices. + /// + /// \param[in] c1 Source vertex. + /// + /// \param]in] c2 Sink/destination vertex. + void registerConnection(std::size_t c1, std::size_t c2) + { + this->conns_.emplace_back(c1, c2); + } + + /// Force collection of cells to be in same result domain. + /// + /// Mostly as a way to ensuring wells do not intersect multiple + /// domains/blocks. + /// + /// \param[in] cells Cell collection. Typically those cells which + /// are intersected by a single well. + void forceSameDomain(std::vector&& cells) + { + this->sameDomain_.emplace_back(std::move(cells)); + } + + /// Partition connectivity graph using Zoltan graph partitioning + /// package. + /// + /// Honours any prescribed requirement that certain cells be placed + /// in a single domain/block. + /// + /// \param[in] params Parameters for Zoltan. 
Override default + /// settings. + /// + /// \return Partition vector of size \p numElements. Reachable + /// vertices/cells are partitioned into N blocks numbered 0..N-1. + /// Unreachable vertices get a block ID of -1. + std::vector + partitionElements(const ZoltanParamMap& params) const; + + private: + /// Connection/graph edge. + using Connection = std::pair; + + /// MPI communication object. Needed by Zoltan. + Parallel::Communication comm_{}; + + /// Maximum number of vertices in connectivity graph. Needed to + /// size partition vector return value. + std::size_t numElements_{}; + + /// Callback for mapping (local) vertex/cell IDs to globally unique + /// vertex/cell IDs. + GlobalCellID globalCell_{}; + + /// Connectivity graph edges. + std::vector conns_{}; + + /// Collections of vertices/cells which must be coalesced to the + /// same domain/block. All vertices within a single collection will + /// be placed on the same domain, but cells from different + /// collections may be placed on different domains. + std::vector> sameDomain_{}; + }; + +} // namespace Opm + +#endif // OPM_UTILITY_PARALLEL_NLDD_PARTITIONING_ZOLTAN_HPP From ed77c90238677c803ce14d1a9181d8702ac0b9b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?B=C3=A5rd=20Skaflestad?= Date: Tue, 12 Sep 2023 16:52:37 +0200 Subject: [PATCH 3/3] Make Zoltan-Based Domain Partitioning MPI Aware This commit switches the current implementation of 'partitionCellsZoltan()', i.e., 'partitionCells("zoltan", ...)' into using the MPI-aware ParallelNLDDPartitioningZoltan utility. In doing so we make 'partitionCellsZoltan()' private since its availability is not guaranteed. We also slightly reorder the parameters and switch from passing a "Grid" into passing a "GridView" as an argument to partitionCells(), and specialise this function for the known grid views in OPM Flow. We extract the Zoltan-related parameters out to an Entity-dependent helper structure and move the complexity of forming this type to a new helper function, BlackoilModelEbosNldd::partitionCells(). --- opm/simulators/flow/BlackoilModelEbosNldd.hpp | 49 +- opm/simulators/flow/partitionCells.cpp | 456 +++++++++++++++--- opm/simulators/flow/partitionCells.hpp | 80 ++- .../utils/ParallelNLDDPartitioningZoltan.cpp | 4 +- .../utils/ParallelNLDDPartitioningZoltan.hpp | 2 +- 5 files changed, 487 insertions(+), 104 deletions(-) diff --git a/opm/simulators/flow/BlackoilModelEbosNldd.hpp b/opm/simulators/flow/BlackoilModelEbosNldd.hpp index 2f7383f29..ea82c60cc 100644 --- a/opm/simulators/flow/BlackoilModelEbosNldd.hpp +++ b/opm/simulators/flow/BlackoilModelEbosNldd.hpp @@ -54,12 +54,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include @@ -93,16 +95,8 @@ public: BlackoilModelEbosNldd(BlackoilModelEbos& model) : model_(model) { - const auto& grid = model_.ebosSimulator().vanguard().grid(); - const auto& schedule = model_.ebosSimulator().vanguard().schedule(); - // Create partitions. - const auto& [partition_vector, num_domains] = - partitionCells(grid, - schedule.getWellsatEnd(), - model_.param().local_domain_partition_method_, - model_.param().num_local_domains_, - model_.param().local_domain_partition_imbalance_); + const auto& [partition_vector, num_domains] = this->partitionCells(); // Scan through partitioning to get correct size for each. std::vector sizes(num_domains, 0); @@ -121,6 +115,8 @@ public: // Iterate through grid once, setting the seeds of all partitions. // Note: owned cells only! 
+ const auto& grid = model_.ebosSimulator().vanguard().grid(); + std::vector count(num_domains, 0); const auto& gridView = grid.leafGridView(); const auto beg = gridView.template begin<0, Dune::Interior_Partition>(); @@ -835,6 +831,41 @@ private: return errorPV; } + decltype(auto) partitionCells() const + { + const auto& grid = this->model_.ebosSimulator().vanguard().grid(); + + using GridView = std::remove_cv_t>; + using Element = std::remove_cv_t::Entity>>; + + const auto& param = this->model_.param(); + + auto zoltan_ctrl = ZoltanPartitioningControl{}; + zoltan_ctrl.domain_imbalance = param.local_domain_partition_imbalance_; + zoltan_ctrl.index = + [elementMapper = &this->model_.ebosSimulator().model().elementMapper()] + (const Element& element) + { + return elementMapper->index(element); + }; + zoltan_ctrl.local_to_global = + [cartMapper = &this->model_.ebosSimulator().vanguard().cartesianIndexMapper()] + (const int elemIdx) + { + return cartMapper->cartesianIndex(elemIdx); + }; + + // Forming the list of wells is expensive, so do this only if needed. + const auto need_wells = param.local_domain_partition_method_ == "zoltan"; + const auto wells = need_wells + ? this->model_.ebosSimulator().vanguard().schedule().getWellsatEnd() + : std::vector{}; + + return ::Opm::partitionCells(param.local_domain_partition_method_, + param.num_local_domains_, + grid.leafGridView(), wells, zoltan_ctrl); + } + BlackoilModelEbos& model_; //!< Reference to model std::vector domains_; //!< Vector of subdomains std::vector> domain_matrices_; //!< Vector of matrix operator for each subdomain diff --git a/opm/simulators/flow/partitionCells.cpp b/opm/simulators/flow/partitionCells.cpp index 07845c5de..41ae00dec 100644 --- a/opm/simulators/flow/partitionCells.cpp +++ b/opm/simulators/flow/partitionCells.cpp @@ -18,15 +18,26 @@ */ #include + #include +#include + +#if HAVE_MPI && HAVE_ZOLTAN +#include +#include +#include +#include +#include + +#include +#include +#include +#endif // HAVE_MPI && HAVE_ZOLTAN + #include #include -#include - -#include - #if HAVE_DUNE_ALUGRID #include #endif // HAVE_DUNE_ALUGRID @@ -40,68 +51,351 @@ #include #include #include +#include #include +#include +#include +#include + +// =========================================================================== +// Zoltan-based partitioning if available in this build configuration. +// --------------------------------------------------------------------------- + +#if HAVE_MPI && HAVE_ZOLTAN namespace { -std::pair, int> -countDomains(std::vector partition_vector) +/// Partition rank's interior cells into non-overlapping domains using the +/// Zoltan graph partitioning software package. +class ZoltanPartitioner { - auto maxPos = std::max_element(partition_vector.begin(), - partition_vector.end()); +public: + /// Constructor. + /// + /// \tparam GridView DUNE grid view type + /// + /// \param[in] grid_view Current rank's reachable cells. + /// + /// \param[in] local_to_global Callback for mapping rank's local cells + /// to globally unique cell IDs. May for instance return the + /// Cartesian cell ID of each local cell. + template + explicit ZoltanPartitioner(const GridView& grid_view, + const Opm::ParallelNLDDPartitioningZoltan::GlobalCellID& local_to_global); - const auto num_domains = (maxPos == partition_vector.end()) - ? 0 : *maxPos + 1; + /// Form internal connectivity graph between rank's reachable, interior + /// cells. + /// + /// \tparam GridView DUNE grid view type + /// + /// \tparam Element Grid view's representation of a cell. 
+ /// + /// \param[in] grid_view Current rank's reachable cells. + /// + /// \param[in] wells Collection of wells to which this rank's partition + /// vector must be adapted. Wells must not intersect multiple local + /// domains/blocks. + /// + /// \param[in] zoltan_ctrl Control parameters for on-rank subdomain + /// partitioning. + template + void buildLocalGraph(const GridView& grid_view, + const std::vector& wells, + const Opm::ZoltanPartitioningControl& zoltan_ctrl); - return { std::move(partition_vector), num_domains }; + /// Partition rank's interior cells into non-overlapping domains using + /// the Zoltan graph partitioning software package. + /// + /// \tparam GridView DUNE grid view type + /// + /// \tparam Element Grid view's representation of a cell. + /// + /// \param[in] num_local_domains Target number of domains into which the + /// full model will be partitioned. Note: This number is treated as a + /// global parameter across all MPI ranks and is not used as a number + /// of domains per rank. The final number of model subdomains may + /// differ from this number when adapting to well constraints and if, + /// for whatever reason, Zoltan happens to generate a global + /// partitioning for which one domain/block happens to cross process + /// boundaries. + /// + /// \param[in] grid_view Current rank's reachable cells. + /// + /// \param[in] zoltan_ctrl Control parameters for on-rank subdomain + /// partitioning. + /// + /// \return Partition vector for rank's interior cells and number of + /// unique domains across those interior cells. + template + std::pair, int> + partition(const int num_local_domains, + const GridView& grid_view, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) const; + +private: + /// Zoltan partitioning backend. + Opm::ParallelNLDDPartitioningZoltan partitioner_; + + /// Form internal connectivity graph between rank's reachable, interior + /// cells. + /// + /// \tparam GridView DUNE grid view type + /// + /// \tparam Element Grid view's representation of a cell. + /// + /// \param[in] grid_view Current rank's reachable cells. + /// + /// \param[in] zoltan_ctrl Control parameters for on-rank subdomain + /// partitioning. + /// + /// \return Mapping from globally unique cell IDs to local, on-rank + /// active cell IDs. Effectively the cached result of \code + /// zoltan_ctrl.index() \endcode for each globally unique cell ID. + template + std::unordered_map + connectElements(const GridView& grid_view, + const Opm::ZoltanPartitioningControl& zoltan_ctrl); + + /// Adapt internal connectivity graph to account for connections induced + /// by well reservoir connections. + /// + /// \tparam Comm MPI communication type. + /// + /// \param[in] comm MPI communication object. Needed to get a global + /// view of any exceptions thrown in the implementation. + /// + /// \param[in] wells Collection of wells to which this rank's partition + /// vector must be adapted. Wells must not intersect multiple local + /// domains/blocks. + /// + /// \param[in] g2l Mapping from globally unique cell IDs to local, + /// on-rank active cell IDs. Return value from \c connectElements(). + template + void connectWells(const Comm comm, + const std::vector& wells, + const std::unordered_map& g2l); +}; + +// Note: "grid_view.size(0)" is intentional here. It is not an error. The +// ParallelNLDDPartitioningZoltan type will internally enumerate interior +// cells in a consecutive sequence, regardless of the ordering in the +// grid_view object. 
+template +ZoltanPartitioner::ZoltanPartitioner(const GridView& grid_view, + const Opm::ParallelNLDDPartitioningZoltan::GlobalCellID& local_to_global) + : partitioner_(grid_view.comm(), grid_view.size(0), local_to_global) +{} + +template +void ZoltanPartitioner::buildLocalGraph(const GridView& grid_view, + const std::vector& wells, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) +{ + this->connectWells(grid_view.comm(), wells, this->connectElements(grid_view, zoltan_ctrl)); } -template -struct HasZoltanPartitioning : public std::false_type {}; +template +std::pair, int> +ZoltanPartitioner::partition(const int num_local_domains, + const GridView& grid_view, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) const +{ + auto partition = std::pair, int>{}; -template -struct HasZoltanPartitioning< - GridType, - std::void_t().zoltanPartitionWithoutScatter - (std::declval*>(), - std::declval(), - std::declval(), - std::declval()))> - > : public std::true_type {}; + auto param = Opm::setupZoltanParams("graph"); + param.insert_or_assign("NUM_GLOBAL_PARTS", fmt::format("{}", num_local_domains)); + param.insert_or_assign("IMBALANCE_TOL" , fmt::format("{}", zoltan_ctrl.domain_imbalance)); -} // anonymous namespace + const auto domainsAll = this->partitioner_.partitionElements(param); -namespace Opm { + auto& [domains, num_domains] = partition; + domains.reserve(domainsAll.size()); + num_domains = 0; -template -std::pair, int> partitionCells(const Grid& grid, - const std::vector& wells, - const std::string& method, - const int num_local_domains, - const double partition_imbalance) + for (const auto& cell : elements(grid_view, Dune::Partitions::interior)) { + const auto domainId = domains.emplace_back(domainsAll[zoltan_ctrl.index(cell)]); + + if (domainId >= num_domains) { + num_domains = domainId + 1; + } + } + + // Fix-up for an extreme case: Interior cells whose neighbours are all + // on a different rank--i.e., interior cells which do not have on-rank + // neighbours. Make each such cell be a separate domain on this rank. + // Don't remove this code unless you've taken remedial action elsewhere + // to ensure the situation doesn't happen. For what it's worth, we've + // seen this case occur in practice when testing on field cases. + for (auto& domainId : domains) { + if (domainId < 0) { + domainId = num_domains++; + } + } + + return partition; +} + +template +std::unordered_map +ZoltanPartitioner::connectElements(const GridView& grid_view, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) +{ + auto g2l = std::unordered_map{}; + + for (const auto& element : elements(grid_view, Dune::Partitions::interior)) { + { + const auto c = zoltan_ctrl.index(element); + g2l.insert_or_assign(zoltan_ctrl.local_to_global(c), c); + } + + for (const auto& is : intersections(grid_view, element)) { + if (! is.neighbor()) { + continue; + } + + const auto& in = is.inside(); + const auto& out = is.outside(); + + if ((in .partitionType() != Dune::InteriorEntity) || + (out.partitionType() != Dune::InteriorEntity)) + { + // Connect cells in interior only. 
+ continue; + } + + const auto c1 = zoltan_ctrl.index(in); + const auto c2 = zoltan_ctrl.index(out); + + this->partitioner_.registerConnection(c1, c2); + this->partitioner_.registerConnection(c2, c1); + } + } + + return g2l; +} + +template +void ZoltanPartitioner::connectWells(const Comm comm, + const std::vector& wells, + const std::unordered_map& g2l) +{ + auto distributedWells = 0; + + for (const auto& well : wells) { + auto cellIx = std::vector{}; + auto otherProc = 0; + + for (const auto& conn : well.getConnections()) { + auto locPos = g2l.find(conn.global_index()); + if (locPos == g2l.end()) { + ++otherProc; + continue; + } + + cellIx.push_back(locPos->second); + } + + if ((otherProc > 0) && !cellIx.empty()) { + ++distributedWells; + continue; + } + + const auto nc = cellIx.size(); + for (auto ic1 = 0*nc; ic1 < nc; ++ic1) { + for (auto ic2 = ic1 + 1; ic2 < nc; ++ic2) { + this->partitioner_.registerConnection(cellIx[ic1], cellIx[ic2]); + this->partitioner_.registerConnection(cellIx[ic2], cellIx[ic1]); + } + } + + if (! cellIx.empty()) { + // All cells intersected by a single well go in the same domain. + this->partitioner_.forceSameDomain(std::move(cellIx)); + } + } + + OPM_BEGIN_PARALLEL_TRY_CATCH() + + if (distributedWells > 0) { + const auto* pl = (distributedWells == 1) ? "" : "s"; + + throw std::invalid_argument { + fmt::format(R"({} distributed well{} detected on rank {}." +This is not supported in the current NLDD implementation.)", + distributedWells, pl, comm.rank()) + }; + } + + OPM_END_PARALLEL_TRY_CATCH(std::string { "ZoltanPartitioner::connectWells()" }, comm) +} + +template +std::pair, int> +partitionCellsZoltan(const int num_domains, + const GridView& grid_view, + const std::vector& wells, + const Opm::ZoltanPartitioningControl& zoltan_ctrl) +{ + if (num_domains <= 1) { // No partitioning => every cell in domain zero. 
+ const auto num_interior_cells = + Opm::detail::countLocalInteriorCellsGridView(grid_view); + + return { + std::piecewise_construct, + std::forward_as_tuple(num_interior_cells, 0), + std::forward_as_tuple(1) + }; + } + + auto partitioner = ZoltanPartitioner { grid_view, zoltan_ctrl.local_to_global }; + partitioner.buildLocalGraph(grid_view, wells, zoltan_ctrl); + + return partitioner.partition(num_domains, grid_view, zoltan_ctrl); +} + +} // Anonymous namespace + +#endif // HAVE_MPI && HAVE_ZOLTAN + +// =========================================================================== + +template +std::pair, int> +Opm::partitionCells(const std::string& method, + const int num_local_domains, + const GridView& grid_view, + [[maybe_unused]] const std::vector& wells, + [[maybe_unused]] const ZoltanPartitioningControl& zoltan_ctrl) { if (method == "zoltan") { - if constexpr (HasZoltanPartitioning::value) { - return partitionCellsZoltan(grid, wells, num_local_domains, partition_imbalance); - } else { - OPM_THROW(std::runtime_error, "Zoltan requested for local domain partitioning, " - "but is not available for the current grid type."); - } - } else if (method == "simple") { - const int num_cells = detail::countLocalInteriorCells(grid); +#if HAVE_MPI && HAVE_ZOLTAN + + return partitionCellsZoltan(num_local_domains, grid_view, wells, zoltan_ctrl); + +#else // !HAVE_MPI || !HAVE_ZOLTAN + + OPM_THROW(std::runtime_error, "Zoltan requested for local domain partitioning, " + "but is not available in the current build configuration."); + +#endif // HAVE_MPI && HAVE_ZOLTAN + } + else if (method == "simple") { + const int num_cells = detail::countLocalInteriorCellsGridView(grid_view); return partitionCellsSimple(num_cells, num_local_domains); - } else if (method.size() > 10 && method.substr(method.size() - 10, 10) == ".partition") { + } + else if (method.size() > 10 && method.substr(method.size() - 10, 10) == ".partition") { // Method string ends with ".partition", interpret as filename for partitioning. - const int num_cells = detail::countLocalInteriorCells(grid); + const int num_cells = detail::countLocalInteriorCellsGridView(grid_view); return partitionCellsFromFile(method, num_cells); - } else { + } + else { OPM_THROW(std::runtime_error, "Unknown local domain partitioning method requested: " + method); } } - // Read from file, containing one number per cell. -std::pair, int> partitionCellsFromFile(const std::string& filename, const int num_cells) +std::pair, int> +Opm::partitionCellsFromFile(const std::string& filename, const int num_cells) { // Read file into single vector. std::ifstream is(filename); @@ -119,7 +413,8 @@ std::pair, int> partitionCellsFromFile(const std::string& filen // Trivially simple partitioner -std::pair, int> partitionCellsSimple(const int num_cells, const int num_domains) +std::pair, int> +Opm::partitionCellsSimple(const int num_cells, const int num_domains) { // Build the partitions. 
const int dom_sz = num_cells / num_domains; @@ -137,46 +432,53 @@ std::pair, int> partitionCellsSimple(const int num_cells, const return { part, num_domains }; } +// =========================================================================== -template -std::pair, int> partitionCellsZoltan(const Grid& grid, - const std::vector& wells, - const int num_domains, - const double domain_imbalance) -{ - auto partition_vector = grid.zoltanPartitionWithoutScatter - (&wells, nullptr, num_domains, domain_imbalance); +// --------------------------------------------------------------------------- +// Explicit specialisations/instantiations of partitionCells template. +// Deliberately placed at end of file. No other code beyond this separator. +// --------------------------------------------------------------------------- - return countDomains(std::move(partition_vector)); -} +#define InstantiatePartitionCells(Grid) \ + template std::pair, int> \ + Opm::partitionCells(const std::string&, \ + const int, \ + const std::remove_cv_t().leafGridView())>>&, \ + const std::vector&, \ + const Opm::ZoltanPartitioningControl< \ + typename std::remove_cv_t().leafGridView())>>::template Codim<0>::Entity>&) -template std::pair,int> -partitionCells(const Dune::CpGrid&, - const std::vector&, - const std::string&, - const int, - const double); +// --------------------------------------------------------------------------- +// Grid types built into Flow. +// --------------------------------------------------------------------------- +InstantiatePartitionCells(Dune::CpGrid); -template std::pair,int> -partitionCells>(const Dune::PolyhedralGrid<3,3,double>&, - const std::vector&, - const std::string&, - const int, - const double); +using PolyGrid = Dune::PolyhedralGrid<3, 3, double>; +InstantiatePartitionCells(PolyGrid); +// --------------------------------------------------------------------------- +// ALUGrid, if available. +// --------------------------------------------------------------------------- #if HAVE_DUNE_ALUGRID #if HAVE_MPI -using ALUGrid3CN = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, Dune::ALUGridMPIComm>; +using ALUGridComm = Dune::ALUGridMPIComm; #else -using ALUGrid3CN = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, Dune::ALUGridNoComm>; -#endif //HAVE_MPI +using ALUGridComm = Dune::ALUGridNoComm; +#endif // HAVE_MPI -template std::pair,int> -partitionCells(const ALUGrid3CN&, - const std::vector&, - const std::string&, - const int, - const double); -#endif +// --------------------------------------------------------------------------- +// Base case. +// --------------------------------------------------------------------------- +using BaseALUGrid = Dune::ALUGrid<3, 3, Dune::cube, Dune::nonconforming, ALUGridComm>; +InstantiatePartitionCells(BaseALUGrid); +#endif // HAVE_DUNE_ALUGRID -} // namespace Opm +// =========================================================================== + +#undef InstantiatePartitionCells + +// --------------------------------------------------------------------------- +// End of file. No statements below this separator. 
+// --------------------------------------------------------------------------- diff --git a/opm/simulators/flow/partitionCells.hpp b/opm/simulators/flow/partitionCells.hpp index 5dfd9b84a..1fee4f90c 100644 --- a/opm/simulators/flow/partitionCells.hpp +++ b/opm/simulators/flow/partitionCells.hpp @@ -20,6 +20,7 @@ #ifndef OPM_ASPINPARTITION_HEADER_INCLUDED #define OPM_ASPINPARTITION_HEADER_INCLUDED +#include #include #include #include @@ -28,14 +29,70 @@ namespace Opm { class Well; -/// Partitions the grid using the specified method. -/// \return pair containing a partition vector (partition number for each cell), and the number of partitions. -template -std::pair, int> partitionCells(const Grid& grid, - const std::vector& wells, - const std::string& method, - const int num_local_domains, - const double partition_imbalance); +} // namespace Opm + +namespace Opm { + +/// Control parameters for on-rank subdomain partitioning using Zoltan library. +/// +/// \tparam Element Grid view entity type +template +struct ZoltanPartitioningControl +{ + /// Partition imbalance, percentage units. Assumed to be the run's + /// value of ZoltanImbalanceTol. + double domain_imbalance{1.03}; + + /// Compute a locally unique, for this MPI process, ID of a local + /// cell/element/entity. + std::function index; + + /// Compute a globally unique, across all MPI processes, ID for a local + /// cell/element/entity. Might for instance return the cell's + /// linearised index + /// + /// i + nx*(j + ny*k) + /// + /// of the Cartesian cell (i,j,k). + std::function local_to_global; +}; + +/// Partition rank's interior cells. +/// +/// \param[in] method Partitioning method. Supported values are \c +/// "zoltan", \c "simple", or a filename with the extension \c +/// ".partition". The \c "zoltan" method invokes the Zoltan graph +/// partitioning package and requires both MPI and an active Zoltan +/// library. The \c "simple" method uses a one-dimensional load-balanced +/// approach, and the filename method will read a precomputed partition +/// vector from the named file. +/// +/// \param[in] num_local_domains Number of subdomains. Not used when +/// explicit partitioning is input from a file. +/// +/// \param[in] comm MPI Communication object for exchanging globally unique +/// cell IDs and for communication within the Zoltan library. Not used +/// unless \code method == "zoltan" \endcode. +/// +/// \param[in] grid_view View of rank's cells, both interior and overlap +/// cells. Not used unless \code method == "zoltan" \endcode. +/// +/// \param[in] wells Collection of simulation model wells. Not used unless +/// \code method == "zoltan" \endcode. +/// +/// \param[in] zoltan_ctrl Control parameters for local Zoltan-based +/// partitioning. Not used unless \code method == "zoltan" \endcode. +/// +/// \return Partition vector--subdomain ID for each cell in \p grid_view +/// traversal order for its interior cells--and the number of subdomains +/// on current rank. +template +std::pair, int> +partitionCells(const std::string& method, + const int num_local_domains, + const GridView& grid_view, + const std::vector& wells, + const ZoltanPartitioningControl& zoltan_ctrl); /// Read a partitioning from file, assumed to contain one number per cell, its partition number. /// \return pair containing a partition vector (partition number for each cell), and the number of partitions. 
@@ -45,13 +102,6 @@ std::pair, int> partitionCellsFromFile(const std::string& filen /// \return pair containing a partition vector (partition number for each cell), and the number of partitions. std::pair, int> partitionCellsSimple(const int num_cells, const int num_domains); -/// Partitions the grid using the Zoltan graph partitioner. -/// \return pair containing a partition vector (partition number for each cell), and the number of partitions. -template -std::pair, int> partitionCellsZoltan(const Grid& grid, - const std::vector& wells, - const int num_domains, - const double domain_imbalance); } // namespace Opm #endif // OPM_ASPINPARTITION_HEADER_INCLUDED diff --git a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp index 133c4d28b..e897468eb 100644 --- a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp +++ b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.cpp @@ -195,7 +195,7 @@ namespace { return static_cast(this->graph_.numVertices()); } - /// Retrieve globally unqique ID of reachable vertex. + /// Retrieve globally unique ID of reachable vertex. /// /// \param[in] localCell Index of locally reachable cell/vertex. int globalId(const int localCell) const @@ -422,7 +422,7 @@ extern "C" { /// /// \param[in] comm MPI communicator. /// - /// \param[in] params Control parameters for Zoltan graph partiitioning procedure. + /// \param[in] params Control parameters for Zoltan graph partitioning procedure. explicit Partitioner(const Opm::Parallel::Communication comm, const Opm::ParallelNLDDPartitioningZoltan::ZoltanParamMap& params); diff --git a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp index 697a21e90..6cda24373 100644 --- a/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp +++ b/opm/simulators/utils/ParallelNLDDPartitioningZoltan.hpp @@ -74,7 +74,7 @@ namespace Opm { /// Force collection of cells to be in same result domain. /// - /// Mostly as a way to ensuring wells do not intersect multiple + /// Mostly as a means to ensuring wells do not intersect multiple /// domains/blocks. /// /// \param[in] cells Cell collection. Typically those cells which