Mirror of https://github.com/OPM/opm-simulators.git (synced 2025-02-10 15:05:35 -06:00)
Fix NLDD solver for parallel runs with the COMPDAT keyword inside an ACTIONX
This commit is contained in:
parent 7c8173d5a8
commit 87a46d7b01
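As the comments inside the diff explain, the schedule's map of possible future well connections (the connections a COMPDAT inside an ACTIONX may open later) is only populated on rank 0. Parallel NLDD runs therefore partitioned the grid against inconsistent data across ranks. The changes below broadcast that map to all ranks with Opm::Parallel::MpiSerializer before it is used for domain partitioning and for setting up the well-induced cell connections.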
@@ -945,6 +945,9 @@ private:
         const auto wells = need_wells
             ? this->model_.simulator().vanguard().schedule().getWellsatEnd()
             : std::vector<Well>{};
+        const auto& possibleFutureConnectionSet = need_wells
+            ? this->model_.simulator().vanguard().schedule().getPossibleFutureConnections()
+            : std::unordered_map<std::string, std::set<int>> {};
 
         // If defaulted parameter for number of domains, choose a reasonable default.
         constexpr int default_cells_per_domain = 1000;
@@ -953,7 +956,6 @@ private:
             ? param.num_local_domains_
             : num_cells / default_cells_per_domain;
 
-        const auto& possibleFutureConnectionSet = this->model_.simulator().vanguard().schedule().getPossibleFutureConnections();
         return ::Opm::partitionCells(param.local_domain_partition_method_,
                                      num_domains,
                                      grid.leafGridView(), wells, possibleFutureConnectionSet, zoltan_ctrl);
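The surrounding logic picks the domain count from the user parameter or derives it from the cell count. A minimal self-contained sketch of that selection follows; the helper name and the "non-positive means defaulted" guard are assumptions, while the ternary shape and the 1000-cells-per-domain constant come from the diff above.

#include <algorithm>

// Sketch: choose the number of NLDD domains. A non-positive 'requested'
// is treated as "defaulted" (assumption); the 1000-cells-per-domain
// default matches the constant in the diff.
int chooseNumDomains(const int requested, const int num_cells)
{
    constexpr int default_cells_per_domain = 1000;
    return (requested > 0)
        ? requested
        : std::max(1, num_cells / default_cells_per_domain);
}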
@@ -37,6 +37,10 @@
 #include <opm/input/eclipse/Schedule/Well/WellConnections.hpp>
 #endif // HAVE_MPI && HAVE_ZOLTAN
 
+#if HAVE_MPI
+#include <opm/simulators/utils/MPISerializer.hpp>
+#endif
+
 #include <opm/grid/CpGrid.hpp>
 #include <opm/grid/polyhedralgrid.hh>
 
@@ -179,10 +183,10 @@ private:
     /// \param[in] g2l Mapping from globally unique cell IDs to local,
     ///    on-rank active cell IDs. Return value from \c connectElements().
     template <typename Comm>
-    void connectWells(const Comm comm,
-                      const std::vector<Opm::Well>& wells,
-                      const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
-                      const std::unordered_map<int, int>& g2l);
+    void connectWells(const Comm comm,
+                      const std::vector<Opm::Well>& wells,
+                      std::unordered_map<std::string, std::set<int>> possibleFutureConnections,
+                      const std::unordered_map<int, int>& g2l);
 };
 
 // Note: "grid_view.size(0)" is intentional here. It is not an error. The
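The only change to the declaration is that possibleFutureConnections is now taken by value instead of by const reference: each rank needs a mutable copy, because on ranks other than 0 the broadcast fills the map in. A hypothetical micro-example of the pattern; use() and its scaffolding are illustrative, while MpiSerializer and broadcast() appear in the diff itself.

#include <opm/simulators/utils/MPISerializer.hpp>

#include <set>
#include <string>
#include <unordered_map>

// Taking the map by value lets every rank mutate its own copy:
// rank 0 already holds the data, the others receive it in broadcast().
template <typename Comm>
void use(const Comm comm,
         std::unordered_map<std::string, std::set<int>> possibleFutureConnections)
{
    Opm::Parallel::MpiSerializer ser(comm);
    ser.broadcast(possibleFutureConnections); // fills the copy on ranks != 0
    // ... consume possibleFutureConnections on every rank ...
}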
@@ -285,13 +289,17 @@ ZoltanPartitioner::connectElements(const GridView&
 }
 
 template <typename Comm>
-void ZoltanPartitioner::connectWells(const Comm comm,
-                                     const std::vector<Opm::Well>& wells,
-                                     const std::unordered_map<std::string, std::set<int>>& possibleFutureConnections,
-                                     const std::unordered_map<int, int>& g2l)
+void ZoltanPartitioner::connectWells(const Comm comm,
+                                     const std::vector<Opm::Well>& wells,
+                                     std::unordered_map<std::string, std::set<int>> possibleFutureConnections,
+                                     const std::unordered_map<int, int>& g2l)
 {
     auto distributedWells = 0;
 
+    // Communicate Map to other processes, since it is only available on rank 0
+    Opm::Parallel::MpiSerializer ser(comm);
+    ser.broadcast(possibleFutureConnections);
+
     for (const auto& well : wells) {
         auto cellIx = std::vector<int>{};
         auto otherProc = 0;
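ser.broadcast() hides the serialization details. For intuition, a minimal plain-MPI sketch of what broadcasting such a map involves: pack on the root, broadcast the byte count and then the bytes, unpack on the other ranks. This illustrates the technique only and is not the MpiSerializer implementation.

#include <mpi.h>

#include <cstddef>
#include <set>
#include <sstream>
#include <string>
#include <unordered_map>

using FutureConnections = std::unordered_map<std::string, std::set<int>>;

// Pack the map into a flat text buffer on the root rank. Assumes well
// names contain no whitespace (true for Eclipse-style well names).
std::string pack(const FutureConnections& m)
{
    std::ostringstream os;
    os << m.size() << '\n';
    for (const auto& [well, cells] : m) {
        os << well << ' ' << cells.size();
        for (const int c : cells) { os << ' ' << c; }
        os << '\n';
    }
    return os.str();
}

// Rebuild the map from the broadcast buffer on receiving ranks.
FutureConnections unpack(const std::string& buf)
{
    std::istringstream is(buf);
    std::size_t nWells = 0;
    is >> nWells;
    FutureConnections m;
    for (std::size_t i = 0; i < nWells; ++i) {
        std::string well;
        std::size_t nCells = 0;
        is >> well >> nCells;
        auto& cells = m[well];
        for (std::size_t j = 0; j < nCells; ++j) {
            int c = 0;
            is >> c;
            cells.insert(c);
        }
    }
    return m;
}

// Broadcast the byte count first, then the bytes, then rebuild off-root.
void broadcastMap(FutureConnections& m, MPI_Comm comm, const int root = 0)
{
    int rank = 0;
    MPI_Comm_rank(comm, &rank);
    std::string buf = (rank == root) ? pack(m) : std::string{};
    auto len = static_cast<unsigned long>(buf.size());
    MPI_Bcast(&len, 1, MPI_UNSIGNED_LONG, root, comm);
    buf.resize(len);
    MPI_Bcast(buf.data(), static_cast<int>(len), MPI_CHAR, root, comm);
    if (rank != root) { m = unpack(buf); }
}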
@@ -194,8 +194,14 @@ namespace Opm {
 
         // Create cartesian to compressed mapping
         const auto& schedule_wells = this->schedule().getWellsatEnd();
-        const auto& possibleFutureConnections = this->schedule().getPossibleFutureConnections();
+        auto possibleFutureConnections = this->schedule().getPossibleFutureConnections();
 
+#if HAVE_MPI
+        // Communicate Map to other processes, since it is only available on rank 0
+        const auto& comm = this->simulator_.vanguard().grid().comm();
+        Parallel::MpiSerializer ser(comm);
+        ser.broadcast(possibleFutureConnections);
+#endif
         // initialize the additional cell connections introduced by wells.
         for (const auto& well : schedule_wells)
         {
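The same broadcast pattern is applied here in the well model, guarded by HAVE_MPI so serial builds are unaffected. Together with the partitioner changes above, every rank now sees the same set of possible future connections when the extra well-induced cell connections are initialized.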