From 39969673446cfcb924b6bc95ce6aa5d765d9a63b Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Tue, 6 Oct 2020 14:52:44 +0200
Subject: [PATCH 01/13] Added a class with information and communicator for parallel wells.

BlackoilWellModel now stores an instance of this class for each well. Inside that class there is a custom communicator that only contains ranks that will have local cells perforated by the well. This will be used in the application of the distributed well operator.

This is another small step in the direction of distributed wells, but it should be safe to merge this (note creation of the custom communicators is a collective operation in MPI but done only once).
---
 CMakeLists_files.cmake | 3 +
 opm/simulators/wells/BlackoilWellModel.hpp | 3 +
 .../wells/BlackoilWellModel_impl.hpp | 13 +-
 opm/simulators/wells/ParallelWellInfo.cpp | 116 +++++++++++++
 opm/simulators/wells/ParallelWellInfo.hpp | 118 ++++++++++++++
 tests/test_parallelwellinfo.cpp | 154 ++++++++++++++++++
 6 files changed, 401 insertions(+), 6 deletions(-)
 create mode 100644 opm/simulators/wells/ParallelWellInfo.cpp
 create mode 100644 opm/simulators/wells/ParallelWellInfo.hpp
 create mode 100644 tests/test_parallelwellinfo.cpp

diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake
index f9312ede4..28c7ca555 100644
--- a/CMakeLists_files.cmake
+++ b/CMakeLists_files.cmake
@@ -40,6 +40,7 @@ list (APPEND MAIN_SOURCE_FILES
 opm/simulators/utils/DeferredLogger.cpp
 opm/simulators/utils/gatherDeferredLogger.cpp
 opm/simulators/utils/ParallelRestart.cpp
+ opm/simulators/wells/ParallelWellInfo.cpp
 opm/simulators/wells/VFPProdProperties.cpp
 opm/simulators/wells/VFPInjProperties.cpp
 opm/simulators/wells/WellGroupHelpers.cpp
@@ -90,6 +91,7 @@ list (APPEND TEST_SOURCE_FILES
 tests/test_norne_pvt.cpp
 tests/test_wellprodindexcalculator.cpp
 tests/test_wellstatefullyimplicitblackoil.cpp
+ tests/test_parallelwellinfo.cpp
 )

 if(MPI_FOUND)
@@ -235,6 +237,7 @@ list (APPEND PUBLIC_HEADER_FILES
 opm/simulators/wells/MSWellHelpers.hpp
 opm/simulators/wells/BlackoilWellModel.hpp
 opm/simulators/wells/BlackoilWellModel_impl.hpp
+ opm/simulators/wells/ParallelWellInfo.hpp
 )

 list (APPEND EXAMPLE_SOURCE_FILES
diff --git a/opm/simulators/wells/BlackoilWellModel.hpp b/opm/simulators/wells/BlackoilWellModel.hpp
index 43b079a0c..1dab893fb 100644
--- a/opm/simulators/wells/BlackoilWellModel.hpp
+++ b/opm/simulators/wells/BlackoilWellModel.hpp
@@ -61,6 +61,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -278,6 +279,8 @@ namespace Opm {
 std::vector< std::vector > well_perf_data_;
 std::vector< WellProdIndexCalculator > prod_index_calc_;

+ std::vector< ParallelWellInfo > parallel_well_info_;
+
 bool wells_active_;

 // a vector of all the wells.
diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp index 32cda46d1..0920a478d 100644 --- a/opm/simulators/wells/BlackoilWellModel_impl.hpp +++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp @@ -52,17 +52,18 @@ namespace Opm { const auto& cartDims = Opm::UgGridHelpers::cartDims(grid); setupCartesianToCompressed_(Opm::UgGridHelpers::globalCell(grid), cartDims[0]*cartDims[1]*cartDims[2]); - - is_shut_or_defunct_ = [&ebosSimulator](const Well& well) { + auto& parallel_wells = ebosSimulator.vanguard().parallelWells(); + parallel_well_info_.assign(parallel_wells.begin(), parallel_wells.end()); + is_shut_or_defunct_ = [this, &ebosSimulator](const Well& well) { if (well.getStatus() == Well::Status::SHUT) return true; if (ebosSimulator.gridView().comm().size() == 1) return false; std::pair value{well.name(), true}; // false indicate not active! - const auto& parallel_wells = ebosSimulator.vanguard().parallelWells(); - auto candidate = std::lower_bound(parallel_wells.begin(), parallel_wells.end(), - value); - return candidate == parallel_wells.end() || *candidate != value; + auto candidate = std::lower_bound(parallel_well_info_.begin(), + parallel_well_info_.end(), + value); + return candidate == parallel_well_info_.end() || *candidate != value; }; alternative_well_rate_init_ = EWOMS_GET_PARAM(TypeTag, bool, AlternativeWellRateInit); diff --git a/opm/simulators/wells/ParallelWellInfo.cpp b/opm/simulators/wells/ParallelWellInfo.cpp new file mode 100644 index 000000000..9759c393a --- /dev/null +++ b/opm/simulators/wells/ParallelWellInfo.cpp @@ -0,0 +1,116 @@ +/* + Copyright 2020 OPM-OP AS + + This file is part of the Open Porous Media project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . +*/ +#include +#include + +namespace Opm +{ + +void ParallelWellInfo::DestroyComm::operator()(Communication* comm) +{ +#if HAVE_MPI + // Only delete custom communicators. + bool del = comm + && (*comm != Dune::MPIHelper::getLocalCommunicator()) + && (*comm != MPI_COMM_WORLD && *comm != MPI_COMM_NULL); + + if ( del ) + { + // Not 100% nice but safe as comm is deleted anyway + // We can only access a copy and no reference. + MPI_Comm mpi_comm = *comm; + MPI_Comm_free(&mpi_comm); + } +#endif + delete comm; +} + +ParallelWellInfo::ParallelWellInfo(const std::string& name, + bool hasLocalCells) + : name_(name), hasLocalCells_ (hasLocalCells), + isOwner_(true), comm_(new Communication(Dune::MPIHelper::getLocalCommunicator())) + {} + +ParallelWellInfo::ParallelWellInfo(const std::pair& well_info, + Communication allComm) + : name_(well_info.first), hasLocalCells_(well_info.second) +{ +#if HAVE_MPI + MPI_Comm newComm; + int color = hasLocalCells_ ? 
1 : MPI_UNDEFINED; + MPI_Comm_split(allComm, color, allComm.rank(), &newComm); + comm_.reset(new Communication(newComm)); +#else + comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator())); +#endif + isOwner_ = (comm_->rank() == 0); +} + +bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2) +{ + return well1.name() < well2.name() || (! (well2.name() < well1.name()) && well1.hasLocalCells() < well2.hasLocalCells()); +} + +bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2) +{ + bool ret = well1.name() == well2.name() && well1.hasLocalCells() == well2.hasLocalCells() + && well1.isOwner() == well2.isOwner(); +#if HAVE_MPI + using MPIComm = typename Dune::MPIHelper::MPICommunicator; + ret = ret && + static_cast(well1.communication()) == static_cast(well2.communication()); +#endif + return ret; +} + +bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2) +{ + return ! (well1 == well2); +} + +bool operator<(const std::pair& pair, const ParallelWellInfo& well) +{ + return pair.first < well.name() || ( !( well.name() < pair.first ) && pair.second < well.hasLocalCells() ); +} + +bool operator<( const ParallelWellInfo& well, const std::pair& pair) +{ + return well.name() < pair.first || ( !( pair.first < well.name() ) && well.hasLocalCells() < pair.second ); +} + +bool operator==(const std::pair& pair, const ParallelWellInfo& well) +{ + return pair.first == well.name() && pair.second == well.hasLocalCells(); +} + +bool operator==(const ParallelWellInfo& well, const std::pair& pair) +{ + return pair == well; +} + +bool operator!=(const std::pair& pair, const ParallelWellInfo& well) +{ + return pair.first != well.name() || pair.second != well.hasLocalCells(); +} + +bool operator!=(const ParallelWellInfo& well, const std::pair& pair) +{ + return pair != well; +} +} // end namespace Opm diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp new file mode 100644 index 000000000..ca8990977 --- /dev/null +++ b/opm/simulators/wells/ParallelWellInfo.hpp @@ -0,0 +1,118 @@ +/* + Copyright 2020 OPM-OP AS + + This file is part of the Open Porous Media project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . +*/ +#ifndef OPM_PARALLELWELLINFO_HEADER_INCLUDED +#define OPM_PARALLELWELLINFO_HEADER_INCLUDED + +#include +#include + +#include + +namespace Opm +{ + +/// \brief Class encapsulating some information about parallel wells +/// +/// e.g. 
It provides a communicator for well information +class ParallelWellInfo +{ +public: + using MPIComm = typename Dune::MPIHelper::MPICommunicator; +#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7) + using Communication = Dune::Communication; +#else + using Communication = Dune::CollectiveCommunication; +#endif + + + /// \brief Constructs object using MPI_COMM_SELF + ParallelWellInfo(const std::string& name = {""}, + bool hasLocalCells = true); + + /// \brief Constructs object with communication between all rank sharing + /// a well + /// \param well_info Pair of well name and whether local cells might be perforated + /// on this rank + /// \param allComm The communication object with all MPI ranks active in the simulation. + /// Default is the one with all ranks available. + ParallelWellInfo(const std::pair& well_info, + Communication allComm = Communication()); + + const Communication& communication() const + { + return *comm_; + } + + /// \brief Name of the well. + const std::string& name() const + { + return name_; + } + + /// \brief Whether local cells are perforated somewhen + bool hasLocalCells() const + { + return hasLocalCells_; + } + bool isOwner() const + { + return isOwner_; + } + +private: + + /// \brief Deleter that also frees custom MPI communicators + struct DestroyComm + { + void operator()(Communication* comm); + }; + + + /// \brief Name of the well. + std::string name_; + /// \brief Whether local cells are perforated somewhen + bool hasLocalCells_; + /// \brief Whether we own the well and should do reports etc. + bool isOwner_; + /// \brief Communication object for the well + /// + /// Contains only ranks where this well will perforate local cells. + std::unique_ptr comm_; +}; + +bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2); + +bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2); + +bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2); + +bool operator<(const std::pair& pair, const ParallelWellInfo& well); + +bool operator<( const ParallelWellInfo& well, const std::pair& pair); + +bool operator==(const std::pair& pair, const ParallelWellInfo& well); + +bool operator==(const ParallelWellInfo& well, const std::pair& pair); + +bool operator!=(const std::pair& pair, const ParallelWellInfo& well); + +bool operator!=(const ParallelWellInfo& well, const std::pair& pair); + +} // end namespace Opm +#endif // OPM_PARALLELWELLINFO_HEADER_INCLUDED diff --git a/tests/test_parallelwellinfo.cpp b/tests/test_parallelwellinfo.cpp new file mode 100644 index 000000000..04c6834e3 --- /dev/null +++ b/tests/test_parallelwellinfo.cpp @@ -0,0 +1,154 @@ +/* + Copyright 2020 OPM-OP AS + Copyright 2015 Dr. Blatt - HPC-Simulation-Software & Services. + + This file is part of the Open Porous Media project (OPM). + + OPM is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + OPM is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with OPM. If not, see . 
+*/ +#include +#include +#include +#include +#include +#include + +#define BOOST_TEST_MODULE ParallelWellInfo +#include +class MPIError { +public: + /** @brief Constructor. */ + MPIError(std::string s, int e) : errorstring(s), errorcode(e){} + /** @brief The error string. */ + std::string errorstring; + /** @brief The mpi error code. */ + int errorcode; +}; + +#ifdef HAVE_MPI +void MPI_err_handler(MPI_Comm *, int *err_code, ...){ + char *err_string=new char[MPI_MAX_ERROR_STRING]; + int err_length; + MPI_Error_string(*err_code, err_string, &err_length); + std::string s(err_string, err_length); + std::cerr << "An MPI Error ocurred:"<& p) +{ + return os << "{" << p.first << " "<< p.second << "}"; +} +} +namespace Opm +{ +std::ostream& operator<<(std::ostream& os, const Opm::ParallelWellInfo& w) +{ + return os << "{" << w.name() << " "<< w.hasLocalCells() << " "<< + w.isOwner() << "}"; +} +} + +BOOST_AUTO_TEST_CASE(ParallelWellComparison) +{ + int argc = 0; + char** argv = nullptr; + const auto& helper = Dune::MPIHelper::instance(argc, argv); + std::vector> pairs; + if (helper.rank() == 0) + pairs = {{"Test1", true},{"Test2", true}, {"Test1", false} }; + else + pairs = {{"Test1", false},{"Test2", true}, {"Test1", true} }; + + std::vector well_info; + well_info.assign(pairs.begin(), pairs.end()); + + BOOST_CHECK_EQUAL_COLLECTIONS(pairs.begin(), pairs.end(), + well_info.begin(), well_info.end()); + + BOOST_CHECK_EQUAL_COLLECTIONS(well_info.begin(), well_info.end(), + pairs.begin(), pairs.end()); + + BOOST_TEST(well_info[0] < pairs[1]); + BOOST_TEST(pairs[0] != well_info[1]); + BOOST_TEST(pairs[0] < well_info[1]); + BOOST_TEST(well_info[0] == pairs[0]); + + BOOST_TEST(well_info[0] != well_info[1]); + + Opm::ParallelWellInfo well0, well1; + + BOOST_TEST(well0 == well1); +#if HAVE_MPI + BOOST_TEST(well0.communication()==helper.getLocalCommunicator()); +#endif + Opm::ParallelWellInfo well2("Test", false); + std::pair pwell={"Test", true}; + BOOST_TEST(well2 < pwell); + Opm::ParallelWellInfo well3("Test", true); + BOOST_TEST(! (well3 < pwell)); + pwell.second = false; + BOOST_TEST(! (well3 < pwell)); + + if (helper.rank() == 0) + BOOST_TEST(well_info[0].communication().size()==1); + +#if HAVE_MPI + Opm::ParallelWellInfo::Communication comm{MPI_COMM_WORLD}; + + BOOST_TEST(well_info[1].communication().size() == comm.size()); + + if (helper.rank() > 0) + { + BOOST_TEST(well_info[2].communication().size() == comm.size()-1); + } +#endif + +} From 8ee58096ba564cd6a485eea67589c4a0625c26ca Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Fri, 9 Oct 2020 15:09:28 +0200 Subject: [PATCH 02/13] Make the parallel reduction when applying the Wells. The B matrix is basically a component-wise multiplication with a vector followed by a parallel reduction. We do that reduction to all ranks computing for the well to save the broadcast when applying C^T. 
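As a rough illustration of the pattern (a simplified sketch, not the actual OPM types; `comm` stands for the well's custom communicator introduced in the previous patch):

```
// Apply the locally stored rows of B, then sum the partial results over
// all ranks computing for this well. Afterwards every one of those ranks
// holds the complete product y = B x, so applying C^T needs no broadcast.
template<class Matrix, class Vector, class Comm>
void applyBWithReduction(const Matrix& B, const Vector& x, Vector& y,
                         const Comm& comm)
{
    B.mv(x, y);                    // local contribution only
    comm.sum(y.data(), y.size());  // parallel reduction to all ranks
}
```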
--- ebos/eclbasevanguard.hh | 10 ++ opm/simulators/wells/BlackoilWellModel.hpp | 3 +- .../wells/BlackoilWellModel_impl.hpp | 15 ++- opm/simulators/wells/MultisegmentWell.hpp | 4 +- .../wells/MultisegmentWell_impl.hpp | 6 +- opm/simulators/wells/StandardWell.hpp | 8 +- opm/simulators/wells/StandardWell_impl.hpp | 15 ++- opm/simulators/wells/WellHelpers.hpp | 126 ++++++++++++++++++ opm/simulators/wells/WellInterface.hpp | 6 +- opm/simulators/wells/WellInterface_impl.hpp | 2 + tests/test_wellmodel.cpp | 6 +- 11 files changed, 188 insertions(+), 13 deletions(-) diff --git a/ebos/eclbasevanguard.hh b/ebos/eclbasevanguard.hh index 37c9bba04..6356399e2 100644 --- a/ebos/eclbasevanguard.hh +++ b/ebos/eclbasevanguard.hh @@ -416,6 +416,16 @@ public: int outputInterval = EWOMS_GET_PARAM(TypeTag, int, EclOutputInterval); if (outputInterval >= 0) schedule().restart().overrideRestartWriteInterval(outputInterval); + + // Initialize parallelWells with all local wells + const auto& schedule_wells = schedule().getWellsatEnd(); + parallelWells_.reserve(schedule_wells.size()); + + for (const auto& well: schedule_wells) + { + parallelWells_.emplace_back(well.name(), true); + } + std::sort(parallelWells_.begin(), parallelWells_.end()); } /*! diff --git a/opm/simulators/wells/BlackoilWellModel.hpp b/opm/simulators/wells/BlackoilWellModel.hpp index 1dab893fb..b28cdc371 100644 --- a/opm/simulators/wells/BlackoilWellModel.hpp +++ b/opm/simulators/wells/BlackoilWellModel.hpp @@ -280,6 +280,7 @@ namespace Opm { std::vector< WellProdIndexCalculator > prod_index_calc_; std::vector< ParallelWellInfo > parallel_well_info_; + std::vector< ParallelWellInfo* > local_parallel_well_info_; bool wells_active_; @@ -360,7 +361,7 @@ namespace Opm { /// \param timeStepIdx The index of the time step. /// \param[out] globalNumWells the number of wells globally. std::vector< Well > getLocalNonshutWells(const int timeStepIdx, - int& globalNumWells) const; + int& globalNumWells); // compute the well fluxes and assemble them in to the reservoir equations as source terms // and in the well equations. 
diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp index 0920a478d..7d82c690a 100644 --- a/opm/simulators/wells/BlackoilWellModel_impl.hpp +++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp @@ -217,11 +217,23 @@ namespace Opm { template std::vector< Well > BlackoilWellModel:: - getLocalNonshutWells(const int timeStepIdx, int& globalNumWells) const + getLocalNonshutWells(const int timeStepIdx, int& globalNumWells) { auto w = schedule().getWells(timeStepIdx); globalNumWells = w.size(); w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct_), w.end()); + local_parallel_well_info_.clear(); + local_parallel_well_info_.reserve(w.size()); + for (const auto& well : w) + { + auto wellPair = std::make_pair(well.name(), true); + auto pwell = std::lower_bound(parallel_well_info_.begin(), + parallel_well_info_.end(), + wellPair); + assert(pwell != parallel_well_info_.end() && + *pwell == wellPair); + local_parallel_well_info_.push_back(&(*pwell)); + } return w; } @@ -802,6 +814,7 @@ namespace Opm { const auto& perf_data = this->well_perf_data_[wellID]; return std::make_unique(this->wells_ecl_[wellID], + *local_parallel_well_info_[wellID], time_step, this->param_, *this->rateConverter_, diff --git a/opm/simulators/wells/MultisegmentWell.hpp b/opm/simulators/wells/MultisegmentWell.hpp index 280f91dd8..cabefd73d 100644 --- a/opm/simulators/wells/MultisegmentWell.hpp +++ b/opm/simulators/wells/MultisegmentWell.hpp @@ -98,7 +98,9 @@ namespace Opm // TODO: for now, we only use one type to save some implementation efforts, while improve later. typedef DenseAd::Evaluation EvalWell; - MultisegmentWell(const Well& well, const int time_step, + MultisegmentWell(const Well& well, + const ParallelWellInfo& pw_info, + const int time_step, const ModelParameters& param, const RateConverterType& rate_converter, const int pvtRegionIdx, diff --git a/opm/simulators/wells/MultisegmentWell_impl.hpp b/opm/simulators/wells/MultisegmentWell_impl.hpp index d899ff75c..ca058a400 100644 --- a/opm/simulators/wells/MultisegmentWell_impl.hpp +++ b/opm/simulators/wells/MultisegmentWell_impl.hpp @@ -31,7 +31,9 @@ namespace Opm template MultisegmentWell:: - MultisegmentWell(const Well& well, const int time_step, + MultisegmentWell(const Well& well, + const ParallelWellInfo& pw_info, + const int time_step, const ModelParameters& param, const RateConverterType& rate_converter, const int pvtRegionIdx, @@ -40,7 +42,7 @@ namespace Opm const int index_of_well, const int first_perf_index, const std::vector& perf_data) - : Base(well, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data) + : Base(well, pw_info, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data) , segment_perforations_(numberOfSegments()) , segment_inlets_(numberOfSegments()) , cell_perforation_depth_diffs_(number_of_perforations_, 0.0) diff --git a/opm/simulators/wells/StandardWell.hpp b/opm/simulators/wells/StandardWell.hpp index b00b6f0e4..de14cf081 100644 --- a/opm/simulators/wells/StandardWell.hpp +++ b/opm/simulators/wells/StandardWell.hpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -158,7 +159,9 @@ namespace Opm using Base::contiBrineEqIdx; static const int contiEnergyEqIdx = Indices::contiEnergyEqIdx; - StandardWell(const Well& well, const int time_step, + StandardWell(const Well& well, + const ParallelWellInfo& pw_info, + const int 
time_step, const ModelParameters& param, const RateConverterType& rate_converter, const int pvtRegionIdx, @@ -377,6 +380,9 @@ namespace Opm // diagonal matrix for the well DiagMatWell invDuneD_; + // Wrapper for the parallel application of B for distributed wells + wellhelpers::ParallelStandardWellB parallelB_; + // several vector used in the matrix calculation mutable BVectorWell Bx_; mutable BVectorWell invDrw_; diff --git a/opm/simulators/wells/StandardWell_impl.hpp b/opm/simulators/wells/StandardWell_impl.hpp index da9657775..cfb80443c 100644 --- a/opm/simulators/wells/StandardWell_impl.hpp +++ b/opm/simulators/wells/StandardWell_impl.hpp @@ -33,7 +33,9 @@ namespace Opm template StandardWell:: - StandardWell(const Well& well, const int time_step, + StandardWell(const Well& well, + const ParallelWellInfo& pw_info, + const int time_step, const ModelParameters& param, const RateConverterType& rate_converter, const int pvtRegionIdx, @@ -42,9 +44,10 @@ namespace Opm const int index_of_well, const int first_perf_index, const std::vector& perf_data) - : Base(well, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data) + : Base(well, pw_info, time_step, param, rate_converter, pvtRegionIdx, num_components, num_phases, index_of_well, first_perf_index, perf_data) , perf_densities_(number_of_perforations_) , perf_pressure_diffs_(number_of_perforations_) + , parallelB_(duneB_, pw_info) , F0_(numWellConservationEq) , ipr_a_(number_of_phases_) , ipr_b_(number_of_phases_) @@ -657,6 +660,9 @@ namespace Opm // Update the connection connectionRates_ = connectionRates; + // accumulate resWell_ and invDuneD_ in parallel to get effects of all perforations (might be distributed) + wellhelpers::sumDistributedWellEntries(invDuneD_[0][0], resWell_[0], + this->parallel_well_info_->communication()); // add vol * dF/dt + Q to the well equations; for (int componentIdx = 0; componentIdx < numWellConservationEq; ++componentIdx) { // TODO: following the development in MSW, we need to convert the volume of the wellbore to be surface volume @@ -2475,7 +2481,8 @@ namespace Opm assert( invDrw_.size() == invDuneD_.N() ); // Bx_ = duneB_ * x - duneB_.mv(x, Bx_); + parallelB_.mv(x, Bx_); + // invDBx = invDuneD_ * Bx_ // TODO: with this, we modified the content of the invDrw_. // Is it necessary to do this to save some memory? @@ -2574,7 +2581,7 @@ namespace Opm BVectorWell resWell = resWell_; // resWell = resWell - B * x - duneB_.mmv(x, resWell); + parallelB_.mmv(x, resWell); // xw = D^-1 * resWell invDuneD_.mv(resWell, xw); } diff --git a/opm/simulators/wells/WellHelpers.hpp b/opm/simulators/wells/WellHelpers.hpp index f89df78f7..c0ae7edb3 100644 --- a/opm/simulators/wells/WellHelpers.hpp +++ b/opm/simulators/wells/WellHelpers.hpp @@ -1,6 +1,7 @@ /* Copyright 2016 SINTEF ICT, Applied Mathematics. Copyright 2016 Statoil ASA. + Copyright 2020 OPM-OP AS. This file is part of the Open Porous Media project (OPM). @@ -22,6 +23,12 @@ #ifndef OPM_WELLHELPERS_HEADER_INCLUDED #define OPM_WELLHELPERS_HEADER_INCLUDED +#include +#include + +#include +#include +#include #include @@ -33,6 +40,93 @@ namespace Opm { namespace wellhelpers { + /// \brief A wrapper around the B matrix for distributed wells + /// + /// For standard wells the B matrix, is basically a multiplication + /// of the equation of the perforated cells followed by a reduction + /// (summation) of these to the well equations. 
+ /// + /// This class does that in the functions mv and mmv (from the DUNE + /// matrix interface. + /// + /// \tparam Scalar The scalar used for the computation. + template + class ParallelStandardWellB + { + public: + using Block = Dune::DynamicMatrix; + using Matrix = Dune::BCRSMatrix; + + ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& pinfo) + : B_(&B), pinfo_(&pinfo) + {} + + //! y = A x + template + void mv (const X& x, Y& y) const + { + #if !defined(NDEBUG) && HAVE_MPI + // We need to make sure that all ranks are actually computing + // for the same well. Doing this by checking the name of the well. + int cstring_size = pinfo_->name().size()+1; + std::vector sizes(pinfo_->communication().size()); + pinfo_->communication().allgather(&cstring_size, 1, sizes.data()); + std::vector offsets(sizes.size()+1, 0); //last entry will be accumulated size + std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1); + std::vector cstrings(offsets[sizes.size()]); + bool consistentWells = true; + char* send = const_cast(pinfo_->name().c_str()); + pinfo_->communication().allgatherv(send, cstring_size, + cstrings.data(), sizes.data(), + offsets.data()); + for(std::size_t i = 0; i < sizes.size(); ++i) + { + std::string name(cstrings.data()+offsets[i]); + if (name != pinfo_->name()) + { + if (pinfo_->communication().rank() == 0) + { + //only one process per well logs, might not be 0 of MPI_COMM_WORLD, though + std::string msg = std::string("Fatal Error: Not all ranks are computing for the same well") + + " well should be " + pinfo_->name() + " but is " + + name; + OpmLog::debug(msg); + } + consistentWells = false; + break; + } + } + pinfo_->communication().barrier(); + // As not all processes are involved here we need to use MPI_Abort and hope MPI kills them all + if (!consistentWells) + { + MPI_Abort(MPI_COMM_WORLD, 1); + } +#endif + B_->mv(x, y); + // The B matrix is basically a component-wise multiplication + // with a vector followed by a parallel reduction. We do that + // reduction to all ranks computing for the well to save the + // broadcast when applying C^T. + using YField = typename Y::block_type::value_type; + assert(y.size() == 1); + this->pinfo_->communication().allreduce>(y[0].container().data(), + y[0].container().size()); + } + + //! y = A x + template + void mmv (const X& x, Y& y) const + { + Y temp(y); + mv(x, temp); // includes parallel reduction + y -= temp; + } + private: + const Matrix* B_; + const ParallelWellInfo* pinfo_; + }; + inline double computeHydrostaticCorrection(const double well_ref_depth, const double vfp_ref_depth, const double rho, const double gravity) { @@ -44,6 +138,38 @@ namespace Opm { + /// \brief Sums entries of the diagonal Matrix for distributed wells + template + void sumDistributedWellEntries(Dune::DynamicMatrix& mat, Dune::DynamicVector& vec, + const Comm& comm) + { + // DynamicMatrix does not use one contiguous array for storing the data + // but a DynamicVector of DynamicVectors. Hence we need to copy the data + // to contiguous memory for MPI. 
+ if (comm.size() == 1) + { + return; + } + std::vector allEntries; + allEntries.reserve(mat.N()*mat.M()+vec.size()); + for(const auto& row: mat) + { + allEntries.insert(allEntries.end(), row.begin(), row.end()); + } + allEntries.insert(allEntries.end(), vec.begin(), vec.end()); + comm.sum(allEntries.data(), allEntries.size()); + auto pos = allEntries.begin(); + auto cols = mat.cols(); + for(auto&& row: mat) + { + std::copy(pos, pos + cols, &(row[0])); + pos += cols; + } + assert(std::size_t(allEntries.end() - pos) == vec.size()); + std::copy(pos, allEntries.end(), &(vec[0])); + } + + template std::array diff --git a/opm/simulators/wells/WellInterface.hpp b/opm/simulators/wells/WellInterface.hpp index f90ca706d..b560a571f 100644 --- a/opm/simulators/wells/WellInterface.hpp +++ b/opm/simulators/wells/WellInterface.hpp @@ -121,7 +121,9 @@ namespace Opm has_brine, Indices::numPhases >; /// Constructor - WellInterface(const Well& well, const int time_step, + WellInterface(const Well& well, + const ParallelWellInfo& pw_info, + const int time_step, const ModelParameters& param, const RateConverterType& rate_converter, const int pvtRegionIdx, @@ -317,6 +319,8 @@ namespace Opm Well well_ecl_; + const ParallelWellInfo* parallel_well_info_; + const int current_step_; // simulation parameters diff --git a/opm/simulators/wells/WellInterface_impl.hpp b/opm/simulators/wells/WellInterface_impl.hpp index dbc3e2b22..adabca396 100644 --- a/opm/simulators/wells/WellInterface_impl.hpp +++ b/opm/simulators/wells/WellInterface_impl.hpp @@ -30,6 +30,7 @@ namespace Opm template WellInterface:: WellInterface(const Well& well, + const ParallelWellInfo& pw_info, const int time_step, const ModelParameters& param, const RateConverterType& rate_converter, @@ -40,6 +41,7 @@ namespace Opm const int first_perf_index, const std::vector& perf_data) : well_ecl_(well) + , parallel_well_info_(&pw_info) , current_step_(time_step) , param_(param) , rateConverter_(rate_converter) diff --git a/tests/test_wellmodel.cpp b/tests/test_wellmodel.cpp index 85fac0329..7e19ebb3b 100644 --- a/tests/test_wellmodel.cpp +++ b/tests/test_wellmodel.cpp @@ -126,8 +126,9 @@ BOOST_AUTO_TEST_CASE(TestStandardWellInput) { Opm::PerforationData dummy; std::vector pdata(well.getConnections().size(), dummy); + Opm::ParallelWellInfo pinfo{well.name()}; - BOOST_CHECK_THROW( StandardWell( well, -1, param, *rateConverter, 0, 3, 3, 0, 0, pdata), std::invalid_argument); + BOOST_CHECK_THROW( StandardWell( well, pinfo, -1, param, *rateConverter, 0, 3, 3, 0, 0, pdata), std::invalid_argument); } @@ -156,7 +157,8 @@ BOOST_AUTO_TEST_CASE(TestBehavoir) { std::vector(10, 0))); Opm::PerforationData dummy; std::vector pdata(wells_ecl[w].getConnections().size(), dummy); - wells.emplace_back(new StandardWell(wells_ecl[w], current_timestep, param, *rateConverter, 0, 3, 3, w, 0, pdata) ); + Opm::ParallelWellInfo pinfo{wells_ecl[w].name()}; + wells.emplace_back(new StandardWell(wells_ecl[w], pinfo, current_timestep, param, *rateConverter, 0, 3, 3, w, 0, pdata) ); } } From 3d92e41cadfa64966529757c006521098f1867c5 Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Thu, 26 Nov 2020 21:33:01 +0100 Subject: [PATCH 03/13] Recover prev. 
iteration count and curves for undistributed wells

Rounding errors for `B.mmv(x,y)` are slightly different from
```
Y temp(y);
B.mv(x, temp);
y -= temp;
```
---
 opm/simulators/wells/WellHelpers.hpp | 31 +++++++++++++++------
 opm/simulators/wells/WellInterface_impl.hpp | 1 +
 2 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/opm/simulators/wells/WellHelpers.hpp b/opm/simulators/wells/WellHelpers.hpp
index c0ae7edb3..53d15793f 100644
--- a/opm/simulators/wells/WellHelpers.hpp
+++ b/opm/simulators/wells/WellHelpers.hpp
@@ -104,23 +104,38 @@ namespace Opm {
 }
 #endif
 B_->mv(x, y);
- // The B matrix is basically a component-wise multiplication
- // with a vector followed by a parallel reduction. We do that
- // reduction to all ranks computing for the well to save the
- // broadcast when applying C^T.
- using YField = typename Y::block_type::value_type;
- assert(y.size() == 1);
- this->pinfo_->communication().allreduce>(y[0].container().data(),
- y[0].container().size());
+
+ if (this->pinfo_->communication().size() > 1)
+ {
+ // Only do communication if we must.
+ // The B matrix is basically a component-wise multiplication
+ // with a vector followed by a parallel reduction. We do that
+ // reduction to all ranks computing for the well to save the
+ // broadcast when applying C^T.
+ using YField = typename Y::block_type::value_type;
+ assert(y.size() == 1);
+ this->pinfo_->communication().allreduce>(y[0].container().data(),
+ y[0].container().size());
+ }
 }

 //! y = A x
 template
 void mmv (const X& x, Y& y) const
 {
+ if (this->pinfo_->communication().size() == 1)
+ {
+ // Do the same thing as before. The else branch
+ // produces different rounding errors and results
+ // slightly different iteration counts / well curves
+ B_->mmv(x, y);
+ }
+ else
+ {
 Y temp(y);
 mv(x, temp); // includes parallel reduction
 y -= temp;
+ }
 }
 private:
 const Matrix* B_;
diff --git a/opm/simulators/wells/WellInterface_impl.hpp b/opm/simulators/wells/WellInterface_impl.hpp
index adabca396..ce4343d5e 100644
--- a/opm/simulators/wells/WellInterface_impl.hpp
+++ b/opm/simulators/wells/WellInterface_impl.hpp
@@ -52,6 +52,7 @@ namespace Opm
 , first_perf_(first_perf_index)
 , perf_data_(&perf_data)
 {
+ assert(well.name()==pw_info.name());
 assert(std::is_sorted(perf_data.begin(), perf_data.end(),
 [](const auto& perf1, const auto& perf2){
 return perf1.ecl_index < perf2.ecl_index;
From 317e29d15a281fbc7a8de2b18ed94a9febe40799 Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Fri, 27 Nov 2020 18:18:40 +0100
Subject: [PATCH 04/13] Utility for checking connections of distributed well.
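Roughly the intended usage (a sketch; `findLocalCell` is a hypothetical stand-in for the caller's lookup of a connection's cell in the local grid):

```
// Each rank marks the connections whose cells it found in its local grid;
// the check sums these markers over the well's communicator and throws on
// the owner rank if some connection was found on no rank at all.
Opm::CheckDistributedWellConnections checker(well, parallelWellInfo);
const auto& connections = well.getConnections();
for (std::size_t i = 0; i < connections.size(); ++i) {
    if (findLocalCell(connections[i]))  // hypothetical local lookup
        checker.connectionFound(i);
}
checker.checkAllConnectionsFound();
```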
---
 opm/simulators/wells/ParallelWellInfo.cpp | 44 +++++++++++++++++++++++
 opm/simulators/wells/ParallelWellInfo.hpp | 26 +++++++++++++-
 2 files changed, 69 insertions(+), 1 deletion(-)

diff --git a/opm/simulators/wells/ParallelWellInfo.cpp b/opm/simulators/wells/ParallelWellInfo.cpp
index 9759c393a..6a4efedfd 100644
--- a/opm/simulators/wells/ParallelWellInfo.cpp
+++ b/opm/simulators/wells/ParallelWellInfo.cpp
@@ -18,6 +18,7 @@
 */
 #include
 #include
+#include

 namespace Opm
 {
@@ -113,4 +114,47 @@ bool operator!=(const ParallelWellInfo& well, const std::pair
 {
 return pair != well;
 }
+
+CheckDistributedWellConnections::CheckDistributedWellConnections(const Well& well,
+ const ParallelWellInfo& info)
+ : well_(well), pwinfo_(info)
+{
+ foundConnections_.resize(well.getConnections().size(), 0);
+}
+
+void
+CheckDistributedWellConnections::connectionFound(std::size_t i)
+{
+ foundConnections_[i] = 1;
+}
+
+bool
+CheckDistributedWellConnections::checkAllConnectionsFound()
+{
+ // Ecl does not hold any information of remote connections.
+ assert(pwinfo_.communication().max(foundConnections_.size()) == foundConnections_.size());
+ pwinfo_.communication().sum(foundConnections_.data(),
+ foundConnections_.size());
+
+ std::string msg = std::string("Cells with these i,j,k indices were not found ")
+ + "in grid (well = " + pwinfo_.name() + "):";
+ bool missingCells = false;
+ auto start = foundConnections_.begin();
+ for(auto conFound = start; conFound != foundConnections_.end(); ++conFound)
+ {
+ if (*conFound == 0)
+ {
+ const auto& completion = well_.getConnections()[conFound - start];
+ msg = msg + " " + std::to_string(completion.getI()) + ","
+ + std::to_string(completion.getJ()) + ","
+ + std::to_string(completion.getK()) + " ";
+ missingCells = true;
+ }
+ }
+ if (missingCells && pwinfo_.isOwner())
+ {
+ OPM_THROW(std::runtime_error, msg);
+ }
+ return !missingCells;
+}
 } // end namespace Opm
diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp
index ca8990977..a1f1fd361 100644
--- a/opm/simulators/wells/ParallelWellInfo.hpp
+++ b/opm/simulators/wells/ParallelWellInfo.hpp
@@ -17,11 +17,13 @@
 along with OPM. If not, see .
 */
 #ifndef OPM_PARALLELWELLINFO_HEADER_INCLUDED
-#define OPM_PARALLELWELLINFO_HEADER_INCLUDED
+#define OPM_PARALLELWELLINFO_HEADER_INCLUDED

 #include
 #include

+#include
+
 #include

 namespace Opm
 {
@@ -96,6 +98,28 @@ private:
 std::unique_ptr comm_;
 };

+/// \brief Class checking that all connections are on active cells
+///
+/// Works for distributed wells, too
+class CheckDistributedWellConnections
+{
+public:
+ CheckDistributedWellConnections(const Well& well,
+ const ParallelWellInfo& info);
+
+ /// \brief Indicate that the i-th completion was found
+ ///
+ /// in the local grid.
+ /// \param index The index of the completion in Well::getConnections
+ void connectionFound(std::size_t index);
+
+ bool checkAllConnectionsFound();
+private:
+ std::vector foundConnections_;
+ const Well& well_;
+ const ParallelWellInfo& pwinfo_;
+};
+
 bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2);

 bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
From 73919d61366a55cff6f2413f2977d9c95ebe2b2a Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Fri, 27 Nov 2020 18:20:59 +0100
Subject: [PATCH 05/13] Support broadcasting value of first perforation for distributed well

This is needed to e.g. determine bottom hole pressure, pvt region, etc.
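A usage sketch with hypothetical variable names (the member functions are the ones added below):

```
// Every rank of the well reports whether it holds the first perforation;
// ParallelWellInfo remembers the rank that does.
parallelWellInfo.communicateFirstPerforation(hasFirstPerfLocally);

// Afterwards a value stored at the first perforation (e.g. a reference
// depth or the pvt region) can be obtained on all ranks of the well.
double refDepth = parallelWellInfo.broadcastFirstPerforationValue(localRefDepth);
```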
---
 opm/simulators/wells/ParallelWellInfo.cpp | 13 ++++++++++++-
 opm/simulators/wells/ParallelWellInfo.hpp | 13 +++++++++++++
 2 files changed, 25 insertions(+), 1 deletion(-)

diff --git a/opm/simulators/wells/ParallelWellInfo.cpp b/opm/simulators/wells/ParallelWellInfo.cpp
index 6a4efedfd..3acf5cb1a 100644
--- a/opm/simulators/wells/ParallelWellInfo.cpp
+++ b/opm/simulators/wells/ParallelWellInfo.cpp
@@ -45,7 +45,8 @@ void ParallelWellInfo::DestroyComm::operator()(Communication* comm)
 ParallelWellInfo::ParallelWellInfo(const std::string& name,
 bool hasLocalCells)
 : name_(name), hasLocalCells_ (hasLocalCells),
- isOwner_(true), comm_(new Communication(Dune::MPIHelper::getLocalCommunicator()))
+ isOwner_(true), rankWithFirstPerf_(-1),
+ comm_(new Communication(Dune::MPIHelper::getLocalCommunicator()))
 {}

 ParallelWellInfo::ParallelWellInfo(const std::pair& well_info,
@@ -63,6 +64,16 @@ ParallelWellInfo::ParallelWellInfo(const std::pair& well_info,
 isOwner_ = (comm_->rank() == 0);
 }

+void ParallelWellInfo::communicateFirstPerforation(bool hasFirst)
+{
+ int first = hasFirst;
+ std::vector firstVec(comm_->size());
+ comm_->allgather(&first, 1, firstVec.data());
+ auto found = std::find_if(firstVec.begin(), firstVec.end(),
+ [](int i) -> bool{ return i;});
+ rankWithFirstPerf_ = found - firstVec.begin();
+}
+
 bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
 {
 return well1.name() < well2.name() || (! (well2.name() < well1.name()) && well1.hasLocalCells() < well2.hasLocalCells());
diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp
index a1f1fd361..497bb095c 100644
--- a/opm/simulators/wells/ParallelWellInfo.hpp
+++ b/opm/simulators/wells/ParallelWellInfo.hpp
@@ -61,6 +61,17 @@ public:
 return *comm_;
 }

+ /// \brief Collectively decide which ran has first perforation.
+ void communicateFirstPerforation(bool hasFirst);
+
+ template
+ T broadcastFirstPerforationValue(const T& t) const
+ {
+ T res=t;
+ comm_->broadcast(&res, 1, rankWithFirstPerf_);
+ return res;
+ }
+
 /// \brief Name of the well.
 const std::string& name() const
 {
@@ -92,6 +103,8 @@ private:
 bool hasLocalCells_;
 /// \brief Whether we own the well and should do reports etc.
 bool isOwner_;
+ /// \brief Rank with the first perforation on it.
+ int rankWithFirstPerf_;
 /// \brief Communication object for the well
 ///
 /// Contains only ranks where this well will perforate local cells.
From 0126243f027d2713f896afc7b7a21c3c86d66244 Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Fri, 27 Nov 2020 18:24:35 +0100
Subject: [PATCH 06/13] Utility to communicate above value of a perf of distributed well

This adds a utility that creates a vector of all above values for the local perforations. For distributed wells this is needed as the perforation above might live on another processor. We use the parallel index sets together with the global index of the cells that are perforated.
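The intended call sequence, condensed from the unit tests added below (the index and value vectors are example data):

```
std::vector<int> eclIndex = {0, 1, 2, 3, 7, 8, 10, 11};  // well order
std::vector<double> current(eclIndex.size(), 1.0);       // local values

// Register, in ECL order, (index of perforation above, own index) for
// every local perforation; -1 marks the first perforation of the well.
commAbove.beginReset();
commAbove.pushBackEclIndex(-1, eclIndex[0]);
for (std::size_t i = 1; i < eclIndex.size(); ++i)
    commAbove.pushBackEclIndex(eclIndex[i - 1], eclIndex[i]);
commAbove.endReset();

// For each local perforation get the value at the perforation above it;
// the first perforation receives the supplied default -10.0.
auto above = commAbove.communicate(-10.0, current.data(), current.size());
```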
--- opm/simulators/wells/ParallelWellInfo.cpp | 160 +++++++++++++++++++++- opm/simulators/wells/ParallelWellInfo.hpp | 105 ++++++++++++++ tests/test_parallelwellinfo.cpp | 124 +++++++++++++++++ 3 files changed, 386 insertions(+), 3 deletions(-) diff --git a/opm/simulators/wells/ParallelWellInfo.cpp b/opm/simulators/wells/ParallelWellInfo.cpp index 3acf5cb1a..4dc653b13 100644 --- a/opm/simulators/wells/ParallelWellInfo.cpp +++ b/opm/simulators/wells/ParallelWellInfo.cpp @@ -20,8 +20,123 @@ #include #include +namespace Dune +{ +#if HAVE_MPI +template<> +struct CommPolicy +{ + using Type = double*; + using IndexedType = double; + using IndexedTypeFlag = Dune::SizeOne; + static const void* getAddress(const double*& v, int index) + { + return v + index; + } + static int getSize(const double*&, int) + { + return 1; + } +}; +#endif +} + namespace Opm { +CommunicateAbove::CommunicateAbove([[maybe_unused]] const Communication& comm) +#if HAVE_MPI + : comm_(comm), interface_(comm_) +#endif +{} + +void CommunicateAbove::clear() +{ +#if HAVE_MPI + above_indices_ = {}; + current_indices_ = {}; + interface_.free(); + communicator_.free(); +#endif + count_ = 0; +} + +void CommunicateAbove::beginReset() +{ + clear(); +#if HAVE_MPI + if (comm_.size() > 1) + { + current_indices_.beginResize(); + above_indices_.beginResize(); + } +#endif +} + +void CommunicateAbove::endReset() +{ +#if HAVE_MPI + if (comm_.size() > 1) + { + above_indices_.endResize(); + current_indices_.endResize(); + remote_indices_.setIndexSets(current_indices_, above_indices_, comm_); + remote_indices_.setIncludeSelf(true); + remote_indices_.rebuild(); + using FromSet = Dune::EnumItem; + using ToSet = Dune::AllSet; + interface_.build(remote_indices_, FromSet(), ToSet()); + communicator_.build(interface_); + } +#endif +} + +std::vector CommunicateAbove::communicate(double first_above, + const double* current, + std::size_t size) +{ + std::vector above(size, first_above); + +#if HAVE_MPI + if (comm_.size() > 1) + { + using Handle = Dune::OwnerOverlapCopyCommunication::CopyGatherScatter; + auto aboveData = above.data(); + // Ugly const_cast needed as my compiler says, that + // passing const double*& and double* as parameter is + // incompatible with function decl template forward(const Data&, Data&)) + // That would need the first argument to be double* const& + communicator_.forward(const_cast(current), aboveData); + } + else +#endif + { + if (above.size() > 1) + { + // No comunication needed, just copy. 
+ std::copy(current, current + (above.size() - 1), above.begin()+1);
+ }
+ }
+ return above;
+}
+
+void CommunicateAbove::pushBackEclIndex([[maybe_unused]] int above,
+ [[maybe_unused]] int current,
+ [[maybe_unused]] bool isOwner)
+{
+#if HAVE_MPI
+ if (comm_.size() > 1)
+ {
+ Attribute attr = owner;
+ if (!isOwner)
+ {
+ attr = overlap;
+ }
+ above_indices_.add(above, {count_, attr, true});
+ current_indices_.add(current, {count_++, attr, true});
+ }
+#endif
+}
+

 void ParallelWellInfo::DestroyComm::operator()(Communication* comm)
 {
@@ -46,12 +161,14 @@ ParallelWellInfo::ParallelWellInfo(const std::string& name,
 bool hasLocalCells)
 : name_(name), hasLocalCells_ (hasLocalCells),
 isOwner_(true), rankWithFirstPerf_(-1),
- comm_(new Communication(Dune::MPIHelper::getLocalCommunicator()))
+ comm_(new Communication(Dune::MPIHelper::getLocalCommunicator())),
+ commAbove_(new CommunicateAbove(*comm_))
 {}

 ParallelWellInfo::ParallelWellInfo(const std::pair& well_info,
- Communication allComm)
- : name_(well_info.first), hasLocalCells_(well_info.second)
+ [[maybe_unused]] Communication allComm)
+ : name_(well_info.first), hasLocalCells_(well_info.second),
+ rankWithFirstPerf_(-1)
 {
 #if HAVE_MPI
 MPI_Comm newComm;
@@ -61,6 +178,7 @@ ParallelWellInfo::ParallelWellInfo(const std::pair& well_info,
 #else
 comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator()));
 #endif
+ commAbove_.reset(new CommunicateAbove(*comm_));
 isOwner_ = (comm_->rank() == 0);
 }

@@ -74,6 +192,42 @@ void ParallelWellInfo::communicateFirstPerforation(bool hasFirst)
 rankWithFirstPerf_ = found - firstVec.begin();
 }

+void ParallelWellInfo::pushBackEclIndex(int above, int current)
+{
+ commAbove_->pushBackEclIndex(above, current);
+}
+
+void ParallelWellInfo::beginReset()
+{
+ commAbove_->beginReset();
+}
+
+
+void ParallelWellInfo::endReset()
+{
+ commAbove_->endReset();
+}
+
+void ParallelWellInfo::clearCommunicateAbove()
+{
+ commAbove_->clear();
+}
+
+std::vector ParallelWellInfo::communicateAboveValues(double zero_value,
+ const double* current_values,
+ std::size_t size) const
+{
+ return commAbove_->communicate(zero_value, current_values,
+ size);
+}
+
+std::vector ParallelWellInfo::communicateAboveValues(double zero_value,
+ const std::vector& current_values) const
+{
+ return commAbove_->communicate(zero_value, current_values.data(),
+ current_values.size());
+}
+
 bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
 {
 return well1.name() < well2.name() || (!
(well2.name() < well1.name()) && well1.hasLocalCells() < well2.hasLocalCells()); diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp index 497bb095c..981fd4aa8 100644 --- a/opm/simulators/wells/ParallelWellInfo.hpp +++ b/opm/simulators/wells/ParallelWellInfo.hpp @@ -21,6 +21,8 @@ #define OPM_PARALLELWELLINFO_HEADER_INCLUDED #include #include +#include +#include #include @@ -29,6 +31,73 @@ namespace Opm { +/// \brief Class to facilitate getting values associated with the above perforation +/// +class CommunicateAbove +{ + enum Attribute { + owner=1, overlap=2 + }; + using MPIComm = typename Dune::MPIHelper::MPICommunicator; +#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7) + using Communication = Dune::Communication; +#else + using Communication = Dune::CollectiveCommunication; +#endif + using LocalIndex = Dune::ParallelLocalIndex; +#if HAVE_MPI + using IndexSet = Dune::ParallelIndexSet; + using RI = Dune::RemoteIndices; +#endif + +public: + CommunicateAbove(const Communication& comm); + /// \brief Adds information about the ecl indices of the perforations. + /// + /// \warning Theses indices need to be push in the same order as they + /// appear in the ECL well specifiation. Use -1 if there is + /// no perforation above. + /// \param above The ECL index of the next open perforation above. + /// \param current The ECL index of the current open perforation. + void pushBackEclIndex(int above, int current, bool owner=true); + + /// \brief Clear all the parallel information + void clear(); + + /// \brief Indicates that we will add the index information + /// \see pushBackEclIndex + void beginReset(); + + /// \brief Indicates that the index information is complete. + /// + /// Sets up the commmunication structures to be used by + /// communicate() + void endReset(); + + /// \brief Creates an array of values for the perforation above. + /// \param first_value Value to use for above of the first perforation + /// \param current C-array of the values at the perforations + /// \param size The size of the C-array and the returned vector + /// \return a vector containing the values for the perforation above. + std::vector communicate(double first_value, + const double* current, + std::size_t size); + +private: +#if HAVE_MPI + Communication comm_; + /// \brief Mapping of the local well index to ecl index + IndexSet current_indices_; + /// \brief Mapping of the above well index to ecl index + IndexSet above_indices_; + RI remote_indices_; + Dune::Interface interface_; + Dune::BufferedCommunicator communicator_; +#endif + std::size_t count_{}; +}; + + /// \brief Class encapsulating some information about parallel wells /// /// e.g. It provides a communicator for well information @@ -72,6 +141,29 @@ public: return res; } + /// \brief Creates an array of values for the perforation above. + /// \param first_value Value to use for above of the first perforation + /// \param current C-array of the values at the perforations + /// \param size The size of the C-array and the returned vector + /// \return a vector containing the values for the perforation above. + std::vector communicateAboveValues(double zero_value, + const double* current, + std::size_t size) const; + + /// \brief Creates an array of values for the perforation above. 
+ /// \param first_value Value to use for above of the first perforation + /// \param current vector of current values + std::vector communicateAboveValues(double zero_value, + const std::vector& current) const; + /// \brief Adds information about the ecl indices of the perforations. + /// + /// \warning Theses indices need to be push in the same order as they + /// appear in the ECL well specifiation. Use -1 if there is + /// no perforation above. + /// \param above The ECL index of the next open perforation above. + /// \param current The ECL index of the current open perforation. + void pushBackEclIndex(int above, int current); + /// \brief Name of the well. const std::string& name() const { @@ -88,6 +180,16 @@ public: return isOwner_; } + /// \brief Inidicate that we will reset the ecl index information + /// + /// \see pushBackEclIndex; + void beginReset(); + + /// \brief Inidicate completion of reset of the ecl index information + void endReset(); + + /// \brief Free data of communication data structures. + void clearCommunicateAbove(); private: /// \brief Deleter that also frees custom MPI communicators @@ -109,6 +211,9 @@ private: /// /// Contains only ranks where this well will perforate local cells. std::unique_ptr comm_; + + /// \brief used to communicate the values for the perforation above. + std::unique_ptr commAbove_; }; /// \brief Class checking that all connections are on active cells diff --git a/tests/test_parallelwellinfo.cpp b/tests/test_parallelwellinfo.cpp index 04c6834e3..f2c3843c8 100644 --- a/tests/test_parallelwellinfo.cpp +++ b/tests/test_parallelwellinfo.cpp @@ -152,3 +152,127 @@ BOOST_AUTO_TEST_CASE(ParallelWellComparison) #endif } + +BOOST_AUTO_TEST_CASE(CommunicateAboveSelf) +{ + auto comm = Dune::MPIHelper::getLocalCommunicator(); + Opm::CommunicateAbove commAbove{ comm }; + for(std::size_t count=0; count < 2; ++count) + { + std::vector eclIndex = {0, 1, 2, 3, 7 , 8, 10, 11}; + std::vector current(eclIndex.size()); + std::transform(eclIndex.begin(), eclIndex.end(), current.begin(), + [](double v){ return 1+10.0*v;}); + commAbove.beginReset(); + for (std::size_t i = 0; i < current.size(); ++i) + { + if (i==0) + commAbove.pushBackEclIndex(-1, eclIndex[i]); + else + commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]); + } + commAbove.endReset(); + auto above = commAbove.communicate(-10.0, current.data(), current.size()); + BOOST_TEST(above[0]==-10.0); + BOOST_TEST(above.size() == current.size()); + auto a = above.begin()+1; + std::for_each(current.begin(), current.begin() + (current.size()-1), + [&a](double v){ BOOST_TEST(*(a++) == v);}); + } +} + + +BOOST_AUTO_TEST_CASE(CommunicateAboveSelf1) +{ + auto comm = Dune::MPIHelper::getLocalCommunicator(); + Opm::CommunicateAbove commAbove{ comm }; + for(std::size_t count=0; count < 2; ++count) + { + std::vector eclIndex = {0}; + std::vector current(eclIndex.size()); + std::transform(eclIndex.begin(), eclIndex.end(), current.begin(), + [](double v){ return 1+10.0*v;}); + commAbove.beginReset(); + for (std::size_t i = 0; i < current.size(); ++i) + { + if (i==0) + commAbove.pushBackEclIndex(-1, eclIndex[i]); + else + commAbove.pushBackEclIndex(eclIndex[i-1], eclIndex[i]); + } + commAbove.endReset(); + auto above = commAbove.communicate(-10.0, current.data(), current.size()); + BOOST_TEST(above[0]==-10.0); + BOOST_TEST(above.size() == current.size()); + auto a = above.begin()+1; + std::for_each(current.begin(), current.begin() + (current.size()-1), + [&a](double v){ BOOST_TEST(*(a++) == v);}); + } +} + 
+BOOST_AUTO_TEST_CASE(CommunicateAboveParallel)
+{
+ using MPIComm = typename Dune::MPIHelper::MPICommunicator;
+#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
+ using Communication = Dune::Communication;
+#else
+ using Communication = Dune::CollectiveCommunication;
+#endif
+ auto comm = Communication(Dune::MPIHelper::getCommunicator());
+
+ Opm::CommunicateAbove commAbove{ comm };
+ for(std::size_t count=0; count < 2; ++count)
+ {
+ std::vector globalEclIndex = {0, 1, 2, 3, 7 , 8, 10, 11};
+ auto oldSize = globalEclIndex.size();
+ std::size_t globalSize = 3 * comm.size();
+ auto lastIndex = globalEclIndex.back();
+ globalEclIndex.resize(globalSize);
+ if ( globalSize > oldSize)
+ {
+ ++lastIndex;
+ for(auto entry = globalEclIndex.begin() + oldSize;
+ entry != globalEclIndex.end(); ++entry, ++lastIndex)
+ {
+ *entry = lastIndex;
+ }
+ }
+
+ std::vector globalCurrent(globalEclIndex.size());
+ std::transform(globalEclIndex.begin(), globalEclIndex.end(), globalCurrent.begin(),
+ [](double v){ return 1+10.0*v;});
+
+ std::vector current(3);
+
+ commAbove.beginReset();
+ for (std::size_t i = 0; i < current.size(); ++i)
+ {
+ auto gi = comm.rank() + comm.size() * i;
+
+ if (gi==0)
+ {
+ commAbove.pushBackEclIndex(-1, globalEclIndex[gi]);
+ }
+ else
+ {
+ commAbove.pushBackEclIndex(globalEclIndex[gi-1], globalEclIndex[gi]);
+ }
+ current[i] = globalCurrent[gi];
+ }
+ commAbove.endReset();
+ auto above = commAbove.communicate(-10.0, current.data(), current.size());
+ if (comm.rank() == 0)
+ BOOST_TEST(above[0]==-10.0);
+
+ BOOST_TEST(above.size() == current.size());
+
+ for (std::size_t i = 0; i < current.size(); ++i)
+ {
+ auto gi = comm.rank() + comm.size() * i;
+ if (gi > 0)
+ {
+ BOOST_TEST(above[i]==globalCurrent[gi-1]);
+ }
+ }
+ }
+}
From c5dd4f4533d4b2895d6710eca61bc1ce57799a8c Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Thu, 3 Dec 2020 15:12:35 +0100
Subject: [PATCH 07/13] Make CommunicateAbove constructor explicit.

---
 opm/simulators/wells/ParallelWellInfo.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp
index 981fd4aa8..f71703e97 100644
--- a/opm/simulators/wells/ParallelWellInfo.hpp
+++ b/opm/simulators/wells/ParallelWellInfo.hpp
@@ -51,7 +51,7 @@ class CommunicateAbove
 #endif

 public:
- CommunicateAbove(const Communication& comm);
+ explicit CommunicateAbove(const Communication& comm);
 /// \brief Adds information about the ecl indices of the perforations.
 ///
 /// \warning Theses indices need to be push in the same order as they
From 575120d4f02ef632a780da516260147de901a8be Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Thu, 3 Dec 2020 15:15:03 +0100
Subject: [PATCH 08/13] Improve documentation a bit.

---
 opm/simulators/wells/ParallelWellInfo.hpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp
index f71703e97..e21f2f381 100644
--- a/opm/simulators/wells/ParallelWellInfo.hpp
+++ b/opm/simulators/wells/ParallelWellInfo.hpp
@@ -52,7 +52,7 @@ class CommunicateAbove

 public:
 explicit CommunicateAbove(const Communication& comm);
- /// \brief Adds information about the ecl indices of the perforations.
+ /// \brief Adds information about original index of the perforations in ECL Schedule.
 ///
 /// \warning Theses indices need to be push in the same order as they
 /// appear in the ECL well specifiation.
Use -1 if there is @@ -130,7 +130,7 @@ public: return *comm_; } - /// \brief Collectively decide which ran has first perforation. + /// \brief Collectively decide which rank has first perforation. void communicateFirstPerforation(bool hasFirst); template From 9cada4a5a5b2ad0d6faa6184bca6569f0cde9d5a Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Thu, 3 Dec 2020 15:29:12 +0100 Subject: [PATCH 09/13] Fix indentation --- opm/simulators/wells/WellHelpers.hpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/opm/simulators/wells/WellHelpers.hpp b/opm/simulators/wells/WellHelpers.hpp index 53d15793f..42513f592 100644 --- a/opm/simulators/wells/WellHelpers.hpp +++ b/opm/simulators/wells/WellHelpers.hpp @@ -65,7 +65,7 @@ namespace Opm { template void mv (const X& x, Y& y) const { - #if !defined(NDEBUG) && HAVE_MPI +#if !defined(NDEBUG) && HAVE_MPI // We need to make sure that all ranks are actually computing // for the same well. Doing this by checking the name of the well. int cstring_size = pinfo_->name().size()+1; @@ -132,9 +132,9 @@ namespace Opm { } else { - Y temp(y); - mv(x, temp); // includes parallel reduction - y -= temp; + Y temp(y); + mv(x, temp); // includes parallel reduction + y -= temp; } } private: From 9c176289955ef8bcc62646175f348c38d46b0516 Mon Sep 17 00:00:00 2001 From: Markus Blatt Date: Thu, 3 Dec 2020 15:34:48 +0100 Subject: [PATCH 10/13] Renamed pinfo to parallel_well_info --- opm/simulators/wells/WellHelpers.hpp | 30 ++++++++++++++-------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/opm/simulators/wells/WellHelpers.hpp b/opm/simulators/wells/WellHelpers.hpp index 42513f592..71b6795d7 100644 --- a/opm/simulators/wells/WellHelpers.hpp +++ b/opm/simulators/wells/WellHelpers.hpp @@ -57,8 +57,8 @@ namespace Opm { using Block = Dune::DynamicMatrix; using Matrix = Dune::BCRSMatrix; - ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& pinfo) - : B_(&B), pinfo_(&pinfo) + ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info) + : B_(&B), parallel_well_info_(¶llel_well_info) {} //! y = A x @@ -68,27 +68,27 @@ namespace Opm { #if !defined(NDEBUG) && HAVE_MPI // We need to make sure that all ranks are actually computing // for the same well. Doing this by checking the name of the well. 
-            int cstring_size = pinfo_->name().size()+1;
-            std::vector<int> sizes(pinfo_->communication().size());
-            pinfo_->communication().allgather(&cstring_size, 1, sizes.data());
+            int cstring_size = parallel_well_info_->name().size()+1;
+            std::vector<int> sizes(parallel_well_info_->communication().size());
+            parallel_well_info_->communication().allgather(&cstring_size, 1, sizes.data());
             std::vector<int> offsets(sizes.size()+1, 0); //last entry will be accumulated size
             std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1);
             std::vector<char> cstrings(offsets[sizes.size()]);
             bool consistentWells = true;
-            char* send = const_cast<char*>(pinfo_->name().c_str());
-            pinfo_->communication().allgatherv(send, cstring_size,
+            char* send = const_cast<char*>(parallel_well_info_->name().c_str());
+            parallel_well_info_->communication().allgatherv(send, cstring_size,
                                                cstrings.data(), sizes.data(), offsets.data());
             for(std::size_t i = 0; i < sizes.size(); ++i)
             {
                 std::string name(cstrings.data()+offsets[i]);
-                if (name != pinfo_->name())
+                if (name != parallel_well_info_->name())
                 {
-                    if (pinfo_->communication().rank() == 0)
+                    if (parallel_well_info_->communication().rank() == 0)
                     {
                         //only one process per well logs, might not be 0 of MPI_COMM_WORLD, though
                         std::string msg = std::string("Fatal Error: Not all ranks are computing for the same well")
-                            + " well should be " + pinfo_->name() + " but is "
+                            + " well should be " + parallel_well_info_->name() + " but is "
                             + name;
                         OpmLog::debug(msg);
                     }
@@ -96,7 +96,7 @@ namespace Opm {
                     break;
                 }
             }
-            pinfo_->communication().barrier();
+            parallel_well_info_->communication().barrier();
             // As not all processes are involved here we need to use MPI_Abort and hope MPI kills them all
             if (!consistentWells)
             {
@@ -105,7 +105,7 @@ namespace Opm {
             B_->mv(x, y);

-            if (this->pinfo_->communication().size() > 1)
+            if (this->parallel_well_info_->communication().size() > 1)
             {
                 // Only do communication if we must.
                 // The B matrix is basically a component-wise multiplication
                 // with a vector followed by a parallel reduction. We do that
                 // reduction to all ranks computing for the well to save the
                 // broadcast when applying C^T.
                 using YField = typename Y::block_type::value_type;
                 assert(y.size() == 1);
-                this->pinfo_->communication().allreduce<std::plus<YField>>(y[0].container().data(),
+                this->parallel_well_info_->communication().allreduce<std::plus<YField>>(y[0].container().data(),
                                                                            y[0].container().size());
             }
         }
@@ -123,7 +123,7 @@ namespace Opm {
         template<class X, class Y>
         void mmv (const X& x, Y& y) const
         {
-            if (this->pinfo_->communication().size() == 1)
+            if (this->parallel_well_info_->communication().size() == 1)
             {
                 // Do the same thing as before. The else branch
                 // produces different rounding errors and results
@@ -139,7 +139,7 @@
         }
     private:
         const Matrix* B_;
-        const ParallelWellInfo* pinfo_;
+        const ParallelWellInfo* parallel_well_info_;
     };

     inline

From eb03712027fd2c50cc17bbda487fe9f50afea78c Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Fri, 4 Dec 2020 10:06:14 +0100
Subject: [PATCH 11/13] Factored out creation of parallel well infos from
 getLocalNonshutWells.

---
 opm/simulators/wells/BlackoilWellModel.hpp    |  7 ++++++-
 .../wells/BlackoilWellModel_impl.hpp          | 20 +++++++++++++------
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/opm/simulators/wells/BlackoilWellModel.hpp b/opm/simulators/wells/BlackoilWellModel.hpp
index b28cdc371..e26481281 100644
--- a/opm/simulators/wells/BlackoilWellModel.hpp
+++ b/opm/simulators/wells/BlackoilWellModel.hpp
@@ -361,7 +361,12 @@ namespace Opm {
         /// \param timeStepIdx The index of the time step.
         /// \param[out] globalNumWells the number of wells globally.
         std::vector< Well > getLocalNonshutWells(const int timeStepIdx,
-                                                 int& globalNumWells);
+                                                 int& globalNumWells) const;
+
+        /// \brief Create the parallel well information.
+        /// \param localWells The local wells from the ECL schedule.
+        std::vector< ParallelWellInfo* >
+        createLocalParallelWellInfo(const std::vector<Well>& localWells);

         // compute the well fluxes and assemble them into the reservoir equations as source terms
         // and in the well equations.
diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp
index 7d82c690a..97549f646 100644
--- a/opm/simulators/wells/BlackoilWellModel_impl.hpp
+++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp
@@ -217,14 +217,21 @@ namespace Opm {
     template<typename TypeTag>
     std::vector< Well >
     BlackoilWellModel<TypeTag>::
-    getLocalNonshutWells(const int timeStepIdx, int& globalNumWells)
+    getLocalNonshutWells(const int timeStepIdx, int& globalNumWells) const
     {
         auto w = schedule().getWells(timeStepIdx);
         globalNumWells = w.size();
         w.erase(std::remove_if(w.begin(), w.end(), is_shut_or_defunct_), w.end());
-        local_parallel_well_info_.clear();
-        local_parallel_well_info_.reserve(w.size());
-        for (const auto& well : w)
+        return w;
+    }
+
+    template<typename TypeTag>
+    std::vector< ParallelWellInfo* >
+    BlackoilWellModel<TypeTag>::createLocalParallelWellInfo(const std::vector<Well>& wells)
+    {
+        std::vector< ParallelWellInfo* > local_parallel_well_info;
+        local_parallel_well_info.reserve(wells.size());
+        for (const auto& well : wells)
         {
             auto wellPair = std::make_pair(well.name(), true);
             auto pwell = std::lower_bound(parallel_well_info_.begin(),
                                           parallel_well_info_.end(),
                                           wellPair);
             assert(pwell != parallel_well_info_.end() &&
                    *pwell == wellPair);
-            local_parallel_well_info_.push_back(&(*pwell));
+            local_parallel_well_info.push_back(&(*pwell));
         }
-        return w;
+        return local_parallel_well_info;
     }

     template<typename TypeTag>
@@ -250,6 +257,7 @@ namespace Opm {
         int globalNumWells = 0;
         // Make wells_ecl_ contain only this partition's non-shut wells.
         wells_ecl_ = getLocalNonshutWells(timeStepIdx, globalNumWells);
+        local_parallel_well_info_ = createLocalParallelWellInfo(wells_ecl_);

         this->initializeWellProdIndCalculators();
         initializeWellPerfData();

From 2e5b1c8d548b688e12470fc5901bc1be3661ed74 Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Fri, 4 Dec 2020 12:55:27 +0100
Subject: [PATCH 12/13] Use reference for WellInterface::parallel_well_info_

---
 opm/simulators/wells/StandardWell_impl.hpp  | 2 +-
 opm/simulators/wells/WellInterface.hpp      | 2 +-
 opm/simulators/wells/WellInterface_impl.hpp | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/opm/simulators/wells/StandardWell_impl.hpp b/opm/simulators/wells/StandardWell_impl.hpp
index cfb80443c..0d81e9546 100644
--- a/opm/simulators/wells/StandardWell_impl.hpp
+++ b/opm/simulators/wells/StandardWell_impl.hpp
@@ -662,7 +662,7 @@ namespace Opm
         // accumulate resWell_ and invDuneD_ in parallel to get effects of all perforations (might be distributed)
         wellhelpers::sumDistributedWellEntries(invDuneD_[0][0], resWell_[0],
-                                               this->parallel_well_info_->communication());
+                                               this->parallel_well_info_.communication());
         // add vol * dF/dt + Q to the well equations;
         for (int componentIdx = 0; componentIdx < numWellConservationEq; ++componentIdx) {
             // TODO: following the development in MSW, we need to convert the volume of the wellbore to be surface volume
diff --git a/opm/simulators/wells/WellInterface.hpp b/opm/simulators/wells/WellInterface.hpp
index b560a571f..a3bff7353 100644
--- a/opm/simulators/wells/WellInterface.hpp
+++ b/opm/simulators/wells/WellInterface.hpp
@@ -319,7 +319,7 @@ namespace Opm

         Well well_ecl_;

-        const ParallelWellInfo* parallel_well_info_;
+        const ParallelWellInfo& parallel_well_info_;

         const int current_step_;

diff --git a/opm/simulators/wells/WellInterface_impl.hpp b/opm/simulators/wells/WellInterface_impl.hpp
index ce4343d5e..f820ee89d 100644
--- a/opm/simulators/wells/WellInterface_impl.hpp
+++ b/opm/simulators/wells/WellInterface_impl.hpp
@@ -41,7 +41,7 @@ namespace Opm
                   const int first_perf_index,
                   const std::vector<PerforationData>& perf_data)
     : well_ecl_(well)
-    , parallel_well_info_(&pw_info)
+    , parallel_well_info_(pw_info)
     , current_step_(time_step)
     , param_(param)
     , rateConverter_(rate_converter)

From 10fd57f37f86de94aec3cc7befd03bbc804313e0 Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Wed, 2 Dec 2020 14:35:10 +0100
Subject: [PATCH 13/13] BlackoilWellModel: Skip capturing this and simulator
 in lambda

Instead create local variables and capture them.
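A minimal sketch of the capture pattern this commit adopts, with hypothetical names that are not taken from the OPM sources: copying or borrowing just the values the predicate needs decouples the lambda's lifetime from the well model and the simulator, which a captured `this` pointer would silently tie it to. The actual change to `is_shut_or_defunct_` follows below.

```cpp
#include <algorithm>
#include <functional>
#include <string>
#include <vector>

// Hypothetical stand-in for the well filter: capture only what is needed.
std::function<bool(const std::string&)>
makeDefunctFilter(const std::vector<std::string>& localWellNames, int numProcs)
{
    // localWellNames is captured by reference and must outlive the returned
    // predicate (it mirrors the member vector in the real code); numProcs is
    // a cheap by-value copy. Capturing `this` or the simulator instead would
    // dangle if either died before the predicate runs.
    return [&localWellNames, numProcs](const std::string& well) {
        if (numProcs == 1)
            return false; // serial run: every well is local
        // Assumes localWellNames is sorted, mirroring the std::lower_bound
        // lookup in the patch below.
        return !std::binary_search(localWellNames.begin(),
                                   localWellNames.end(), well);
    };
}
```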
---
 opm/simulators/wells/BlackoilWellModel_impl.hpp | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp
index 97549f646..01ac6b2f0 100644
--- a/opm/simulators/wells/BlackoilWellModel_impl.hpp
+++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp
@@ -54,16 +54,18 @@ namespace Opm {
                                          cartDims[0]*cartDims[1]*cartDims[2]);
         auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
         parallel_well_info_.assign(parallel_wells.begin(), parallel_wells.end());
-        is_shut_or_defunct_ = [this, &ebosSimulator](const Well& well) {
+        const auto& pwell_info = parallel_well_info_;
+        std::size_t numProcs = ebosSimulator.gridView().comm().size();
+        is_shut_or_defunct_ = [&pwell_info, numProcs](const Well& well) {
             if (well.getStatus() == Well::Status::SHUT)
                 return true;
-            if (ebosSimulator.gridView().comm().size() == 1)
+            if (numProcs == 1u)
                 return false;
             std::pair<std::string, bool> value{well.name(), true}; // false indicate not active!
-            auto candidate = std::lower_bound(parallel_well_info_.begin(),
-                                              parallel_well_info_.end(),
+            auto candidate = std::lower_bound(pwell_info.begin(),
+                                              pwell_info.end(),
                                               value);
-            return candidate == parallel_well_info_.end() || *candidate != value;
+            return candidate == pwell_info.end() || *candidate != value;
         };

         alternative_well_rate_init_ = EWOMS_GET_PARAM(TypeTag, bool, AlternativeWellRateInit);
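To make the reset/communicate protocol of CommunicateAbove easier to follow, here is a minimal single-process sketch of the cycle that the parallel test earlier in this series drives. It is a standalone program written against the API the series introduces (constructor, beginReset/pushBackEclIndex/endReset, communicate); the concrete index and value choices are illustrative only.

```cpp
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <dune/common/parallel/mpihelper.hh>
#include <dune/common/version.hh>
#include <vector>

int main(int argc, char** argv)
{
    Dune::MPIHelper::instance(argc, argv);
    using MPIComm = typename Dune::MPIHelper::MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
    using Communication = Dune::Communication<MPIComm>;
#else
    using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif
    // Self communicator: all perforations of the well live on this process.
    auto comm = Communication(Dune::MPIHelper::getLocalCommunicator());
    Opm::CommunicateAbove commAbove{ comm };

    // Register perforations top to bottom as (ECL index above, own ECL index);
    // -1 marks the first perforation, which has nothing above it.
    commAbove.beginReset();
    commAbove.pushBackEclIndex(-1, 0);
    commAbove.pushBackEclIndex(0, 1);
    commAbove.pushBackEclIndex(1, 2);
    commAbove.endReset();

    // For each perforation, fetch the value attached to the perforation
    // above it; the first argument is returned for the topmost one.
    std::vector<double> current = {1.0, 11.0, 21.0};
    auto above = commAbove.communicate(-10.0, current.data(), current.size());
    // above now holds {-10.0, 1.0, 11.0}
    return 0;
}
```

In a distributed run the same calls work unchanged; the communicator then routes each "value above" from whichever rank owns that perforation, which is exactly what the looping test at the top of this section verifies.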