From 39969673446cfcb924b6bc95ce6aa5d765d9a63b Mon Sep 17 00:00:00 2001
From: Markus Blatt
Date: Tue, 6 Oct 2020 14:52:44 +0200
Subject: [PATCH] Added a class with information and communicator for parallel
 wells.

BlackoilWellModel now stores an instance of this class for each well. Inside
that class there is a custom communicator that only contains ranks that will
have local cells perforated by the well. This will be used in the application
of the distributed well operator.

This is another small step in the direction of distributed wells, but it
should be safe to merge (note that creating the custom communicators is a
collective operation in MPI, but it is done only once).
---
 CMakeLists_files.cmake                        |   3 +
 opm/simulators/wells/BlackoilWellModel.hpp    |   3 +
 .../wells/BlackoilWellModel_impl.hpp          |  13 +-
 opm/simulators/wells/ParallelWellInfo.cpp     | 116 +++++++++++++
 opm/simulators/wells/ParallelWellInfo.hpp     | 118 ++++++++++++++
 tests/test_parallelwellinfo.cpp               | 154 ++++++++++++++++++
 6 files changed, 401 insertions(+), 6 deletions(-)
 create mode 100644 opm/simulators/wells/ParallelWellInfo.cpp
 create mode 100644 opm/simulators/wells/ParallelWellInfo.hpp
 create mode 100644 tests/test_parallelwellinfo.cpp

diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake
index f9312ede4..28c7ca555 100644
--- a/CMakeLists_files.cmake
+++ b/CMakeLists_files.cmake
@@ -40,6 +40,7 @@ list (APPEND MAIN_SOURCE_FILES
   opm/simulators/utils/DeferredLogger.cpp
   opm/simulators/utils/gatherDeferredLogger.cpp
   opm/simulators/utils/ParallelRestart.cpp
+  opm/simulators/wells/ParallelWellInfo.cpp
   opm/simulators/wells/VFPProdProperties.cpp
   opm/simulators/wells/VFPInjProperties.cpp
   opm/simulators/wells/WellGroupHelpers.cpp
@@ -90,6 +91,7 @@ list (APPEND TEST_SOURCE_FILES
   tests/test_norne_pvt.cpp
   tests/test_wellprodindexcalculator.cpp
   tests/test_wellstatefullyimplicitblackoil.cpp
+  tests/test_parallelwellinfo.cpp
   )

 if(MPI_FOUND)
@@ -235,6 +237,7 @@ list (APPEND PUBLIC_HEADER_FILES
   opm/simulators/wells/MSWellHelpers.hpp
   opm/simulators/wells/BlackoilWellModel.hpp
   opm/simulators/wells/BlackoilWellModel_impl.hpp
+  opm/simulators/wells/ParallelWellInfo.hpp
   )

 list (APPEND EXAMPLE_SOURCE_FILES

diff --git a/opm/simulators/wells/BlackoilWellModel.hpp b/opm/simulators/wells/BlackoilWellModel.hpp
index 43b079a0c..1dab893fb 100644
--- a/opm/simulators/wells/BlackoilWellModel.hpp
+++ b/opm/simulators/wells/BlackoilWellModel.hpp
@@ -61,6 +61,7 @@
 #include
 #include
 #include
+#include <opm/simulators/wells/ParallelWellInfo.hpp>
 #include
 #include
 #include
@@ -278,6 +279,8 @@
         std::vector< std::vector<PerforationData> > well_perf_data_;
         std::vector< WellProdIndexCalculator > prod_index_calc_;

+        std::vector< ParallelWellInfo > parallel_well_info_;
+
         bool wells_active_;

         // a vector of all the wells.
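
[Editor's note: the sorted parallel_well_info_ member added above is searched with std::lower_bound in the BlackoilWellModel_impl.hpp hunk that follows, using a std::pair<std::string, bool> as the probe value. That mixed-type binary search only compiles because ParallelWellInfo and the pair type are mutually comparable through the operator< overloads defined in ParallelWellInfo.cpp further down. A minimal self-contained sketch of the pattern; the Info struct and contains() helper are hypothetical stand-ins, not OPM code:]

    #include <algorithm>
    #include <string>
    #include <utility>
    #include <vector>

    struct Info { std::string name; bool active; };

    // Heterogeneous comparisons: name first, then the bool, mirroring the
    // ordering that ParallelWellInfo.cpp defines below.
    bool operator<(const Info& i, const std::pair<std::string, bool>& p)
    { return i.name < p.first || (!(p.first < i.name) && i.active < p.second); }
    bool operator<(const std::pair<std::string, bool>& p, const Info& i)
    { return p.first < i.name || (!(i.name < p.first) && p.second < i.active); }

    bool contains(const std::vector<Info>& sorted,
                  const std::pair<std::string, bool>& value)
    {
        // std::lower_bound itself only needs operator<(element, value);
        // the reverse overload enables the equivalence check afterwards.
        auto it = std::lower_bound(sorted.begin(), sorted.end(), value);
        return it != sorted.end() && !(*it < value) && !(value < *it);
    }
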
diff --git a/opm/simulators/wells/BlackoilWellModel_impl.hpp b/opm/simulators/wells/BlackoilWellModel_impl.hpp
index 32cda46d1..0920a478d 100644
--- a/opm/simulators/wells/BlackoilWellModel_impl.hpp
+++ b/opm/simulators/wells/BlackoilWellModel_impl.hpp
@@ -52,17 +52,18 @@ namespace Opm {
         const auto& cartDims = Opm::UgGridHelpers::cartDims(grid);
         setupCartesianToCompressed_(Opm::UgGridHelpers::globalCell(grid),
                                     cartDims[0]*cartDims[1]*cartDims[2]);
-
-        is_shut_or_defunct_ = [&ebosSimulator](const Well& well) {
+        auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
+        parallel_well_info_.assign(parallel_wells.begin(), parallel_wells.end());
+        is_shut_or_defunct_ = [this, &ebosSimulator](const Well& well) {
             if (well.getStatus() == Well::Status::SHUT)
                 return true;
             if (ebosSimulator.gridView().comm().size() == 1)
                 return false;
             std::pair<std::string, bool> value{well.name(), true}; // false indicates not active!
-            const auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
-            auto candidate = std::lower_bound(parallel_wells.begin(), parallel_wells.end(),
-                                              value);
-            return candidate == parallel_wells.end() || *candidate != value;
+            auto candidate = std::lower_bound(parallel_well_info_.begin(),
+                                              parallel_well_info_.end(),
+                                              value);
+            return candidate == parallel_well_info_.end() || *candidate != value;
         };

         alternative_well_rate_init_ = EWOMS_GET_PARAM(TypeTag, bool, AlternativeWellRateInit);
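
[Editor's note: the second ParallelWellInfo constructor in the new ParallelWellInfo.cpp below builds the per-well communicator with MPI_Comm_split, passing MPI_UNDEFINED as the color on ranks that have no perforated local cells; those ranks receive MPI_COMM_NULL instead of a new communicator. A standalone sketch of just that MPI mechanism, independent of OPM:]

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // Pretend only even ranks have cells perforated by the well.
        const bool hasLocalCells = (rank % 2 == 0);
        const int color = hasLocalCells ? 1 : MPI_UNDEFINED;

        // Collective over MPI_COMM_WORLD: every rank must make this call,
        // including the ranks that end up outside the new communicator.
        MPI_Comm wellComm;
        MPI_Comm_split(MPI_COMM_WORLD, color, /*key=*/rank, &wellComm);

        if (wellComm != MPI_COMM_NULL) {
            int wellRank, wellSize;
            MPI_Comm_rank(wellComm, &wellRank);
            MPI_Comm_size(wellComm, &wellSize);
            std::printf("world rank %d is well rank %d of %d\n",
                        rank, wellRank, wellSize);
            MPI_Comm_free(&wellComm);
        }
        MPI_Finalize();
        return 0;
    }
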
diff --git a/opm/simulators/wells/ParallelWellInfo.cpp b/opm/simulators/wells/ParallelWellInfo.cpp
new file mode 100644
index 000000000..9759c393a
--- /dev/null
+++ b/opm/simulators/wells/ParallelWellInfo.cpp
@@ -0,0 +1,116 @@
+/*
+  Copyright 2020 OPM-OP AS
+
+  This file is part of the Open Porous Media project (OPM).
+
+  OPM is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  OPM is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with OPM.  If not, see <http://www.gnu.org/licenses/>.
+*/
+#include <config.h>
+#include <opm/simulators/wells/ParallelWellInfo.hpp>
+
+namespace Opm
+{
+
+void ParallelWellInfo::DestroyComm::operator()(Communication* comm)
+{
+#if HAVE_MPI
+    // Only delete custom communicators.
+    bool del = comm
+        && (*comm != Dune::MPIHelper::getLocalCommunicator())
+        && (*comm != MPI_COMM_WORLD && *comm != MPI_COMM_NULL);
+
+    if ( del )
+    {
+        // Not 100% nice, but safe, as comm is deleted anyway:
+        // we can only access a copy and no reference.
+        MPI_Comm mpi_comm = *comm;
+        MPI_Comm_free(&mpi_comm);
+    }
+#endif
+    delete comm;
+}
+
+ParallelWellInfo::ParallelWellInfo(const std::string& name,
+                                   bool hasLocalCells)
+    : name_(name), hasLocalCells_(hasLocalCells),
+      isOwner_(true), comm_(new Communication(Dune::MPIHelper::getLocalCommunicator()))
+{}
+
+ParallelWellInfo::ParallelWellInfo(const std::pair<std::string, bool>& well_info,
+                                   Communication allComm)
+    : name_(well_info.first), hasLocalCells_(well_info.second)
+{
+#if HAVE_MPI
+    MPI_Comm newComm;
+    int color = hasLocalCells_ ? 1 : MPI_UNDEFINED;
+    MPI_Comm_split(allComm, color, allComm.rank(), &newComm);
+    comm_.reset(new Communication(newComm));
+#else
+    comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator()));
+#endif
+    isOwner_ = (comm_->rank() == 0);
+}
+
+bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
+{
+    return well1.name() < well2.name()
+        || (! (well2.name() < well1.name()) && well1.hasLocalCells() < well2.hasLocalCells());
+}
+
+bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
+{
+    bool ret = well1.name() == well2.name() && well1.hasLocalCells() == well2.hasLocalCells()
+        && well1.isOwner() == well2.isOwner();
+#if HAVE_MPI
+    using MPIComm = typename Dune::MPIHelper::MPICommunicator;
+    ret = ret &&
+        static_cast<MPIComm>(well1.communication()) == static_cast<MPIComm>(well2.communication());
+#endif
+    return ret;
+}
+
+bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
+{
+    return ! (well1 == well2);
+}
+
+bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
+{
+    return pair.first < well.name() || ( !( well.name() < pair.first ) && pair.second < well.hasLocalCells() );
+}
+
+bool operator<(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
+{
+    return well.name() < pair.first || ( !( pair.first < well.name() ) && well.hasLocalCells() < pair.second );
+}
+
+bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
+{
+    return pair.first == well.name() && pair.second == well.hasLocalCells();
+}
+
+bool operator==(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
+{
+    return pair == well;
+}
+
+bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
+{
+    return pair.first != well.name() || pair.second != well.hasLocalCells();
+}
+
+bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
+{
+    return pair != well;
+}
+
+} // end namespace Opm
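
[Editor's note: DestroyComm above exists because comm_ is declared as std::unique_ptr<Communication, DestroyComm> in the header below; the deleter frees communicators created by MPI_Comm_split while leaving the predefined ones untouched. A generic sketch of this unique_ptr-with-custom-deleter pattern; CommHandle, FreeComm and CommPtr are invented names:]

    #include <mpi.h>
    #include <memory>

    struct CommHandle
    {
        MPI_Comm comm = MPI_COMM_NULL;
    };

    struct FreeComm
    {
        void operator()(CommHandle* h) const
        {
            // Free only communicators we created ourselves; predefined ones
            // (MPI_COMM_WORLD, MPI_COMM_SELF) and MPI_COMM_NULL must not be
            // passed to MPI_Comm_free.
            if (h && h->comm != MPI_COMM_NULL &&
                h->comm != MPI_COMM_WORLD && h->comm != MPI_COMM_SELF) {
                MPI_Comm_free(&h->comm);
            }
            delete h;
        }
    };

    // The deleter runs automatically when the owner goes out of scope.
    using CommPtr = std::unique_ptr<CommHandle, FreeComm>;
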
diff --git a/opm/simulators/wells/ParallelWellInfo.hpp b/opm/simulators/wells/ParallelWellInfo.hpp
new file mode 100644
index 000000000..ca8990977
--- /dev/null
+++ b/opm/simulators/wells/ParallelWellInfo.hpp
@@ -0,0 +1,118 @@
+/*
+  Copyright 2020 OPM-OP AS
+
+  This file is part of the Open Porous Media project (OPM).
+
+  OPM is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  OPM is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with OPM.  If not, see <http://www.gnu.org/licenses/>.
+*/
+#ifndef OPM_PARALLELWELLINFO_HEADER_INCLUDED
+#define OPM_PARALLELWELLINFO_HEADER_INCLUDED
+
+#include <dune/common/version.hh>
+#include <dune/common/parallel/mpihelper.hh>
+
+#include <memory>
+#include <string>
+#include <utility>
+
+namespace Opm
+{
+
+/// \brief Class encapsulating some information about parallel wells,
+///
+/// e.g. it provides a communicator containing only the ranks that have
+/// local cells perforated by the well.
+class ParallelWellInfo
+{
+public:
+    using MPIComm = typename Dune::MPIHelper::MPICommunicator;
+#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
+    using Communication = Dune::Communication<MPIComm>;
+#else
+    using Communication = Dune::CollectiveCommunication<MPIComm>;
+#endif
+
+
+    /// \brief Constructs object using MPI_COMM_SELF
+    ParallelWellInfo(const std::string& name = {""},
+                     bool hasLocalCells = true);
+
+    /// \brief Constructs object with communication between all ranks sharing
+    ///        the well
+    /// \param well_info Pair of well name and whether local cells might be
+    ///        perforated on this rank
+    /// \param allComm The communication object with all MPI ranks active in
+    ///        the simulation. Default is the one with all ranks available.
+    ParallelWellInfo(const std::pair<std::string, bool>& well_info,
+                     Communication allComm = Communication());
+
+    const Communication& communication() const
+    {
+        return *comm_;
+    }
+
+    /// \brief Name of the well.
+    const std::string& name() const
+    {
+        return name_;
+    }
+
+    /// \brief Whether any local cells are perforated by the well.
+    bool hasLocalCells() const
+    {
+        return hasLocalCells_;
+    }
+
+    /// \brief Whether this rank owns the well.
+    bool isOwner() const
+    {
+        return isOwner_;
+    }
+
+private:
+
+    /// \brief Deleter that also frees custom MPI communicators
+    struct DestroyComm
+    {
+        void operator()(Communication* comm);
+    };
+
+
+    /// \brief Name of the well.
+    std::string name_;
+    /// \brief Whether any local cells are perforated by the well.
+    bool hasLocalCells_;
+    /// \brief Whether we own the well and should do reports etc.
+    bool isOwner_;
+    /// \brief Communication object for the well
+    ///
+    /// Contains only ranks where this well will perforate local cells.
+    std::unique_ptr<Communication, DestroyComm> comm_;
+};
+
+bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
+
+bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
+
+bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
+
+bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
+
+bool operator<(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
+
+bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
+
+bool operator==(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
+
+bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
+
+bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
+
+} // end namespace Opm
+#endif // OPM_PARALLELWELLINFO_HEADER_INCLUDED
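
[Editor's note: the commit message states that the communicator will be used in the application of the distributed well operator. A hypothetical sketch of the kind of per-well reduction this enables; sumPerfRates and localRates are invented names, and only ParallelWellInfo and its communication() accessor come from this patch:]

    #include <opm/simulators/wells/ParallelWellInfo.hpp>

    #include <numeric>
    #include <vector>

    double sumPerfRates(const Opm::ParallelWellInfo& wellInfo,
                        const std::vector<double>& localRates)
    {
        const double local = std::accumulate(localRates.begin(),
                                             localRates.end(), 0.0);
        // Collective only over the ranks that perforate this well rather
        // than over the whole simulation; ranks without local cells for
        // the well never reach this code path.
        return wellInfo.communication().sum(local);
    }
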
diff --git a/tests/test_parallelwellinfo.cpp b/tests/test_parallelwellinfo.cpp
new file mode 100644
index 000000000..04c6834e3
--- /dev/null
+++ b/tests/test_parallelwellinfo.cpp
@@ -0,0 +1,154 @@
+/*
+  Copyright 2020 OPM-OP AS
+  Copyright 2015 Dr. Blatt - HPC-Simulation-Software & Services.
+
+  This file is part of the Open Porous Media project (OPM).
+
+  OPM is free software: you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation, either version 3 of the License, or
+  (at your option) any later version.
+
+  OPM is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with OPM.  If not, see <http://www.gnu.org/licenses/>.
+*/
+#include <config.h>
+#include <opm/simulators/wells/ParallelWellInfo.hpp>
+#include <dune/common/parallel/mpihelper.hh>
+#include <iostream>
+#include <string>
+#include <vector>
+
+#define BOOST_TEST_MODULE ParallelWellInfo
+#include <boost/test/unit_test.hpp>
+
+class MPIError {
+public:
+    /** @brief Constructor. */
+    MPIError(std::string s, int e) : errorstring(s), errorcode(e){}
+    /** @brief The error string. */
+    std::string errorstring;
+    /** @brief The mpi error code. */
+    int errorcode;
+};
+
+#ifdef HAVE_MPI
+void MPI_err_handler(MPI_Comm *, int *err_code, ...){
+    char *err_string = new char[MPI_MAX_ERROR_STRING];
+    int err_length;
+    MPI_Error_string(*err_code, err_string, &err_length);
+    std::string s(err_string, err_length);
+    std::cerr << "An MPI error occurred:" << std::endl << s << std::endl;
+    delete[] err_string;
+    throw MPIError(s, *err_code);
+}
+#endif
+
+namespace std
+{
+std::ostream& operator<<(std::ostream& os, const std::pair<std::string, bool>& p)
+{
+    return os << "{" << p.first << " " << p.second << "}";
+}
+}
+namespace Opm
+{
+std::ostream& operator<<(std::ostream& os, const Opm::ParallelWellInfo& w)
+{
+    return os << "{" << w.name() << " " << w.hasLocalCells() << " " <<
+        w.isOwner() << "}";
+}
+}
+
+BOOST_AUTO_TEST_CASE(ParallelWellComparison)
+{
+    int argc = 0;
+    char** argv = nullptr;
+    const auto& helper = Dune::MPIHelper::instance(argc, argv);
+    std::vector<std::pair<std::string, bool>> pairs;
+    if (helper.rank() == 0)
+        pairs = {{"Test1", true}, {"Test2", true}, {"Test1", false}};
+    else
+        pairs = {{"Test1", false}, {"Test2", true}, {"Test1", true}};
+
+    std::vector<Opm::ParallelWellInfo> well_info;
+    well_info.assign(pairs.begin(), pairs.end());
+
+    BOOST_CHECK_EQUAL_COLLECTIONS(pairs.begin(), pairs.end(),
+                                  well_info.begin(), well_info.end());
+
+    BOOST_CHECK_EQUAL_COLLECTIONS(well_info.begin(), well_info.end(),
+                                  pairs.begin(), pairs.end());
+
+    BOOST_TEST(well_info[0] < pairs[1]);
+    BOOST_TEST(pairs[0] != well_info[1]);
+    BOOST_TEST(pairs[0] < well_info[1]);
+    BOOST_TEST(well_info[0] == pairs[0]);
+
+    BOOST_TEST(well_info[0] != well_info[1]);
+
+    Opm::ParallelWellInfo well0, well1;
+
+    BOOST_TEST(well0 == well1);
+#if HAVE_MPI
+    BOOST_TEST(well0.communication() == helper.getLocalCommunicator());
+#endif
+    Opm::ParallelWellInfo well2("Test", false);
+    std::pair<std::string, bool> pwell = {"Test", true};
+    BOOST_TEST(well2 < pwell);
+    Opm::ParallelWellInfo well3("Test", true);
+    BOOST_TEST(! (well3 < pwell));
+    pwell.second = false;
+    BOOST_TEST(! (well3 < pwell));
+
+    if (helper.rank() == 0)
+        BOOST_TEST(well_info[0].communication().size() == 1);
+
+#if HAVE_MPI
+    Opm::ParallelWellInfo::Communication comm{MPI_COMM_WORLD};
+
+    BOOST_TEST(well_info[1].communication().size() == comm.size());
+
+    if (helper.rank() > 0)
+    {
+        BOOST_TEST(well_info[2].communication().size() == comm.size()-1);
+    }
+#endif
+}
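
[Editor's note: because MPI_Comm_split is collective over allComm, every rank must construct a ParallelWellInfo for every well, in the same order; this is why the test above builds the complete pairs vector on each rank, including entries whose hasLocalCells flag is false. A sketch of that requirement; buildWellInfo is an invented helper:]

    #include <opm/simulators/wells/ParallelWellInfo.hpp>

    #include <string>
    #include <utility>
    #include <vector>

    std::vector<Opm::ParallelWellInfo>
    buildWellInfo(const std::vector<std::pair<std::string, bool>>& allWells)
    {
        std::vector<Opm::ParallelWellInfo> infos;
        infos.reserve(allWells.size());
        for (const auto& well : allWells) {
            // One MPI_Comm_split per well, collective over all ranks;
            // skipping a well on some rank would deadlock.
            infos.emplace_back(well);
        }
        return infos;
    }
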