Added a class with information and a communicator for parallel wells.

BlackoilWellModel now stores an instance of this class for each
well. Inside that class there is a custom communicator that only
contains ranks that will have local cells perforated by the well.
This will be used in the application of the distributed well operator.
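
As a rough illustration (not part of this commit), applying a well operator then only needs a reduction over the well's own communicator; the helper name and the local contribution below are hypothetical:

#include <opm/simulators/wells/ParallelWellInfo.hpp>

// Hypothetical sketch: 'localPerforationSum' stands for whatever partial
// result the distributed well operator computes on this rank.
double sumOverWellRanks(const Opm::ParallelWellInfo& wellInfo,
                        double localPerforationSum)
{
    // The communicator stored in ParallelWellInfo contains exactly the
    // ranks with cells perforated by this well, so only those ranks
    // participate in (and must call) this reduction.
    return wellInfo.communication().sum(localPerforationSum);
}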

This is another small step towards distributed wells, but it should be
safe to merge already (note that creating the custom communicators is a
collective operation in MPI, but it is done only once).
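
For reference, a minimal standalone sketch of that collective creation: every rank calls MPI_Comm_split once, and ranks without perforated cells pass MPI_UNDEFINED and end up with MPI_COMM_NULL (the function name is illustrative, not part of the commit):

#include <mpi.h>

MPI_Comm createWellComm(bool hasLocalCells)
{
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Collective over MPI_COMM_WORLD: ranks that do not perforate the
    // well pass MPI_UNDEFINED and receive MPI_COMM_NULL back.
    const int color = hasLocalCells ? 1 : MPI_UNDEFINED;
    MPI_Comm wellComm;
    MPI_Comm_split(MPI_COMM_WORLD, color, rank, &wellComm);
    return wellComm;
}
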
Markus Blatt 2020-10-06 14:52:44 +02:00
parent 69948f8469
commit 3996967344
6 changed files with 401 additions and 6 deletions

CMakeLists.txt

@@ -40,6 +40,7 @@ list (APPEND MAIN_SOURCE_FILES
opm/simulators/utils/DeferredLogger.cpp
opm/simulators/utils/gatherDeferredLogger.cpp
opm/simulators/utils/ParallelRestart.cpp
opm/simulators/wells/ParallelWellInfo.cpp
opm/simulators/wells/VFPProdProperties.cpp
opm/simulators/wells/VFPInjProperties.cpp
opm/simulators/wells/WellGroupHelpers.cpp
@@ -90,6 +91,7 @@ list (APPEND TEST_SOURCE_FILES
tests/test_norne_pvt.cpp
tests/test_wellprodindexcalculator.cpp
tests/test_wellstatefullyimplicitblackoil.cpp
tests/test_parallelwellinfo.cpp
)
if(MPI_FOUND)
@@ -235,6 +237,7 @@ list (APPEND PUBLIC_HEADER_FILES
opm/simulators/wells/MSWellHelpers.hpp
opm/simulators/wells/BlackoilWellModel.hpp
opm/simulators/wells/BlackoilWellModel_impl.hpp
opm/simulators/wells/ParallelWellInfo.hpp
)
list (APPEND EXAMPLE_SOURCE_FILES

opm/simulators/wells/BlackoilWellModel.hpp

@@ -61,6 +61,7 @@
#include <opm/simulators/wells/MultisegmentWell.hpp>
#include <opm/simulators/wells/WellGroupHelpers.hpp>
#include <opm/simulators/wells/WellProdIndexCalculator.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <opm/simulators/timestepping/gatherConvergenceReport.hpp>
#include <dune/common/fmatrix.hh>
#include <dune/istl/bcrsmatrix.hh>
@@ -278,6 +279,8 @@ namespace Opm {
std::vector< std::vector<PerforationData> > well_perf_data_;
std::vector< WellProdIndexCalculator > prod_index_calc_;
std::vector< ParallelWellInfo > parallel_well_info_;
bool wells_active_;
// a vector of all the wells.

opm/simulators/wells/BlackoilWellModel_impl.hpp

@@ -52,17 +52,18 @@ namespace Opm {
const auto& cartDims = Opm::UgGridHelpers::cartDims(grid);
setupCartesianToCompressed_(Opm::UgGridHelpers::globalCell(grid),
cartDims[0]*cartDims[1]*cartDims[2]);
is_shut_or_defunct_ = [&ebosSimulator](const Well& well) {
auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
parallel_well_info_.assign(parallel_wells.begin(), parallel_wells.end());
is_shut_or_defunct_ = [this, &ebosSimulator](const Well& well) {
if (well.getStatus() == Well::Status::SHUT)
return true;
if (ebosSimulator.gridView().comm().size() == 1)
return false;
std::pair<std::string, bool> value{well.name(), true}; // false indicates not active!
const auto& parallel_wells = ebosSimulator.vanguard().parallelWells();
auto candidate = std::lower_bound(parallel_wells.begin(), parallel_wells.end(),
value);
return candidate == parallel_wells.end() || *candidate != value;
auto candidate = std::lower_bound(parallel_well_info_.begin(),
parallel_well_info_.end(),
value);
return candidate == parallel_well_info_.end() || *candidate != value;
};
alternative_well_rate_init_ = EWOMS_GET_PARAM(TypeTag, bool, AlternativeWellRateInit);

opm/simulators/wells/ParallelWellInfo.cpp

@@ -0,0 +1,116 @@
/*
Copyright 2020 OPM-OP AS
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
namespace Opm
{
void ParallelWellInfo::DestroyComm::operator()(Communication* comm)
{
#if HAVE_MPI
// Only delete custom communicators.
bool del = comm
&& (*comm != Dune::MPIHelper::getLocalCommunicator())
&& (*comm != MPI_COMM_WORLD && *comm != MPI_COMM_NULL);
if ( del )
{
// Not 100% nice but safe as comm is deleted anyway
// We can only access a copy and no reference.
MPI_Comm mpi_comm = *comm;
MPI_Comm_free(&mpi_comm);
}
#endif
delete comm;
}
ParallelWellInfo::ParallelWellInfo(const std::string& name,
bool hasLocalCells)
: name_(name), hasLocalCells_ (hasLocalCells),
isOwner_(true), comm_(new Communication(Dune::MPIHelper::getLocalCommunicator()))
{}
ParallelWellInfo::ParallelWellInfo(const std::pair<std::string,bool>& well_info,
Communication allComm)
: name_(well_info.first), hasLocalCells_(well_info.second)
{
#if HAVE_MPI
MPI_Comm newComm;
int color = hasLocalCells_ ? 1 : MPI_UNDEFINED;
MPI_Comm_split(allComm, color, allComm.rank(), &newComm);
comm_.reset(new Communication(newComm));
#else
comm_.reset(new Communication(Dune::MPIHelper::getLocalCommunicator()));
#endif
isOwner_ = (comm_->rank() == 0);
}
bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
{
return well1.name() < well2.name() || (! (well2.name() < well1.name()) && well1.hasLocalCells() < well2.hasLocalCells());
}
bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
{
bool ret = well1.name() == well2.name() && well1.hasLocalCells() == well2.hasLocalCells()
&& well1.isOwner() == well2.isOwner();
#if HAVE_MPI
using MPIComm = typename Dune::MPIHelper::MPICommunicator;
ret = ret &&
static_cast<MPIComm>(well1.communication()) == static_cast<MPIComm>(well2.communication());
#endif
return ret;
}
bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2)
{
return ! (well1 == well2);
}
bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
{
return pair.first < well.name() || ( !( well.name() < pair.first ) && pair.second < well.hasLocalCells() );
}
bool operator<( const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
{
return well.name() < pair.first || ( !( pair.first < well.name() ) && well.hasLocalCells() < pair.second );
}
bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
{
return pair.first == well.name() && pair.second == well.hasLocalCells();
}
bool operator==(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
{
return pair == well;
}
bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well)
{
return pair.first != well.name() || pair.second != well.hasLocalCells();
}
bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair)
{
return pair != well;
}
} // end namespace Opm

opm/simulators/wells/ParallelWellInfo.hpp

@@ -0,0 +1,118 @@
/*
Copyright 2020 OPM-OP AS
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_PARALLELWELLINFO_HEADER_INCLUDED
#define OPM_PARALLELWELLINFO_HEADER_INCLUDED
#include <dune/common/version.hh>
#include <dune/common/parallel/mpihelper.hh>
#include <memory>
namespace Opm
{
/// \brief Class encapsulating some information about parallel wells
///
/// e.g. It provides a communicator for well information
class ParallelWellInfo
{
public:
using MPIComm = typename Dune::MPIHelper::MPICommunicator;
#if DUNE_VERSION_NEWER(DUNE_COMMON, 2, 7)
using Communication = Dune::Communication<MPIComm>;
#else
using Communication = Dune::CollectiveCommunication<MPIComm>;
#endif
/// \brief Constructs object using MPI_COMM_SELF
ParallelWellInfo(const std::string& name = {""},
bool hasLocalCells = true);
/// \brief Constructs object with communication between all ranks sharing
/// a well
/// \param well_info Pair of well name and whether local cells might be perforated
/// on this rank
/// \param allComm The communication object with all MPI ranks active in the simulation.
/// Default is the one with all ranks available.
ParallelWellInfo(const std::pair<std::string,bool>& well_info,
Communication allComm = Communication());
const Communication& communication() const
{
return *comm_;
}
/// \brief Name of the well.
const std::string& name() const
{
return name_;
}
/// \brief Whether any local cells are perforated by this well at any time
bool hasLocalCells() const
{
return hasLocalCells_;
}
bool isOwner() const
{
return isOwner_;
}
private:
/// \brief Deleter that also frees custom MPI communicators
struct DestroyComm
{
void operator()(Communication* comm);
};
/// \brief Name of the well.
std::string name_;
/// \brief Whether any local cells are perforated by this well at any time
bool hasLocalCells_;
/// \brief Whether we own the well and should do reports etc.
bool isOwner_;
/// \brief Communication object for the well
///
/// Contains only ranks where this well will perforate local cells.
std::unique_ptr<Communication, DestroyComm> comm_;
};
bool operator<(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
bool operator==(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
bool operator!=(const ParallelWellInfo& well1, const ParallelWellInfo& well2);
bool operator<(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
bool operator<( const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
bool operator==(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
bool operator==(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
bool operator!=(const std::pair<std::string, bool>& pair, const ParallelWellInfo& well);
bool operator!=(const ParallelWellInfo& well, const std::pair<std::string, bool>& pair);
} // end namespace Opm
#endif // OPM_PARALLELWELLINFO_HEADER_INCLUDED

tests/test_parallelwellinfo.cpp

@@ -0,0 +1,154 @@
/*
Copyright 2020 OPM-OP AS
Copyright 2015 Dr. Blatt - HPC-Simulation-Software & Services.
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include<config.h>
#include<opm/simulators/wells/ParallelWellInfo.hpp>
#include<vector>
#include<string>
#include<tuple>
#include<ostream>
#include<iostream>
#define BOOST_TEST_MODULE ParallelWellInfo
#include <boost/test/unit_test.hpp>
class MPIError {
public:
/** @brief Constructor. */
MPIError(std::string s, int e) : errorstring(s), errorcode(e){}
/** @brief The error string. */
std::string errorstring;
/** @brief The mpi error code. */
int errorcode;
};
#ifdef HAVE_MPI
void MPI_err_handler(MPI_Comm *, int *err_code, ...){
char *err_string=new char[MPI_MAX_ERROR_STRING];
int err_length;
MPI_Error_string(*err_code, err_string, &err_length);
std::string s(err_string, err_length);
std::cerr << "An MPI Error ocurred:"<<std::endl<<s<<std::endl;
delete[] err_string;
throw MPIError(s, *err_code);
}
#endif
struct MPIFixture
{
MPIFixture()
{
#if HAVE_MPI
int m_argc = boost::unit_test::framework::master_test_suite().argc;
char** m_argv = boost::unit_test::framework::master_test_suite().argv;
helper = &Dune::MPIHelper::instance(m_argc, m_argv);
#ifdef MPI_2
MPI_Comm_create_errhandler(MPI_err_handler, &handler);
MPI_Comm_set_errhandler(MPI_COMM_WORLD, handler);
#else
MPI_Errhandler_create(MPI_err_handler, &handler);
MPI_Errhandler_set(MPI_COMM_WORLD, handler);
#endif
#endif
}
~MPIFixture()
{
#if HAVE_MPI
MPI_Finalize();
#endif
}
Dune::MPIHelper* helper;
#if HAVE_MPI
MPI_Errhandler handler;
#endif
};
BOOST_GLOBAL_FIXTURE(MPIFixture);
// Needed for BOOST_CHECK_EQUAL_COLLECTIONS
namespace std
{
std::ostream& operator<<(std::ostream& os, const std::pair<std::string, bool>& p)
{
return os << "{" << p.first << " "<< p.second << "}";
}
}
namespace Opm
{
std::ostream& operator<<(std::ostream& os, const Opm::ParallelWellInfo& w)
{
return os << "{" << w.name() << " "<< w.hasLocalCells() << " "<<
w.isOwner() << "}";
}
}
BOOST_AUTO_TEST_CASE(ParallelWellComparison)
{
int argc = 0;
char** argv = nullptr;
const auto& helper = Dune::MPIHelper::instance(argc, argv);
std::vector<std::pair<std::string,bool>> pairs;
if (helper.rank() == 0)
pairs = {{"Test1", true},{"Test2", true}, {"Test1", false} };
else
pairs = {{"Test1", false},{"Test2", true}, {"Test1", true} };
std::vector<Opm::ParallelWellInfo> well_info;
well_info.assign(pairs.begin(), pairs.end());
BOOST_CHECK_EQUAL_COLLECTIONS(pairs.begin(), pairs.end(),
well_info.begin(), well_info.end());
BOOST_CHECK_EQUAL_COLLECTIONS(well_info.begin(), well_info.end(),
pairs.begin(), pairs.end());
BOOST_TEST(well_info[0] < pairs[1]);
BOOST_TEST(pairs[0] != well_info[1]);
BOOST_TEST(pairs[0] < well_info[1]);
BOOST_TEST(well_info[0] == pairs[0]);
BOOST_TEST(well_info[0] != well_info[1]);
Opm::ParallelWellInfo well0, well1;
BOOST_TEST(well0 == well1);
#if HAVE_MPI
BOOST_TEST(well0.communication()==helper.getLocalCommunicator());
#endif
Opm::ParallelWellInfo well2("Test", false);
std::pair<std::string, bool> pwell={"Test", true};
BOOST_TEST(well2 < pwell);
Opm::ParallelWellInfo well3("Test", true);
BOOST_TEST(! (well3 < pwell));
pwell.second = false;
BOOST_TEST(! (well3 < pwell));
if (helper.rank() == 0)
BOOST_TEST(well_info[0].communication().size()==1);
#if HAVE_MPI
Opm::ParallelWellInfo::Communication comm{MPI_COMM_WORLD};
BOOST_TEST(well_info[1].communication().size() == comm.size());
if (helper.rank() > 0)
{
BOOST_TEST(well_info[2].communication().size() == comm.size()-1);
}
#endif
}