Mirror of https://github.com/OPM/opm-simulators.git (synced 2025-02-25 18:55:30 -06:00)
Renamed pinfo to parallel_well_info
parent 9cada4a5a5
commit 9c17628995
@@ -57,8 +57,8 @@ namespace Opm {
         using Block = Dune::DynamicMatrix<Scalar>;
         using Matrix = Dune::BCRSMatrix<Block>;
 
-        ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& pinfo)
-            : B_(&B), pinfo_(&pinfo)
+        ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info)
+            : B_(&B), parallel_well_info_(&parallel_well_info)
         {}
 
         //! y = A x
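For orientation, here is a minimal self-contained analogue of the renamed constructor. The Dune matrix and ParallelWellInfo types are replaced by local placeholders so the sketch compiles on its own; none of it is taken verbatim from the OPM sources:

```cpp
#include <string>

// Placeholder stand-ins for Dune::BCRSMatrix<...> and Opm::ParallelWellInfo.
struct Matrix {};
struct ParallelWellInfo
{
    std::string name_;
    const std::string& name() const { return name_; }
};

class ParallelStandardWellB
{
public:
    // Non-owning pointers: the caller keeps ownership of the matrix and the
    // well info, and the wrapper stays cheap to copy.
    ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info)
        : B_(&B), parallel_well_info_(&parallel_well_info)
    {}

private:
    const Matrix* B_;
    const ParallelWellInfo* parallel_well_info_;
};

int main()
{
    Matrix B;
    ParallelWellInfo info{"PROD-1"};
    ParallelStandardWellB wrapper(B, info); // both arguments must outlive wrapper
    (void)wrapper;
}
```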
@@ -68,27 +68,27 @@ namespace Opm {
 #if !defined(NDEBUG) && HAVE_MPI
             // We need to make sure that all ranks are actually computing
             // for the same well. Doing this by checking the name of the well.
-            int cstring_size = pinfo_->name().size()+1;
-            std::vector<int> sizes(pinfo_->communication().size());
-            pinfo_->communication().allgather(&cstring_size, 1, sizes.data());
+            int cstring_size = parallel_well_info_->name().size()+1;
+            std::vector<int> sizes(parallel_well_info_->communication().size());
+            parallel_well_info_->communication().allgather(&cstring_size, 1, sizes.data());
             std::vector<int> offsets(sizes.size()+1, 0); //last entry will be accumulated size
             std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1);
             std::vector<char> cstrings(offsets[sizes.size()]);
             bool consistentWells = true;
-            char* send = const_cast<char*>(pinfo_->name().c_str());
-            pinfo_->communication().allgatherv(send, cstring_size,
+            char* send = const_cast<char*>(parallel_well_info_->name().c_str());
+            parallel_well_info_->communication().allgatherv(send, cstring_size,
                                                cstrings.data(), sizes.data(),
                                                offsets.data());
             for(std::size_t i = 0; i < sizes.size(); ++i)
             {
                 std::string name(cstrings.data()+offsets[i]);
-                if (name != pinfo_->name())
+                if (name != parallel_well_info_->name())
                 {
-                    if (pinfo_->communication().rank() == 0)
+                    if (parallel_well_info_->communication().rank() == 0)
                     {
                         //only one process per well logs, might not be 0 of MPI_COMM_WORLD, though
                         std::string msg = std::string("Fatal Error: Not all ranks are computing for the same well")
-                            + " well should be " + pinfo_->name() + " but is "
+                            + " well should be " + parallel_well_info_->name() + " but is "
                             + name;
                         OpmLog::debug(msg);
                     }
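The block above is a variable-length string gather: each rank first shares the length of its well name, a prefix sum turns the lengths into receive offsets, and an allgatherv then collects every name into one flat buffer for comparison. A standalone sketch of the same pattern, using plain MPI calls in place of the ParallelWellInfo communication wrapper (the helper allRanksAgree and the well name "PROD-1" are illustrative only):

```cpp
#include <mpi.h>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

// Returns true if every rank in comm passed the same string.
bool allRanksAgree(const std::string& name, MPI_Comm comm)
{
    int nranks = 0;
    MPI_Comm_size(comm, &nranks);

    // 1. Gather the (NUL-terminated) length of every rank's string.
    int len = static_cast<int>(name.size()) + 1;
    std::vector<int> sizes(nranks);
    MPI_Allgather(&len, 1, MPI_INT, sizes.data(), 1, MPI_INT, comm);

    // 2. Prefix-sum the lengths into receive offsets; last entry is the total.
    std::vector<int> offsets(nranks + 1, 0);
    std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1);

    // 3. Gather all strings into one flat buffer.
    std::vector<char> buf(offsets[nranks]);
    MPI_Allgatherv(name.c_str(), len, MPI_CHAR,
                   buf.data(), sizes.data(), offsets.data(), MPI_CHAR, comm);

    // 4. Compare each received string against our own.
    for (int i = 0; i < nranks; ++i)
        if (name != std::string(buf.data() + offsets[i]))
            return false;
    return true;
}

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    const bool ok = allRanksAgree("PROD-1", MPI_COMM_WORLD);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0)
        std::cout << (ok ? "consistent" : "inconsistent") << "\n";
    MPI_Finalize();
}
```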
@@ -96,7 +96,7 @@ namespace Opm {
                     break;
                 }
             }
-            pinfo_->communication().barrier();
+            parallel_well_info_->communication().barrier();
             // As not all processes are involved here we need to use MPI_Abort and hope MPI kills them all
             if (!consistentWells)
             {
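The MPI_Abort comment reflects a real constraint: the inconsistency may be detected on only some ranks, and throwing there would leave the remaining ranks blocked in their next collective call. A hedged standalone sketch of that abort pattern (plain MPI, not the OPM wrapper):

```cpp
#include <mpi.h>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    bool consistentWells = false; // pretend the name check above failed
    if (!consistentWells)
        MPI_Abort(MPI_COMM_WORLD, 1); // asks the runtime to kill all ranks
    MPI_Finalize();                   // never reached when aborting
}
```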
@@ -105,7 +105,7 @@ namespace Opm {
 #endif
             B_->mv(x, y);
 
-            if (this->pinfo_->communication().size() > 1)
+            if (this->parallel_well_info_->communication().size() > 1)
             {
                 // Only do communication if we must.
                 // The B matrix is basically a component-wise multiplication
@@ -114,7 +114,7 @@ namespace Opm {
                 // broadcast when applying C^T.
                 using YField = typename Y::block_type::value_type;
                 assert(y.size() == 1);
-                this->pinfo_->communication().allreduce<std::plus<YField>>(y[0].container().data(),
+                this->parallel_well_info_->communication().allreduce<std::plus<YField>>(y[0].container().data(),
                                                                            y[0].container().size());
             }
         }
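The allreduce works because the well-equation contributions are distributed: each rank computes a partial y = B x from its local perforations only, so an element-wise sum over all ranks reproduces the full product. A minimal sketch of that recombination step, assuming plain MPI and toy data:

```cpp
#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Toy partial product: pretend this rank's perforations contributed these
    // entries of y; the other ranks hold the remaining contributions.
    std::vector<double> y = {1.0 * rank, 2.0 * rank, 3.0 * rank};

    // In-place element-wise sum across all ranks, as in the allreduce above.
    MPI_Allreduce(MPI_IN_PLACE, y.data(), static_cast<int>(y.size()),
                  MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    if (rank == 0)
        std::cout << "y[0] after reduction: " << y[0] << "\n";
    MPI_Finalize();
}
```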
@@ -123,7 +123,7 @@ namespace Opm {
         template<class X, class Y>
         void mmv (const X& x, Y& y) const
         {
-            if (this->pinfo_->communication().size() == 1)
+            if (this->parallel_well_info_->communication().size() == 1)
             {
                 // Do the same thing as before. The else branch
                 // produces different rounding errors and results
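The comment about rounding is worth spelling out: floating-point addition is not associative, so the communication path, which sums partial products in a different order, can give bitwise-different results than a single-process multiply. A tiny standalone illustration:

```cpp
#include <cstdio>

int main()
{
    double a = 1e16, b = -1e16, c = 0.5;
    // Same three summands, different association: prints "0.5 vs 0.0".
    std::printf("%.1f vs %.1f\n", (a + b) + c, a + (b + c));
}
```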
@@ -139,7 +139,7 @@ namespace Opm {
         }
     private:
         const Matrix* B_;
-        const ParallelWellInfo* pinfo_;
+        const ParallelWellInfo* parallel_well_info_;
     };
 
     inline