Renamed pinfo to parallel_well_info

Author: Markus Blatt
Date:   2020-12-03 15:34:48 +01:00
parent 9cada4a5a5
commit 9c17628995

@@ -57,8 +57,8 @@ namespace Opm {
         using Block = Dune::DynamicMatrix<Scalar>;
         using Matrix = Dune::BCRSMatrix<Block>;
-        ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& pinfo)
-            : B_(&B), pinfo_(&pinfo)
+        ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info)
+            : B_(&B), parallel_well_info_(&parallel_well_info)
         {}
         //! y = A x
@@ -68,27 +68,27 @@ namespace Opm {
 #if !defined(NDEBUG) && HAVE_MPI
             // We need to make sure that all ranks are actually computing
             // for the same well. Doing this by checking the name of the well.
-            int cstring_size = pinfo_->name().size()+1;
-            std::vector<int> sizes(pinfo_->communication().size());
-            pinfo_->communication().allgather(&cstring_size, 1, sizes.data());
+            int cstring_size = parallel_well_info_->name().size()+1;
+            std::vector<int> sizes(parallel_well_info_->communication().size());
+            parallel_well_info_->communication().allgather(&cstring_size, 1, sizes.data());
             std::vector<int> offsets(sizes.size()+1, 0); //last entry will be accumulated size
             std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1);
             std::vector<char> cstrings(offsets[sizes.size()]);
             bool consistentWells = true;
-            char* send = const_cast<char*>(pinfo_->name().c_str());
-            pinfo_->communication().allgatherv(send, cstring_size,
-                                               cstrings.data(), sizes.data(),
-                                               offsets.data());
+            char* send = const_cast<char*>(parallel_well_info_->name().c_str());
+            parallel_well_info_->communication().allgatherv(send, cstring_size,
+                                                            cstrings.data(), sizes.data(),
+                                                            offsets.data());
             for(std::size_t i = 0; i < sizes.size(); ++i)
             {
                 std::string name(cstrings.data()+offsets[i]);
-                if (name != pinfo_->name())
+                if (name != parallel_well_info_->name())
                 {
-                    if (pinfo_->communication().rank() == 0)
+                    if (parallel_well_info_->communication().rank() == 0)
                     {
                         //only one process per well logs, might not be 0 of MPI_COMM_WORLD, though
                         std::string msg = std::string("Fatal Error: Not all ranks are computing for the same well")
-                                          + " well should be " + pinfo_->name() + " but is "
+                                          + " well should be " + parallel_well_info_->name() + " but is "
                                           + name;
                         OpmLog::debug(msg);
                     }
@@ -96,7 +96,7 @@ namespace Opm {
                     break;
                 }
             }
-            pinfo_->communication().barrier();
+            parallel_well_info_->communication().barrier();
             // As not all processes are involved here we need to use MPI_Abort and hope MPI kills them all
             if (!consistentWells)
             {
@@ -105,7 +105,7 @@ namespace Opm {
 #endif
             B_->mv(x, y);
-            if (this->pinfo_->communication().size() > 1)
+            if (this->parallel_well_info_->communication().size() > 1)
             {
                 // Only do communication if we must.
                 // The B matrix is basically a component-wise multiplication
@@ -114,7 +114,7 @@ namespace Opm {
                 // broadcast when applying C^T.
                 using YField = typename Y::block_type::value_type;
                 assert(y.size() == 1);
-                this->pinfo_->communication().allreduce<std::plus<YField>>(y[0].container().data(),
-                                                                           y[0].container().size());
+                this->parallel_well_info_->communication().allreduce<std::plus<YField>>(y[0].container().data(),
+                                                                                        y[0].container().size());
             }
         }
@@ -123,7 +123,7 @@ namespace Opm {
         template<class X, class Y>
         void mmv (const X& x, Y& y) const
         {
-            if (this->pinfo_->communication().size() == 1)
+            if (this->parallel_well_info_->communication().size() == 1)
             {
                 // Do the same thing as before. The else branch
                 // produces different rounding errors and results
@@ -139,7 +139,7 @@ namespace Opm {
         }
     private:
         const Matrix* B_;
-        const ParallelWellInfo* pinfo_;
+        const ParallelWellInfo* parallel_well_info_;
     };
     inline