/*
Copyright 2016 SINTEF ICT, Applied Mathematics.
Copyright 2016 Statoil ASA.
Copyright 2020 OPM-OP AS.
This file is part of the Open Porous Media project (OPM).
OPM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OPM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <opm/simulators/wells/WellHelpers.hpp>
#include <opm/common/OpmLog/OpmLog.hpp>
#include <opm/simulators/wells/ParallelWellInfo.hpp>
#include <dune/common/dynmatrix.hh>
#include <dune/common/dynvector.hh>
#include <fmt/format.h>
#include <algorithm>
#include <cassert>
#include <functional>
#include <numeric>
#include <vector>
namespace Opm {
namespace wellhelpers {
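// Wrapper around the well's B matrix for wells whose perforations are
// distributed over several MPI ranks: applying it combines the local
// matrix-vector product with a reduction over all ranks of the well.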
template<class Scalar>
ParallelStandardWellB<Scalar>::
ParallelStandardWellB(const Matrix& B, const ParallelWellInfo& parallel_well_info)
    : B_(B), parallel_well_info_(parallel_well_info)
{}
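// Computes y = B x and reduces the result over all ranks computing for
// this well, so that every rank ends up with the complete product.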
template<class Scalar>
template<class X, class Y>
void ParallelStandardWellB<Scalar>::
mv (const X& x, Y& y) const
{
#if !defined(NDEBUG) && HAVE_MPI
    // We need to make sure that all ranks are actually computing
    // for the same well. We do this by checking the name of the well.
    int cstring_size = parallel_well_info_.name().size()+1;
    std::vector<int> sizes(parallel_well_info_.communication().size());
    parallel_well_info_.communication().allgather(&cstring_size, 1, sizes.data());
    std::vector<int> offsets(sizes.size()+1, 0); //last entry will be accumulated size
    std::partial_sum(sizes.begin(), sizes.end(), offsets.begin() + 1);
    std::vector<char> cstrings(offsets[sizes.size()]);
    bool consistentWells = true;
    char* send = const_cast<char*>(parallel_well_info_.name().c_str());
    parallel_well_info_.communication().allgatherv(send, cstring_size,
                                                   cstrings.data(), sizes.data(),
                                                   offsets.data());
for(std::size_t i = 0; i < sizes.size(); ++i)
{
std::string name(cstrings.data()+offsets[i]);
if (name != parallel_well_info_.name())
{
if (parallel_well_info_.communication().rank() == 0)
{
//only one process per well logs, might not be 0 of MPI_COMM_WORLD, though
                OpmLog::error(fmt::format("Not all ranks are computing for the same well,"
                                          " should be {} but is {}.", parallel_well_info_.name(), name));
}
consistentWells = false;
break;
}
}
parallel_well_info_.communication().barrier();
    // As not all processes are involved here, we need to use MPI_Abort and hope MPI kills them all.
if (!consistentWells)
{
MPI_Abort(MPI_COMM_WORLD, 1);
}
#endif
B_.mv(x, y);
if (this->parallel_well_info_.communication().size() > 1)
{
// Only do communication if we must.
// The B matrix is basically a component-wise multiplication
// with a vector followed by a parallel reduction. We do that
// reduction to all ranks computing for the well to save the
// broadcast when applying C^T.
using YField = typename Y::block_type::value_type;
assert(y.size() == 1);
        this->parallel_well_info_.communication().template allreduce<std::plus<YField>>(y[0].container().data(),
                                                                                        y[0].container().size());
}
}
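// Computes y -= B x, reusing mv() (and thus its parallel reduction)
// when the well is distributed over more than one rank.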
template<class Scalar>
template<class X, class Y>
void ParallelStandardWellB<Scalar>::
mmv (const X& x, Y& y) const
{
if (this->parallel_well_info_.communication().size() == 1)
{
        // Do the same thing as before. The else branch
        // produces different rounding errors and results in
        // slightly different iteration counts / well curves.
B_.mmv(x, y);
}
else
{
Y temp(y);
mv(x, temp); // includes parallel reduction
y -= temp;
}
}
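// Hydrostatic pressure correction between the well reference depth and the
// VFP table reference depth: dp = rho * gravity * (vfp_ref_depth - well_ref_depth).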
double computeHydrostaticCorrection(const double well_ref_depth, const double vfp_ref_depth,
const double rho, const double gravity)
{
const double dh = vfp_ref_depth - well_ref_depth;
const double dp = rho * gravity * dh;
return dp;
}
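// Sums the entries of a dense well matrix and vector over all ranks of the
// communicator, so that each rank holds the global values for a distributed well.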
template<class Scalar, class Comm>
void sumDistributedWellEntries(Dune::DynamicMatrix<Scalar>& mat,
                               Dune::DynamicVector<Scalar>& vec,
                               const Comm& comm)
{
// DynamicMatrix does not use one contiguous array for storing the data
// but a DynamicVector of DynamicVectors. Hence we need to copy the data
// to contiguous memory for MPI.
if (comm.size() == 1)
{
return;
}
    std::vector<Scalar> allEntries;
allEntries.reserve(mat.N()*mat.M()+vec.size());
for(const auto& row: mat)
{
allEntries.insert(allEntries.end(), row.begin(), row.end());
}
allEntries.insert(allEntries.end(), vec.begin(), vec.end());
comm.sum(allEntries.data(), allEntries.size());
auto pos = allEntries.begin();
auto cols = mat.cols();
for(auto&& row: mat)
{
std::copy(pos, pos + cols, &(row[0]));
pos += cols;
}
assert(std::size_t(allEntries.end() - pos) == vec.size());
std::copy(pos, allEntries.end(), &(vec[0]));
}
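// Returns the transpose of a dense dynamic matrix such as Dune::DynamicMatrix.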
template<class DenseMatrix>
DenseMatrix transposeDenseDynMatrix(const DenseMatrix& M)
{
DenseMatrix tmp{M.cols(), M.rows()};
    for (std::size_t i = 0; i < M.rows(); ++i) {
        for (std::size_t j = 0; j < M.cols(); ++j) {
tmp[j][i] = M[i][j];
}
}
return tmp;
}
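// Explicit instantiations for the scalar and block-vector types used by the simulator.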
template class ParallelStandardWellB<double>;
template<int Dim>
using Vec = Dune::BlockVector<Dune::FieldVector<double,Dim>>;
using DynVec = Dune::BlockVector<Dune::DynamicVector<double>>;

#define INSTANCE(Dim) \
    template void ParallelStandardWellB<double>::mv<Vec<Dim>,DynVec>(const Vec<Dim>&,DynVec&) const; \
    template void ParallelStandardWellB<double>::mmv<Vec<Dim>,DynVec>(const Vec<Dim>&,DynVec&) const;
INSTANCE(1)
INSTANCE(2)
INSTANCE(3)
INSTANCE(4)
INSTANCE(5)
INSTANCE(6)
using Comm = Parallel::Communication;
template void sumDistributedWellEntries<double, Comm>(Dune::DynamicMatrix<double>& mat,
                                                      Dune::DynamicVector<double>& vec,
                                                      const Comm& comm);
using DMatrix = Dune::DynamicMatrix<double>;
template DMatrix transposeDenseDynMatrix(const DMatrix&);
} // namespace wellhelpers
} // namespace Opm