Mirror of https://github.com/OPM/opm-simulators.git, synced 2025-02-25 18:55:30 -06:00
BlackoilDetails: split it in two
Now we have BlackoilDetails.hpp, which contains everything that is used by flow_ebos as well as flow and does not include anything from Eigen, and BlackoilLegacyDetails.hpp, which contains everything that depends on Eigen (and is thus not required by flow_ebos).
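As a quick orientation (this sketch is not part of the commit), consumers now pick the header that matches their dependencies. Only the two header names come from the commit message; the include paths below are assumed for illustration.

// Hypothetical consumer code; paths are assumed, not taken from the diff.

// Shared by flow and flow_ebos: Eigen-free helpers only.
#include <opm/autodiff/BlackoilDetails.hpp>

// The legacy, Eigen-based flow code path additionally needs the
// Eigen-dependent helpers (infinityNorm, convergenceReduction,
// infinityNormWell, ...) that this commit moves out:
#include <opm/autodiff/BlackoilLegacyDetails.hpp>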
@@ -26,9 +26,6 @@
 
 #include <opm/core/linalg/ParallelIstlInformation.hpp>
 
-#include <Eigen/Eigen>
-#include <Eigen/Sparse>
-
 namespace Opm {
 namespace detail {
 
@@ -93,36 +90,6 @@ namespace detail {
         return grav;
     }
 
-    /// \brief Compute the L-infinity norm of a vector
-    /// \warn This function is not suitable to compute on the well equations.
-    /// \param a The container to compute the infinity norm on.
-    ///          It has to have one entry for each cell.
-    /// \param info In a parallel this holds the information about the data distribution.
-    template <class ADB>
-    inline
-    double infinityNorm( const ADB& a, const boost::any& pinfo = boost::any() )
-    {
-        static_cast<void>(pinfo); // Suppress warning in non-MPI case.
-#if HAVE_MPI
-        if ( pinfo.type() == typeid(ParallelISTLInformation) )
-        {
-            const ParallelISTLInformation& real_info =
-                boost::any_cast<const ParallelISTLInformation&>(pinfo);
-            double result=0;
-            real_info.computeReduction(a.value(), Reduction::makeLInfinityNormFunctor<double>(), result);
-            return result;
-        }
-        else
-#endif
-        {
-            if( a.value().size() > 0 ) {
-                return a.value().matrix().template lpNorm<Eigen::Infinity> ();
-            }
-            else { // this situation can occur when no wells are present
-                return 0.0;
-            }
-        }
-    }
 
     /// \brief Compute the Euclidian norm of a vector
     /// \warning In the case that num_components is greater than 1
@@ -188,109 +155,6 @@
             return product;
         }
     }
-    /// \brief Compute the reduction within the convergence check.
-    /// \param[in] B A matrix with MaxNumPhases columns and the same number rows
-    ///        as the number of cells of the grid. B.col(i) contains the values
-    ///        for phase i.
-    /// \param[in] tempV A matrix with MaxNumPhases columns and the same number rows
-    ///        as the number of cells of the grid. tempV.col(i) contains the
-    ///        values
-    ///        for phase i.
-    /// \param[in] R A matrix with MaxNumPhases columns and the same number rows
-    ///        as the number of cells of the grid. B.col(i) contains the values
-    ///        for phase i.
-    /// \param[out] R_sum An array of size MaxNumPhases where entry i contains the sum
-    ///        of R for the phase i.
-    /// \param[out] maxCoeff An array of size MaxNumPhases where entry i contains the
-    ///        maximum of tempV for the phase i.
-    /// \param[out] B_avg An array of size MaxNumPhases where entry i contains the average
-    ///        of B for the phase i.
-    /// \param[out] maxNormWell The maximum of the well flux equations for each phase.
-    /// \param[in] nc The number of cells of the local grid.
-    /// \return The total pore volume over all cells.
-    inline
-    double
-    convergenceReduction(const Eigen::Array<double, Eigen::Dynamic, Eigen::Dynamic>& B,
-                         const Eigen::Array<double, Eigen::Dynamic, Eigen::Dynamic>& tempV,
-                         const Eigen::Array<double, Eigen::Dynamic, Eigen::Dynamic>& R,
-                         std::vector<double>& R_sum,
-                         std::vector<double>& maxCoeff,
-                         std::vector<double>& B_avg,
-                         std::vector<double>& maxNormWell,
-                         int nc,
-                         int np,
-                         const std::vector<double> pv,
-                         std::vector<double> residual_well)
-    {
-        const int nw = residual_well.size() / np;
-        assert(nw * np == int(residual_well.size()));
-
-        // Do the global reductions
-#if 0 // HAVE_MPI
-        if ( linsolver_.parallelInformation().type() == typeid(ParallelISTLInformation) )
-        {
-            const ParallelISTLInformation& info =
-                boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation());
-
-            // Compute the global number of cells and porevolume
-            std::vector<int> v(nc, 1);
-            auto nc_and_pv = std::tuple<int, double>(0, 0.0);
-            auto nc_and_pv_operators = std::make_tuple(Opm::Reduction::makeGlobalSumFunctor<int>(),
-                                                       Opm::Reduction::makeGlobalSumFunctor<double>());
-            auto nc_and_pv_containers = std::make_tuple(v, pv);
-            info.computeReduction(nc_and_pv_containers, nc_and_pv_operators, nc_and_pv);
-
-            for ( int idx = 0; idx < np; ++idx )
-            {
-                auto values = std::tuple<double,double,double>(0.0 ,0.0 ,0.0);
-                auto containers = std::make_tuple(B.col(idx),
-                                                  tempV.col(idx),
-                                                  R.col(idx));
-                auto operators = std::make_tuple(Opm::Reduction::makeGlobalSumFunctor<double>(),
-                                                 Opm::Reduction::makeGlobalMaxFunctor<double>(),
-                                                 Opm::Reduction::makeGlobalSumFunctor<double>());
-                info.computeReduction(containers, operators, values);
-                B_avg[idx] = std::get<0>(values)/std::get<0>(nc_and_pv);
-                maxCoeff[idx] = std::get<1>(values);
-                R_sum[idx] = std::get<2>(values);
-                assert(np >= np);
-                if (idx < np) {
-                    maxNormWell[idx] = 0.0;
-                    for ( int w = 0; w < nw; ++w ) {
-                        maxNormWell[idx] = std::max(maxNormWell[idx], std::abs(residual_well[nw*idx + w]));
-                    }
-                }
-            }
-            info.communicator().max(maxNormWell.data(), np);
-            // Compute pore volume
-            return std::get<1>(nc_and_pv);
-        }
-        else
-#endif
-        {
-            B_avg.resize(np);
-            maxCoeff.resize(np);
-            R_sum.resize(np);
-            maxNormWell.resize(np);
-            for ( int idx = 0; idx < np; ++idx )
-            {
-                B_avg[idx] = B.col(idx).sum()/nc;
-                maxCoeff[idx] = tempV.col(idx).maxCoeff();
-                R_sum[idx] = R.col(idx).sum();
-
-                assert(np >= np);
-                if (idx < np) {
-                    maxNormWell[idx] = 0.0;
-                    for ( int w = 0; w < nw; ++w ) {
-                        maxNormWell[idx] = std::max(maxNormWell[idx], std::abs(residual_well[nw*idx + w]));
-                    }
-                }
-            }
-            // Compute total pore volume
-            return std::accumulate(pv.begin(), pv.end(), 0.0);
-        }
-    }
-
 
     template <class Scalar>
     inline
@@ -334,29 +198,6 @@
             return std::accumulate(pv.begin(), pv.end(), 0.0);
         }
     }
-
-    /// \brief Compute the L-infinity norm of a vector representing a well equation.
-    /// \param a The container to compute the infinity norm on.
-    /// \param info In a parallel this holds the information about the data distribution.
-    template <class ADB>
-    inline
-    double infinityNormWell( const ADB& a, const boost::any& pinfo )
-    {
-        static_cast<void>(pinfo); // Suppress warning in non-MPI case.
-        double result=0;
-        if( a.value().size() > 0 ) {
-            result = a.value().matrix().template lpNorm<Eigen::Infinity> ();
-        }
-#if HAVE_MPI
-        if ( pinfo.type() == typeid(ParallelISTLInformation) )
-        {
-            const ParallelISTLInformation& real_info =
-                boost::any_cast<const ParallelISTLInformation&>(pinfo);
-            result = real_info.communicator().max(result);
-        }
-#endif
-        return result;
-    }
 } // namespace detail
 } // namespace Opm
 
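The largest removed block is convergenceReduction(), which computes the per-phase reductions used in the convergence check. The following self-contained sketch re-expresses what its serial branch does, using flat std::vector storage in place of the Eigen arrays; the function name, data layout, and sample numbers are made up for illustration and this is not the OPM implementation. In the real helper, the (currently disabled) MPI branch obtains the same quantities through ParallelISTLInformation::computeReduction and a communicator max over maxNormWell, as shown in the diff above.

#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdio>
#include <numeric>
#include <vector>

// Illustrative sketch only: re-expresses the serial branch of the removed
// convergenceReduction() helper with flat, phase-major std::vector storage
// instead of Eigen arrays. All names and data here are hypothetical.
double convergenceReductionSketch(const std::vector<double>& B,      // nc*np values
                                  const std::vector<double>& tempV,  // nc*np values
                                  const std::vector<double>& R,      // nc*np values
                                  std::vector<double>& R_sum,
                                  std::vector<double>& maxCoeff,
                                  std::vector<double>& B_avg,
                                  std::vector<double>& maxNormWell,
                                  int nc,
                                  int np,
                                  const std::vector<double>& pv,
                                  const std::vector<double>& residual_well)
{
    const int nw = np > 0 ? static_cast<int>(residual_well.size()) / np : 0;
    assert(nw * np == static_cast<int>(residual_well.size()));

    B_avg.assign(np, 0.0);
    maxCoeff.assign(np, 0.0);
    R_sum.assign(np, 0.0);
    maxNormWell.assign(np, 0.0);

    for (int phase = 0; phase < np; ++phase) {
        // Column "phase" of the original Eigen arrays corresponds to the
        // contiguous block [phase*nc, (phase+1)*nc) of the flat vectors.
        const double* b = B.data()     + phase * nc;
        const double* t = tempV.data() + phase * nc;
        const double* r = R.data()     + phase * nc;

        B_avg[phase]    = std::accumulate(b, b + nc, 0.0) / nc;  // average of B
        maxCoeff[phase] = *std::max_element(t, t + nc);          // max of tempV
        R_sum[phase]    = std::accumulate(r, r + nc, 0.0);       // sum of R

        // Maximum absolute well residual for this phase.
        for (int w = 0; w < nw; ++w) {
            maxNormWell[phase] = std::max(maxNormWell[phase],
                                          std::abs(residual_well[nw * phase + w]));
        }
    }

    // Return the total pore volume, as the original helper does.
    return std::accumulate(pv.begin(), pv.end(), 0.0);
}

int main()
{
    // Tiny made-up problem: 3 cells, 2 phases, 1 well.
    const int nc = 3, np = 2;
    const std::vector<double> B     = {1.0, 2.0, 3.0,  4.0, 5.0, 6.0};
    const std::vector<double> tempV = {0.1, 0.4, 0.2,  0.3, 0.9, 0.5};
    const std::vector<double> R     = {0.01, 0.02, 0.03,  0.04, 0.05, 0.06};
    const std::vector<double> pv    = {10.0, 20.0, 30.0};
    const std::vector<double> rwell = {-0.7, 0.2};  // one well equation per phase

    std::vector<double> R_sum, maxCoeff, B_avg, maxNormWell;
    const double total_pv = convergenceReductionSketch(B, tempV, R, R_sum, maxCoeff,
                                                       B_avg, maxNormWell, nc, np, pv, rwell);
    std::printf("total pv = %g, B_avg[0] = %g, maxCoeff[1] = %g, maxNormWell[0] = %g\n",
                total_pv, B_avg[0], maxCoeff[1], maxNormWell[0]);
    return 0;
}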