Mirror of https://github.com/OPM/opm-simulators.git, synced 2025-02-25 18:55:30 -06:00
Correctly compute the scaling factor for the equations in parallel.
When running in parallel, one cannot use Eigen::Array::mean() for this, as each process holds only a local part of the complete array. With this commit we correctly compute the global number of cells in the grid and use it, together with a parallel reduction, to compute a global mean value.
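To illustrate the point of the commit outside of OPM: a minimal, self-contained sketch (not OPM code; all names here are chosen for the example) of why Eigen::Array::mean() on the locally stored part of a distributed array is wrong, and how a sum reduction plus a global count recovers the true mean. Assumes MPI and Eigen are available.

    #include <mpi.h>
    #include <Eigen/Core>
    #include <iostream>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        // Each rank owns a different local part of the global array, so the
        // local mean generally differs from the global one.
        Eigen::ArrayXd local = Eigen::ArrayXd::Constant(10 + rank, rank + 1.0);

        // Wrong in parallel: averages only this rank's entries.
        const double local_mean = local.mean();

        // Correct: reduce the local sum and the local count, then divide.
        double sums[2] = { local.sum(), static_cast<double>(local.size()) };
        double global[2];
        MPI_Allreduce(sums, global, 2, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
        const double global_mean = global[0] / global[1];

        std::cout << "rank " << rank << ": local mean " << local_mean
                  << ", global mean " << global_mean << "\n";

        MPI_Finalize();
        return 0;
    }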
@@ -280,6 +280,8 @@ namespace Opm {
         /// \brief Whether we print something to std::cout
         bool terminal_output_;
+        /// \brief The number of cells of the global grid.
+        int global_nc_;
 
         std::vector<int> primalVariable_;
         V pvdt_;
 
@@ -194,13 +194,16 @@ namespace detail {
             int local_number_of_wells = wells_ ? wells_->number_of_wells : 0;
             int global_number_of_wells = info.communicator().sum(local_number_of_wells);
             wells_active_ = ( wells_ && global_number_of_wells > 0 );
+            // Compute the global number of cells
+            std::vector<int> v( Opm::AutoDiffGrid::numCells(grid_), 1);
+            global_nc_ = 0;
+            info.computeReduction(v, Opm::Reduction::makeGlobalSumFunctor<int>(), global_nc_);
         }else
+#endif
         {
             wells_active_ = ( wells_ && wells_->number_of_wells > 0 );
+            global_nc_ = Opm::AutoDiffGrid::numCells(grid_);
         }
-#else
-        wells_active_ = ( wells_ && wells_->number_of_wells > 0 );
-#endif
     }
 
 
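For reference, a hedged sketch (plain MPI, not the ParallelISTLInformation API) of what the ones-vector reduction above computes: the global number of cells, counting each cell exactly once even when ranks also store ghost/overlap copies. The 'owned' flags here are an assumption standing in for the ownership information that computeReduction uses internally.

    #include <mpi.h>
    #include <vector>

    // Count only the cells this rank owns, then sum across all ranks.
    // A naive Allreduce of numCells() would double-count overlap cells.
    int globalCellCount(const std::vector<bool>& owned, MPI_Comm comm)
    {
        int local = 0;
        for (bool o : owned) {
            if (o) {
                ++local;
            }
        }
        int global = 0;
        MPI_Allreduce(&local, &global, 1, MPI_INT, MPI_SUM, comm);
        return global;
    }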
@@ -969,7 +972,20 @@ namespace detail {
                 const int pos = pu.phase_pos[idx];
                 const ADB& temp_b = rq_[pos].b;
                 B = 1. / temp_b.value();
-                residual_.matbalscale[idx] = B.mean();
+#if HAVE_MPI
+                if ( linsolver_.parallelInformation().type() == typeid(ParallelISTLInformation) )
+                {
+                    const ParallelISTLInformation& real_info =
+                        boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation());
+                    double B_global_sum = 0;
+                    real_info.computeReduction(B, Reduction::makeGlobalSumFunctor<double>(), B_global_sum);
+                    residual_.matbalscale[idx] = B_global_sum / global_nc_;
+                }
+                else
+#endif
+                {
+                    residual_.matbalscale[idx] = B.mean();
+                }
             }
         }
     }
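The same pattern, distilled into a standalone helper: the parallel value of matbalscale[idx] is the global sum of B divided by the global cell count computed at setup. globalMean() is a hypothetical stand-in for the real code path; the actual computeReduction with makeGlobalSumFunctor additionally masks ghost cells so overlap entries are not double-counted.

    #include <mpi.h>
    #include <Eigen/Core>

    // Sketch: global mean of a distributed array B, given the global cell
    // count computed once at setup (global_nc_ in the commit).
    double globalMean(const Eigen::ArrayXd& B_local, int global_nc, MPI_Comm comm)
    {
        double local_sum = B_local.sum();   // sum over this rank's cells
        double global_sum = 0.0;
        MPI_Allreduce(&local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM, comm);
        return global_sum / global_nc;      // parallel replacement for B.mean()
    }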
@@ -237,7 +237,20 @@ namespace Opm {
         if (has_solvent_) {
             const ADB& temp_b = rq_[solvent_pos_].b;
             ADB::V B = 1. / temp_b.value();
-            residual_.matbalscale[solvent_pos_] = B.mean();
+#if HAVE_MPI
+            if ( linsolver_.parallelInformation().type() == typeid(ParallelISTLInformation) )
+            {
+                const ParallelISTLInformation& real_info =
+                    boost::any_cast<const ParallelISTLInformation&>(linsolver_.parallelInformation());
+                double B_global_sum = 0;
+                real_info.computeReduction(B, Reduction::makeGlobalSumFunctor<double>(), B_global_sum);
+                residual_.matbalscale[solvent_pos_] = B_global_sum / global_nc_;
+            }
+            else
+#endif
+            {
+                residual_.matbalscale[solvent_pos_] = B.mean();
+            }
         }
     }
 