Merge pull request #2837 from blattms/fix-parallel-average-formation-factors

Fixes parallel computation of average formation factors.
commit f528adb604
Atgeirr Flø Rasmussen, 2020-10-06 19:37:12 +02:00, committed by GitHub
3 changed files with 35 additions and 17 deletions

@@ -617,6 +617,27 @@ public:
     {
         return centroids_;
     }
+
+    /*!
+     * \brief Get the number of cells in the global leaf grid view.
+     * \warning This is a collective operation that needs to be called
+     *          on all ranks.
+     */
+    std::size_t globalNumCells() const
+    {
+        const auto& grid = asImp_().grid();
+        if (grid.comm().size() == 1)
+        {
+            return grid.leafGridView().size(0);
+        }
+        const auto& gridView = grid.leafGridView();
+        constexpr int codim = 0;
+        constexpr auto Part = Dune::Interior_Partition;
+        auto local_cells = std::distance(gridView.template begin<codim, Part>(),
+                                         gridView.template end<codim, Part>());
+        return grid.comm().sum(local_cells);
+    }
+
 protected:
     void callImplementationInit()
     {
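
As a side note (not part of the diff): leafGridView().size(0) on each rank also counts overlap/ghost cells, so summing those sizes across ranks over-counts cells that are visible on more than one process. That is why globalNumCells() iterates only Dune::Interior_Partition entities before summing. Below is a minimal standalone sketch of that counting logic, using a hand-made Partition enum and two hypothetical ranks instead of the real Dune/MPI machinery:

// Standalone illustration (not OPM/Dune code): two hypothetical ranks whose
// local grids share one overlap cell. Summing the plain local sizes counts the
// shared cell twice; counting only interior cells per rank, as globalNumCells()
// does via Dune::Interior_Partition, gives the correct global total.
#include <cstddef>
#include <iostream>
#include <vector>

enum class Partition { Interior, Overlap };

int main()
{
    // Hypothetical local cell partitions on two ranks; the last cell of rank 0
    // is also present as an overlap (ghost) cell on rank 1.
    const std::vector<std::vector<Partition>> ranks = {
        {Partition::Interior, Partition::Interior, Partition::Interior, Partition::Interior},
        {Partition::Overlap,  Partition::Interior, Partition::Interior}
    };

    std::size_t naive_total = 0;    // sum of leafGridView().size(0) per rank
    std::size_t interior_total = 0; // sum of interior cells per rank

    for (const auto& cells : ranks) {
        naive_total += cells.size();
        for (Partition p : cells) {
            if (p == Partition::Interior) {
                ++interior_total;
            }
        }
    }

    std::cout << "naive sum of local sizes: " << naive_total << '\n'     // 7 (over-counts)
              << "interior-only sum:        " << interior_total << '\n'; // 6 (correct)
}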

@@ -299,9 +299,9 @@ namespace Opm {
         bool has_polymer_;
         std::vector<int> pvt_region_idx_;
         PhaseUsage phase_usage_;
-        size_t global_nc_;
+        size_t global_num_cells_;
         // the number of the cells in the local grid
-        size_t number_of_cells_;
+        size_t local_num_cells_;
         double gravity_;
         std::vector<double> depth_;
         bool initial_step_;
@@ -353,7 +353,7 @@ namespace Opm {
         // setting the well_solutions_ based on well_state.
         void updatePrimaryVariables(Opm::DeferredLogger& deferred_logger);
-        void setupCartesianToCompressed_(const int* global_cell, int number_of_cells);
+        void setupCartesianToCompressed_(const int* global_cell, int local_num_cells);
         void computeRepRadiusPerfLength(const Grid& grid, Opm::DeferredLogger& deferred_logger);

@@ -43,12 +43,9 @@ namespace Opm {
         // Create the guide rate container.
         guideRate_.reset(new GuideRate (ebosSimulator_.vanguard().schedule()));

-        // calculate the number of elements of the compressed sequential grid. this needs
-        // to be done in two steps because the dune communicator expects a reference as
-        // argument for sum()
-        const auto& gridView = ebosSimulator_.gridView();
-        number_of_cells_ = gridView.size(/*codim=*/0);
-        global_nc_ = gridView.comm().sum(number_of_cells_);
+        local_num_cells_ = ebosSimulator_.gridView().size(0);
+        // Number of cells in the global grid view
+        global_num_cells_ = ebosSimulator_.vanguard().globalNumCells();

         // Set up cartesian mapping.
         const auto& grid = ebosSimulator_.vanguard().grid();
@@ -88,7 +85,7 @@ namespace Opm {
         // add the eWoms auxiliary module for the wells to the list
         ebosSimulator_.model().addAuxiliaryModule(this);

-        is_cell_perforated_.resize(number_of_cells_, false);
+        is_cell_perforated_.resize(local_num_cells_, false);
     }

     template<typename TypeTag>
@@ -243,7 +240,7 @@ namespace Opm {
         // The well state initialize bhp with the cell pressure in the top cell.
         // We must therefore provide it with updated cell pressures
-        size_t nc = number_of_cells_;
+        size_t nc = local_num_cells_;
         std::vector<double> cellPressures(nc, 0.0);
         ElementContext elemCtx(ebosSimulator_);
         const auto& gridView = ebosSimulator_.vanguard().gridView();
@@ -308,7 +305,7 @@ namespace Opm {
         // Compute reservoir volumes for RESV controls.
         rateConverter_.reset(new RateConverterType (phase_usage_,
-                                                    std::vector<int>(number_of_cells_, 0)));
+                                                    std::vector<int>(local_num_cells_, 0)));
         rateConverter_->template defineState<ElementContext>(ebosSimulator_);

         // update VFP properties
@@ -347,7 +344,7 @@ namespace Opm {
         // TODO: to see whether we can postpone of the intialization of the well containers to
         // optimize the usage of the following several member variables
         for (auto& well : well_container_) {
-            well->init(&phase_usage_, depth_, gravity_, number_of_cells_);
+            well->init(&phase_usage_, depth_, gravity_, local_num_cells_);
         }

         // update the updated cell flag
@@ -431,7 +428,7 @@ namespace Opm {
         WellInterfacePtr well = createWellForWellTest(well_name, timeStepIdx, deferred_logger);

         // some preparation before the well can be used
-        well->init(&phase_usage_, depth_, gravity_, number_of_cells_);
+        well->init(&phase_usage_, depth_, gravity_, local_num_cells_);
         const Well& wellEcl = schedule().getWell(well_name, timeStepIdx);
         double well_efficiency_factor = wellEcl.getEfficiencyFactor();
         WellGroupHelpers::accumulateGroupEfficiencyFactor(schedule().getGroup(wellEcl.groupName(), timeStepIdx), schedule(), timeStepIdx, well_efficiency_factor);
@@ -1464,12 +1461,12 @@ namespace Opm {
     {
         cartesian_to_compressed_.resize(number_of_cartesian_cells, -1);
         if (global_cell) {
-            for (unsigned i = 0; i < number_of_cells_; ++i) {
+            for (unsigned i = 0; i < local_num_cells_; ++i) {
                 cartesian_to_compressed_[global_cell[i]] = i;
             }
         }
         else {
-            for (unsigned i = 0; i < number_of_cells_; ++i) {
+            for (unsigned i = 0; i < local_num_cells_; ++i) {
                 cartesian_to_compressed_[i] = i;
             }
         }
@@ -1530,7 +1527,7 @@ namespace Opm {
         grid.comm().sum(B_avg.data(), B_avg.size());
         for(auto& bval: B_avg)
         {
-            bval/=global_nc_;
+            bval/=global_num_cells_;
         }
     }
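
For context, this last hunk is where the fix pays off: each rank accumulates its per-cell contributions into B_avg, grid.comm().sum combines the per-rank sums, and the combined sum is now divided by global_num_cells_ (interior cells over all ranks) instead of the old global_nc_, which was built by summing per-rank leaf view sizes and so could count overlap cells more than once. A standalone sketch of that arithmetic, with MPI replaced by a plain loop over hypothetical per-rank data:

// Standalone illustration of the averaging pattern (not the actual OPM code):
// per-rank sums are added up (the role of grid.comm().sum) and the result is
// divided by the global number of interior cells, not an overlap-inflated count.
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical per-cell formation-factor contributions for the *interior*
    // cells on each of two ranks.
    const std::vector<std::vector<double>> b_per_rank = {
        {1.02, 1.01, 0.99, 1.00},
        {0.98, 1.03}
    };

    double B_sum = 0.0;               // plays the role of B_avg before division
    std::size_t global_num_cells = 0; // plays the role of global_num_cells_

    for (const auto& local : b_per_rank) {     // "comm().sum" over ranks
        for (double b : local) { B_sum += b; } // local accumulation
        global_num_cells += local.size();      // interior cells only
    }

    const double B_avg = B_sum / global_num_cells;
    std::cout << "average formation factor: " << B_avg << '\n';
}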