Mirror of https://github.com/OPM/opm-simulators.git

Merge pull request #2837 from blattms/fix-parallel-average-formation-factors

Fixes parallel computation of average formation factors.

Commit f528adb604
@@ -617,6 +617,27 @@ public:
     {
         return centroids_;
     }
 
+    /*!
+     * \brief Get the number of cells in the global leaf grid view.
+     * \warn This is a collective operation that needs to be called
+     *       on all ranks.
+     */
+    std::size_t globalNumCells() const
+    {
+        const auto& grid = asImp_().grid();
+        if (grid.comm().size() == 1)
+        {
+            return grid.leafGridView().size(0);
+        }
+        const auto& gridView = grid.leafGridView();
+        constexpr int codim = 0;
+        constexpr auto Part = Dune::Interior_Partition;
+        auto local_cells = std::distance(gridView.template begin<codim, Part>(),
+                                         gridView.template end<codim, Part>());
+        return grid.comm().sum(local_cells);
+    }
+
 protected:
     void callImplementationInit()
     {
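The helper above avoids double counting by iterating only the Interior_Partition on each rank before the collective sum, so overlap/ghost copies of cells owned elsewhere are never counted. The same pattern can be sketched with raw MPI in place of the Dune grid and communicator; localInteriorCount is a hypothetical stand-in for the interior-partition iteration, and the count it returns is made up:

#include <mpi.h>
#include <iostream>

// Hypothetical stand-in for counting Interior_Partition cells: each rank
// reports only the cells it owns, never its overlap/ghost copies.
long localInteriorCount()
{
    return 100;
}

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    long local_cells = localInteriorCount();
    long global_cells = 0;
    // Collective sum, the raw-MPI analogue of grid.comm().sum(local_cells).
    // Every rank must call it, hence the \warn in the doc comment above.
    MPI_Allreduce(&local_cells, &global_cells, 1, MPI_LONG, MPI_SUM,
                  MPI_COMM_WORLD);

    if (rank == 0)
        std::cout << "global number of cells: " << global_cells << "\n";

    MPI_Finalize();
    return 0;
}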
@@ -299,9 +299,9 @@ namespace Opm {
         bool has_polymer_;
         std::vector<int> pvt_region_idx_;
         PhaseUsage phase_usage_;
-        size_t global_nc_;
+        size_t global_num_cells_;
         // the number of the cells in the local grid
-        size_t number_of_cells_;
+        size_t local_num_cells_;
         double gravity_;
         std::vector<double> depth_;
         bool initial_step_;
@@ -353,7 +353,7 @@ namespace Opm {
         // setting the well_solutions_ based on well_state.
         void updatePrimaryVariables(Opm::DeferredLogger& deferred_logger);
 
-        void setupCartesianToCompressed_(const int* global_cell, int number_of_cells);
+        void setupCartesianToCompressed_(const int* global_cell, int local_num_cells);
 
         void computeRepRadiusPerfLength(const Grid& grid, Opm::DeferredLogger& deferred_logger);
 
@@ -43,12 +43,9 @@ namespace Opm {
         // Create the guide rate container.
         guideRate_.reset(new GuideRate (ebosSimulator_.vanguard().schedule()));
 
-        // calculate the number of elements of the compressed sequential grid. this needs
-        // to be done in two steps because the dune communicator expects a reference as
-        // argument for sum()
-        const auto& gridView = ebosSimulator_.gridView();
-        number_of_cells_ = gridView.size(/*codim=*/0);
-        global_nc_ = gridView.comm().sum(number_of_cells_);
+        local_num_cells_ = ebosSimulator_.gridView().size(0);
+        // Number of cells the global grid view
+        global_num_cells_ = ebosSimulator_.vanguard().globalNumCells();
 
         // Set up cartesian mapping.
        const auto& grid = ebosSimulator_.vanguard().grid();
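The removed lines computed global_nc_ by summing gridView.size(0) over all ranks, which counts every overlap/ghost cell once per rank that stores a copy of it. A toy illustration of the discrepancy, with assumed numbers (two ranks, each owning 100 cells and storing 10 overlap copies):

#include <iostream>

int main()
{
    // Assumed decomposition, for illustration only.
    const long ranks = 2;
    const long owned_per_rank = 100;
    const long overlap_per_rank = 10;

    // Old scheme: sum of local leaf sizes, so overlap cells count twice.
    const long old_global_nc = ranks * (owned_per_rank + overlap_per_rank);
    // New scheme: sum of interior-only counts, as in globalNumCells().
    const long global_num_cells = ranks * owned_per_rank;

    std::cout << old_global_nc << " vs " << global_num_cells << "\n"; // 220 vs 200
    return 0;
}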
@@ -88,7 +85,7 @@ namespace Opm {
         // add the eWoms auxiliary module for the wells to the list
         ebosSimulator_.model().addAuxiliaryModule(this);
 
-        is_cell_perforated_.resize(number_of_cells_, false);
+        is_cell_perforated_.resize(local_num_cells_, false);
     }
 
     template<typename TypeTag>
@@ -243,7 +240,7 @@ namespace Opm {
 
         // The well state initialize bhp with the cell pressure in the top cell.
         // We must therefore provide it with updated cell pressures
-        size_t nc = number_of_cells_;
+        size_t nc = local_num_cells_;
         std::vector<double> cellPressures(nc, 0.0);
         ElementContext elemCtx(ebosSimulator_);
         const auto& gridView = ebosSimulator_.vanguard().gridView();
@@ -308,7 +305,7 @@ namespace Opm {
 
         // Compute reservoir volumes for RESV controls.
         rateConverter_.reset(new RateConverterType (phase_usage_,
-                                                    std::vector<int>(number_of_cells_, 0)));
+                                                    std::vector<int>(local_num_cells_, 0)));
         rateConverter_->template defineState<ElementContext>(ebosSimulator_);
 
         // update VFP properties
@@ -347,7 +344,7 @@ namespace Opm {
         // TODO: to see whether we can postpone of the intialization of the well containers to
         // optimize the usage of the following several member variables
         for (auto& well : well_container_) {
-            well->init(&phase_usage_, depth_, gravity_, number_of_cells_);
+            well->init(&phase_usage_, depth_, gravity_, local_num_cells_);
         }
 
         // update the updated cell flag
@@ -431,7 +428,7 @@ namespace Opm {
             WellInterfacePtr well = createWellForWellTest(well_name, timeStepIdx, deferred_logger);
 
             // some preparation before the well can be used
-            well->init(&phase_usage_, depth_, gravity_, number_of_cells_);
+            well->init(&phase_usage_, depth_, gravity_, local_num_cells_);
             const Well& wellEcl = schedule().getWell(well_name, timeStepIdx);
             double well_efficiency_factor = wellEcl.getEfficiencyFactor();
             WellGroupHelpers::accumulateGroupEfficiencyFactor(schedule().getGroup(wellEcl.groupName(), timeStepIdx), schedule(), timeStepIdx, well_efficiency_factor);
@@ -1464,12 +1461,12 @@ namespace Opm {
     {
         cartesian_to_compressed_.resize(number_of_cartesian_cells, -1);
         if (global_cell) {
-            for (unsigned i = 0; i < number_of_cells_; ++i) {
+            for (unsigned i = 0; i < local_num_cells_; ++i) {
                 cartesian_to_compressed_[global_cell[i]] = i;
             }
         }
         else {
-            for (unsigned i = 0; i < number_of_cells_; ++i) {
+            for (unsigned i = 0; i < local_num_cells_; ++i) {
                 cartesian_to_compressed_[i] = i;
             }
         }
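For reference, global_cell[i] holds the Cartesian index of compressed (active) cell i, and the loop above inverts that mapping; the branch without global_cell is the trivial identity case. A standalone sketch with made-up indices:

#include <iostream>
#include <vector>

int main()
{
    const int number_of_cartesian_cells = 8;
    // Assumed layout: active cells 0..3 live at Cartesian positions
    // 0, 1, 4 and 5; Cartesian cells 2, 3, 6 and 7 are inactive.
    const std::vector<int> global_cell{0, 1, 4, 5};

    // Invert the mapping, exactly as in the hunk above; -1 marks inactive.
    std::vector<int> cartesian_to_compressed(number_of_cartesian_cells, -1);
    for (unsigned i = 0; i < global_cell.size(); ++i) {
        cartesian_to_compressed[global_cell[i]] = i;
    }

    // Cartesian cell 4 maps to compressed cell 2; inactive cells stay -1.
    std::cout << cartesian_to_compressed[4] << " "
              << cartesian_to_compressed[2] << "\n"; // prints "2 -1"
    return 0;
}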
@@ -1530,7 +1527,7 @@ namespace Opm {
             grid.comm().sum(B_avg.data(), B_avg.size());
             for(auto& bval: B_avg)
             {
-                bval/=global_nc_;
+                bval/=global_num_cells_;
             }
         }
 
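This hunk is the fix the PR title names: B_avg holds the collective sum of per-cell formation factors, so dividing by the overlap-inflated global_nc_ skewed the average in parallel runs; dividing by global_num_cells_ uses the true global interior cell count. A minimal sketch of the corrected parallel average, with raw MPI standing in for grid.comm() and made-up per-rank data:

#include <mpi.h>
#include <iostream>
#include <numeric>
#include <vector>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    // Assumed data: formation factors of this rank's interior cells only.
    const std::vector<double> local_b{1.1, 1.2, 1.3};

    double local_sum = std::accumulate(local_b.begin(), local_b.end(), 0.0);
    long local_count = static_cast<long>(local_b.size());

    double global_sum = 0.0;
    long global_count = 0;
    MPI_Allreduce(&local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM,
                  MPI_COMM_WORLD);
    MPI_Allreduce(&local_count, &global_count, 1, MPI_LONG, MPI_SUM,
                  MPI_COMM_WORLD);

    // Analogue of bval /= global_num_cells_ above: divide the global sum by
    // the global *interior* cell count, not one that includes overlap copies.
    const double b_avg = global_sum / static_cast<double>(global_count);

    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0)
        std::cout << "average formation factor: " << b_avg << "\n";

    MPI_Finalize();
    return 0;
}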