// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM.  If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/

#include <config.h>
#include <ebos/eclgenericcpgridvanguard.hh>

#if HAVE_MPI
#include <ebos/eclmpiserializer.hh>
#endif

#include <opm/simulators/utils/ParallelEclipseState.hpp>
#include <opm/simulators/utils/ParallelSerialization.hpp>
#include <opm/simulators/utils/PropsCentroidsDataHandle.hpp>

#include <opm/grid/cpgrid/GridHelpers.hpp>

#include <opm/input/eclipse/Schedule/Schedule.hpp>
#include <opm/input/eclipse/Schedule/Well/Well.hpp>

#include <opm/common/utility/ActiveGridCells.hpp>

#include <dune/grid/common/mcmgmapper.hh>
#include <dune/grid/common/partitionset.hh>
#include <dune/common/version.hh>

#if HAVE_DUNE_FEM
#include <dune/fem/gridpart/adaptiveleafgridpart.hh>
#include <dune/fem/gridpart/common/gridpart2gridview.hh>
#include <ebos/femcpgridcompat.hh>
#endif

#include <cassert>
#include <cstdlib>
#include <numeric>
#include <optional>
#include <sstream>
#include <stdexcept>
#include <string>
#include <tuple>
#include <vector>

#include <fmt/format.h>

namespace Opm {

std::optional<std::function<std::vector<int> (const Dune::CpGrid&)>> externalLoadBalancer;
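
// The optional callback above lets an embedding application supply its own
// grid partition.  As a hypothetical usage sketch (not part of this file's
// logic), the callback is installed before load balancing, is only evaluated
// on the I/O rank, and returns one target MPI rank per cell of the
// undistributed grid:
//
//     Opm::externalLoadBalancer = [](const Dune::CpGrid& grid)
//     {
//         // Trivial partition: keep every cell on rank 0.
//         return std::vector<int>(grid.numCells(), 0);
//     };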

template<class ElementMapper, class GridView, class Scalar>
EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::EclGenericCpGridVanguard()
{
    this->mpiRank = 0;

#if HAVE_MPI
    this->mpiRank = EclGenericVanguard::comm().rank();
#endif // HAVE_MPI
}

template<class ElementMapper, class GridView, class Scalar>
void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::releaseEquilGrid()
{
    this->equilGrid_.reset();
    this->equilCartesianIndexMapper_.reset();
}

#if HAVE_MPI
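
// Distribute the simulation grid across the MPI ranks and, in single-rank
// runs that request more than one Jacobi block, compute a cell partition for
// those blocks instead (see the HAVE_OPENCL section below).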
template<class ElementMapper, class GridView, class Scalar>
void EclGenericCpGridVanguard<ElementMapper, GridView, Scalar>::
doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
               const bool ownersFirst,
               const bool serialPartitioning,
               const bool enableDistributedWells,
               const double zoltanImbalanceTol,
               const GridView& gridView,
               const Schedule& schedule,
               std::vector<double>& centroids,
               EclipseState& eclState1,
               EclGenericVanguard::ParallelWellStruct& parallelWells,
               const int numJacobiBlocks)
{
    const auto mpiSize = this->grid_->comm().size();

    const auto partitionJacobiBlocks =
        (numJacobiBlocks > 1) && (mpiSize == 1);

    if ((mpiSize > 1) || (numJacobiBlocks > 1)) {
        if (this->grid_->size(0) > 0) {
            // Generally needed in parallel runs both when there is and when
            // there is not an externally defined load-balancing function.
            // In addition to being used in CpGrid::loadBalance(), the
            // transmissibilities are also output to the .INIT file.  Thus,
            // transmissibility values must exist on the I/O rank for derived
            // classes such as EclCpGridVanguard<>.
            this->allocTrans();
        }

        // CpGrid's loadBalance() method uses transmissibilities as edge
        // weights.  This is arguably a layering violation and extracting
        // the per-face transmissibilities as a linear array is relatively
        // expensive.  We therefore extract transmissibility values only if
        // the values are actually needed.
        auto loadBalancerSet = static_cast<int>(externalLoadBalancer.has_value());
        this->grid_->comm().broadcast(&loadBalancerSet, 1, 0);

        const auto faceTrans = ((loadBalancerSet == 0) || partitionJacobiBlocks)
            ? this->extractFaceTrans(gridView)
            : std::vector<double>{};

        const auto wells = ((mpiSize > 1) || partitionJacobiBlocks)
            ? schedule.getWellsatEnd()
            : std::vector<Well>{};

        // Distribute the grid and switch to the distributed view.
        if (mpiSize > 1) {
            this->distributeGrid(edgeWeightsMethod, ownersFirst,
                                 serialPartitioning, enableDistributedWells,
                                 zoltanImbalanceTol, loadBalancerSet != 0,
                                 faceTrans, wells, centroids,
                                 eclState1, parallelWells);
        }

        // Calling Schedule::filterConnections would remove any perforated
        // cells that exist only on other ranks, even in the case of
        // distributed wells.  But we need all connections to figure out the
        // first cell of a well (e.g. for pressure), hence this step is now
        // skipped.  Rank 0 had everything even before.

#if HAVE_OPENCL
        if (partitionJacobiBlocks) {
            this->cell_part_ = this->grid_->
                zoltanPartitionWithoutScatter(&wells, faceTrans.data(),
                                              numJacobiBlocks,
                                              zoltanImbalanceTol);
        }
#endif // HAVE_OPENCL
    }
}

template<class ElementMapper, class GridView, class Scalar>
void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::distributeFieldProps_(EclipseState& eclState1)
{
    const auto mpiSize = this->grid_->comm().size();

    if (mpiSize == 1) {
        return;
    }

    if (auto* parallelEclState = dynamic_cast<ParallelEclipseState*>(&eclState1);
        parallelEclState != nullptr)
    {
        // Reset Cartesian index mapper for automatic creation of field
        // properties
        parallelEclState->resetCartesianMapper(this->cartesianIndexMapper_.get());
        parallelEclState->switchToDistributedProps();
    }
    else {
        const auto message = std::string {
            "Parallel simulator setup is incorrect as "
            "it does not use ParallelEclipseState"
        };

        OpmLog::error(message);

        throw std::invalid_argument { message };
    }
}
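
// Build a per-face array of transmissibility values, indexed by the grid's
// intersection IDs, for use as edge weights when partitioning the grid.
// Boundary faces (intersections without a neighbouring cell) keep a weight
// of zero.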
template <class ElementMapper, class GridView, class Scalar>
std::vector<double>
EclGenericCpGridVanguard<ElementMapper, GridView, Scalar>::
extractFaceTrans(const GridView& gridView) const
{
    auto faceTrans = std::vector<double>(this->grid_->numFaces(), 0.0);

    const auto elemMapper = ElementMapper { gridView, Dune::mcmgElementLayout() };

    for (const auto& elem : elements(gridView, Dune::Partitions::interiorBorder)) {
        for (const auto& is : intersections(gridView, elem)) {
            if (!is.neighbor()) {
                continue;
            }

            const auto I = static_cast<unsigned int>(elemMapper.index(is.inside()));
            const auto J = static_cast<unsigned int>(elemMapper.index(is.outside()));

            faceTrans[is.id()] = this->getTransmissibility(I, J);
        }
    }

    return faceTrans;
}

template <class ElementMapper, class GridView, class Scalar>
void
EclGenericCpGridVanguard<ElementMapper, GridView, Scalar>::
distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
               const bool ownersFirst,
               const bool serialPartitioning,
               const bool enableDistributedWells,
               const double zoltanImbalanceTol,
               const bool loadBalancerSet,
               const std::vector<double>& faceTrans,
               const std::vector<Well>& wells,
               std::vector<double>& centroids,
               EclipseState& eclState1,
               EclGenericVanguard::ParallelWellStruct& parallelWells)
{
    if (auto* eclState = dynamic_cast<ParallelEclipseState*>(&eclState1);
        eclState != nullptr)
    {
        this->distributeGrid(edgeWeightsMethod, ownersFirst,
                             serialPartitioning, enableDistributedWells,
                             zoltanImbalanceTol, loadBalancerSet, faceTrans,
                             wells, centroids, eclState, parallelWells);
    }
    else {
        const auto message = std::string {
            "Parallel simulator setup is incorrect as "
            "it does not use ParallelEclipseState"
        };

        OpmLog::error(message);

        throw std::invalid_argument { message };
    }

    this->grid_->switchToDistributedView();
}
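
// Perform the actual load balancing.  If an external load balancer has been
// installed, use the partition vector it computes on the I/O rank; otherwise
// fall back to CpGrid's built-in (Zoltan-based) partitioning with the face
// transmissibilities as edge weights.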
template <class ElementMapper, class GridView, class Scalar>
void
EclGenericCpGridVanguard<ElementMapper, GridView, Scalar>::
distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
               const bool ownersFirst,
               const bool serialPartitioning,
               const bool enableDistributedWells,
               const double zoltanImbalanceTol,
               const bool loadBalancerSet,
               const std::vector<double>& faceTrans,
               const std::vector<Well>& wells,
               std::vector<double>& centroids,
               ParallelEclipseState* eclState,
               EclGenericVanguard::ParallelWellStruct& parallelWells)
{
    const auto isIORank = this->grid_->comm().rank() == 0;

    const auto* eclGrid = isIORank
        ? &eclState->getInputGrid()
        : nullptr;

    PropsCentroidsDataHandle<Dune::CpGrid> handle {
        *this->grid_, *eclState, eclGrid, centroids,
        this->cartesianIndexMapper()
    };

    const auto addCornerCells = false;
    const auto overlapLayers = 1;

    if (loadBalancerSet) {
        auto parts = isIORank
            ? (*externalLoadBalancer)(*this->grid_)
            : std::vector<int>{};

        parallelWells =
            std::get<1>(this->grid_->loadBalance(handle, parts, &wells, ownersFirst,
                                                 addCornerCells, overlapLayers));
    }
    else {
        const auto useZoltan = true;

        parallelWells =
            std::get<1>(this->grid_->loadBalance(handle, edgeWeightsMethod,
                                                 &wells, serialPartitioning,
                                                 faceTrans.data(), ownersFirst,
                                                 addCornerCells, overlapLayers,
                                                 useZoltan, zoltanImbalanceTol,
                                                 enableDistributedWells));
    }
}

#endif // HAVE_MPI

template<class ElementMapper, class GridView, class Scalar>
void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(EclipseState& eclState)
{
    const EclipseGrid* input_grid = nullptr;
    std::vector<double> global_porv;
    // At this stage the ParallelEclipseState instance is still in global
    // view; on rank 0 we have undistributed data for the entire grid, on
    // the other ranks the EclipseState is empty.
    if (mpiRank == 0) {
        input_grid = &eclState.getInputGrid();
        global_porv = eclState.fieldProps().porv(true);
        OpmLog::info("\nProcessing grid");
    }

#if HAVE_MPI
    this->grid_ = std::make_unique<Dune::CpGrid>(EclGenericVanguard::comm());
#else
    this->grid_ = std::make_unique<Dune::CpGrid>();
#endif

    // Note: removed_cells is guaranteed to be empty on ranks other than 0.
    auto removed_cells =
        this->grid_->processEclipseFormat(input_grid,
                                          &eclState,
                                          /*isPeriodic=*/false,
                                          /*flipNormals=*/false,
                                          /*clipZ=*/false);

    if (mpiRank == 0) {
        const auto& active_porv = eclState.fieldProps().porv(false);
        const auto& unit_system = eclState.getUnits();
        const auto& volume_unit = unit_system.name(UnitSystem::measure::volume);
        double total_pore_volume = unit_system.from_si(UnitSystem::measure::volume,
                                                       std::accumulate(active_porv.begin(), active_porv.end(), 0.0));
        OpmLog::info(fmt::format("Total number of active cells: {} / total pore volume: {:0.0f} {}",
                                 grid_->numCells(), total_pore_volume, volume_unit));

        double removed_pore_volume = 0;
        for (const auto& global_index : removed_cells) {
            removed_pore_volume += active_porv[eclState.getInputGrid().activeIndex(global_index)];
        }

        if (removed_pore_volume > 0) {
            removed_pore_volume = unit_system.from_si(UnitSystem::measure::volume, removed_pore_volume);
            OpmLog::info(fmt::format("Removed {} cells with a pore volume of {:0.0f} {} ({:5.3f} %) due to MINPV/MINPVV",
                                     removed_cells.size(),
                                     removed_pore_volume,
                                     volume_unit,
                                     100 * removed_pore_volume / total_pore_volume));
        }
    }

    cartesianIndexMapper_ = std::make_unique<CartesianIndexMapper>(*grid_);

#if HAVE_MPI
    {
        const bool has_numerical_aquifer = eclState.aquifer().hasNumericalAquifer();
        int mpiSize = 1;
        MPI_Comm_size(grid_->comm(), &mpiSize);

        // When there are numerical aquifers, new NNCs are generated during
        // grid processing, so we need to pass the NNCs from the root process
        // to the other processes.
        if (has_numerical_aquifer && mpiSize > 1) {
            auto nnc_input = eclState.getInputNNC();
            EclMpiSerializer ser(grid_->comm());
            ser.broadcast(nnc_input);
            if (mpiRank > 0) {
                eclState.setInputNNC(nnc_input);
            }
        }
    }
#endif

    // We use separate grid objects: one for the calculation of the initial
    // condition via EQUIL and one for the actual simulation.  The reason is
    // that the EQUIL code is allergic to distributed grids and the
    // simulation grid is distributed before the initial condition is
    // calculated.
    //
    // After load balancing, grid_ will contain both a global and a
    // distributed view; equilGrid_, being a shallow copy, only contains the
    // global view.
    if (mpiRank == 0)
    {
        equilGrid_.reset(new Dune::CpGrid(*grid_));
        equilCartesianIndexMapper_ = std::make_unique<CartesianIndexMapper>(*equilGrid_);

        eclState.reset_actnum(UgGridHelpers::createACTNUM(*grid_));
    }

    {
        auto size = removed_cells.size();

        this->grid_->comm().broadcast(&size, 1, 0);

        if (mpiRank != 0) {
            removed_cells.resize(size);
        }

        this->grid_->comm().broadcast(removed_cells.data(), size, 0);
    }

    // Inform the aquifer object that we might have removed/deactivated
    // cells as part of minimum pore-volume threshold processing.
    eclState.pruneDeactivatedAquiferConnections(removed_cells);
}

template<class ElementMapper, class GridView, class Scalar>
void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doFilterConnections_(Schedule& schedule)
{
    // We only filter if we hold the global grid.  Otherwise the filtering
    // is done after load balancing, as in the future the other processes
    // will hold an empty partition for the global grid and filtering here
    // would therefore remove all well connections.
    if (this->equilGrid_ != nullptr) {
        ActiveGridCells activeCells(equilGrid().logicalCartesianSize(),
                                    equilGrid().globalCell().data(),
                                    equilGrid().size(0));

        schedule.filterConnections(activeCells);
    }

#if HAVE_MPI
    try {
        // Broadcast another time to remove inactive perforations on
        // slave processors.
        eclBroadcast(EclGenericVanguard::comm(), schedule);
    }
    catch (const std::exception& broadcast_error) {
        OpmLog::error(fmt::format("Distributing properties to all processes failed\n"
                                  "Internal error message: {}", broadcast_error.what()));
        MPI_Finalize();
        std::exit(EXIT_FAILURE);
    }
#endif // HAVE_MPI
}

template<class ElementMapper, class GridView, class Scalar>
const Dune::CpGrid&
EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilGrid() const
{
    assert(mpiRank == 0);
    return *equilGrid_;
}

template<class ElementMapper, class GridView, class Scalar>
const Dune::CartesianIndexMapper<Dune::CpGrid>&
EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::cartesianIndexMapper() const
{
    return *cartesianIndexMapper_;
}

template<class ElementMapper, class GridView, class Scalar>
const Dune::CartesianIndexMapper<Dune::CpGrid>&
EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilCartesianIndexMapper() const
{
    assert(mpiRank == 0);
    assert(equilCartesianIndexMapper_);
    return *equilCartesianIndexMapper_;
}
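
// Compute the vertical thickness of a cell as the difference between the
// average depth of its four bottom corners and that of its four top corners.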
template<class ElementMapper, class GridView, class Scalar>
Scalar
EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::
computeCellThickness(const typename GridView::template Codim<0>::Entity& element) const
{
    typedef typename Element::Geometry Geometry;
    static constexpr int zCoord = Element::dimension - 1;
    Scalar zz1 = 0.0;
    Scalar zz2 = 0.0;

    const Geometry& geometry = element.geometry();
    // This code only works with CpGrid, where the number of corners is
    // eight, and it assumes that the first four corners form the top
    // surface and the next four the bottom.
    assert(geometry.corners() == 8);
    for (int i = 0; i < 4; ++i) {
        zz1 += geometry.corner(i)[zCoord];
        zz2 += geometry.corner(i + 4)[zCoord];
    }
    zz1 /= 4;
    zz2 /= 4;
    return zz2 - zz1;
}

#if HAVE_DUNE_FEM
template class EclGenericCpGridVanguard<
    Dune::MultipleCodimMultipleGeomTypeMapper<
        Dune::GridView<
            Dune::Fem::GridPart2GridViewTraits<
                Dune::Fem::AdaptiveLeafGridPart<
                    Dune::CpGrid,
                    Dune::PartitionIteratorType(4),
                    false>>>>,
    Dune::GridView<
        Dune::Fem::GridPart2GridViewTraits<
            Dune::Fem::AdaptiveLeafGridPart<
                Dune::CpGrid,
                Dune::PartitionIteratorType(4),
                false>>>,
    double>;

template class EclGenericCpGridVanguard<
    Dune::MultipleCodimMultipleGeomTypeMapper<
        Dune::Fem::GridPart2GridViewImpl<
            Dune::Fem::AdaptiveLeafGridPart<
                Dune::CpGrid,
                Dune::PartitionIteratorType(4),
                false>>>,
    Dune::Fem::GridPart2GridViewImpl<
        Dune::Fem::AdaptiveLeafGridPart<
            Dune::CpGrid,
            Dune::PartitionIteratorType(4),
            false>>,
    double>;
#else
template class EclGenericCpGridVanguard<
    Dune::MultipleCodimMultipleGeomTypeMapper<
        Dune::GridView<
            Dune::DefaultLeafGridViewTraits<Dune::CpGrid>>>,
    Dune::GridView<
        Dune::DefaultLeafGridViewTraits<Dune::CpGrid>>,
    double>;
#endif

} // namespace Opm