Merge pull request #3928 from bska/make-jacobi-prec-opencl-only

Temporarily Limit Block-Jacobi Partitioner to OpenCL Only
Markus Blatt 2022-06-08 17:24:13 +02:00 committed by GitHub
commit 5e00a0ae59
6 changed files with 356 additions and 184 deletions
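
The cell partition produced for the Block-Jacobi preconditioner is consumed only by the openclSolver backend, so this commit compiles the partitioner, its NumJacobiBlocks parameter, and the supporting data members only when OpenCL support is available. The guard pattern, condensed from the hunks below (the surrounding class is abbreviated, so treat this as a sketch rather than the literal source):

    #if HAVE_OPENCL
        int numJacobiBlocks_{0};   // member exists only in OpenCL-enabled builds
    #endif

        int numJacobiBlocks() const
        {
    #if HAVE_OPENCL
            return numJacobiBlocks_; // value of the NumJacobiBlocks parameter
    #else
            return 0;                // partitioning disabled without OpenCL
    #endif
        }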

View File

@@ -84,10 +84,14 @@ template<class TypeTag, class MyTypeTag>
 struct EdgeWeightsMethod {
     using type = UndefinedProperty;
 };
+#if HAVE_OPENCL
 template<class TypeTag, class MyTypeTag>
 struct NumJacobiBlocks {
     using type = UndefinedProperty;
 };
+#endif // HAVE_OPENCL
 template<class TypeTag, class MyTypeTag>
 struct OwnerCellsFirst {
     using type = UndefinedProperty;
@@ -136,10 +140,14 @@ template<class TypeTag>
 struct EdgeWeightsMethod<TypeTag, TTag::EclBaseVanguard> {
     static constexpr int value = 1;
 };
+#if HAVE_OPENCL
 template<class TypeTag>
 struct NumJacobiBlocks<TypeTag, TTag::EclBaseVanguard> {
     static constexpr int value = 0;
 };
+#endif // HAVE_OPENCL
 template<class TypeTag>
 struct OwnerCellsFirst<TypeTag, TTag::EclBaseVanguard> {
     static constexpr bool value = true;
@@ -219,8 +227,12 @@ public:
                              "When restarting: should we try to initialize wells and groups from historical SCHEDULE section.");
         EWOMS_REGISTER_PARAM(TypeTag, int, EdgeWeightsMethod,
                              "Choose edge-weighing strategy: 0=uniform, 1=trans, 2=log(trans).");
+#if HAVE_OPENCL
         EWOMS_REGISTER_PARAM(TypeTag, int, NumJacobiBlocks,
                              "Number of blocks to be created for the Block-Jacobi preconditioner.");
+#endif
         EWOMS_REGISTER_PARAM(TypeTag, bool, OwnerCellsFirst,
                              "Order cells owned by rank before ghost/overlap cells.");
         EWOMS_REGISTER_PARAM(TypeTag, bool, SerialPartitioning,
@@ -245,7 +257,11 @@ public:
     {
         fileName_ = EWOMS_GET_PARAM(TypeTag, std::string, EclDeckFileName);
         edgeWeightsMethod_ = Dune::EdgeWeightMethod(EWOMS_GET_PARAM(TypeTag, int, EdgeWeightsMethod));
+#if HAVE_OPENCL
         numJacobiBlocks_ = EWOMS_GET_PARAM(TypeTag, int, NumJacobiBlocks);
+#endif
         ownersFirst_ = EWOMS_GET_PARAM(TypeTag, bool, OwnerCellsFirst);
         serialPartitioning_ = EWOMS_GET_PARAM(TypeTag, bool, SerialPartitioning);
         zoltanImbalanceTol_ = EWOMS_GET_PARAM(TypeTag, double, ZoltanImbalanceTol);
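
Note that the registration and the retrieval of NumJacobiBlocks sit under the same #if HAVE_OPENCL guard. Guarding only one side would be a bug: reading a parameter that was never registered would presumably fail at run time, while registering one that is never read would merely expose a dead command-line option. The pairing, condensed from the two hunks above:

    #if HAVE_OPENCL
        // Registration makes the parameter visible on the command line ...
        EWOMS_REGISTER_PARAM(TypeTag, int, NumJacobiBlocks,
                             "Number of blocks to be created for the Block-Jacobi preconditioner.");
    #endif

    #if HAVE_OPENCL
        // ... and retrieval during vanguard construction uses the same guard.
        numJacobiBlocks_ = EWOMS_GET_PARAM(TypeTag, int, NumJacobiBlocks);
    #endif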

View File

@@ -180,7 +180,7 @@ protected:
         globalTrans_->update(false);
     }

-    double getTransmissibility(unsigned I, unsigned J) override
+    double getTransmissibility(unsigned I, unsigned J) const override
     {
         return globalTrans_->transmissibility(I,J);
     }
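
The added const qualifier is what allows the new extractFaceTrans() helper introduced later in this commit, itself a const member function, to call getTransmissibility(). A simplified illustration of the constraint (not OPM code):

    #include <vector>

    struct VanguardBase {
        virtual double getTransmissibility(unsigned I, unsigned J) const = 0;

        std::vector<double> extractFaceTrans() const
        {
            // Legal only because getTransmissibility() is const-qualified;
            // a const member function may only call other const members.
            return { getTransmissibility(0, 1) };
        }
    };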

View File

@@ -22,21 +22,27 @@
  */
 #include <config.h>
 #include <ebos/eclgenericcpgridvanguard.hh>

 #if HAVE_MPI
 #include <ebos/eclmpiserializer.hh>
 #endif

+#include <opm/common/utility/ActiveGridCells.hpp>
+
+#include <opm/grid/cpgrid/GridHelpers.hpp>
+
+#include <opm/input/eclipse/Schedule/Schedule.hpp>
+#include <opm/input/eclipse/Schedule/Well/Well.hpp>
+
 #include <opm/simulators/utils/ParallelEclipseState.hpp>
-#include <opm/simulators/utils/PropsCentroidsDataHandle.hpp>
 #include <opm/simulators/utils/ParallelSerialization.hpp>
+#include <opm/simulators/utils/PropsCentroidsDataHandle.hpp>

-#include <opm/grid/cpgrid/GridHelpers.hpp>
-#include <opm/input/eclipse/Schedule/Schedule.hpp>
-#include <opm/common/utility/ActiveGridCells.hpp>
-
-#include <dune/grid/common/mcmgmapper.hh>
 #include <dune/common/version.hh>
+#include <dune/grid/common/mcmgmapper.hh>
+#include <dune/grid/common/partitionset.hh>

 #if HAVE_DUNE_FEM
 #include <dune/fem/gridpart/adaptiveleafgridpart.hh>
@@ -44,11 +50,16 @@
 #include <ebos/femcpgridcompat.hh>
 #endif

+#include <fmt/format.h>
+
 #include <cassert>
 #include <numeric>
+#include <optional>
 #include <sstream>
-
-#include <fmt/format.h>
+#include <stdexcept>
+#include <string>
+#include <tuple>
+#include <vector>

 namespace Opm {
@@ -57,163 +68,236 @@ std::optional<std::function<std::vector<int> (const Dune::CpGrid&)>> externalLoadBalancer;
 template<class ElementMapper, class GridView, class Scalar>
 EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::EclGenericCpGridVanguard()
 {
+    this->mpiRank = 0;
+
 #if HAVE_MPI
-    MPI_Comm_rank(EclGenericVanguard::comm(), &mpiRank);
-#else
-    mpiRank = 0;
-#endif
+    this->mpiRank = EclGenericVanguard::comm().rank();
+#endif // HAVE_MPI
 }

 template<class ElementMapper, class GridView, class Scalar>
 void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::releaseEquilGrid()
 {
-    equilGrid_.reset();
-    equilCartesianIndexMapper_.reset();
+    this->equilGrid_.reset();
+    this->equilCartesianIndexMapper_.reset();
 }
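
The constructor now initializes mpiRank to zero unconditionally and queries the rank through Dune's communication wrapper instead of a raw MPI_Comm_rank() call, which removes the #else branch. A minimal sketch, assuming EclGenericVanguard::comm() returns a Dune::Communication-style object:

    // Before: raw MPI, with a separate serial fallback branch.
    //   int rank; MPI_Comm_rank(comm, &rank);
    // After: the wrapper's rank()/size() work uniformly; in a serial
    // (non-MPI) build they would simply report rank 0 of size 1.
    const auto rank = EclGenericVanguard::comm().rank();
    const auto size = EclGenericVanguard::comm().size();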

 #if HAVE_MPI
 template<class ElementMapper, class GridView, class Scalar>
-void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doLoadBalance_(Dune::EdgeWeightMethod edgeWeightsMethod,
-                                                                             bool ownersFirst,
-                                                                             bool serialPartitioning,
-                                                                             bool enableDistributedWells,
-                                                                             double zoltanImbalanceTol,
-                                                                             const GridView& gridView,
-                                                                             const Schedule& schedule,
-                                                                             std::vector<double>& centroids,
-                                                                             EclipseState& eclState1,
-                                                                             EclGenericVanguard::ParallelWellStruct& parallelWells,
-                                                                             int numJacobiBlocks)
+void EclGenericCpGridVanguard<ElementMapper, GridView, Scalar>::
+doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
+               const bool ownersFirst,
+               const bool serialPartitioning,
+               const bool enableDistributedWells,
+               const double zoltanImbalanceTol,
+               const GridView& gridView,
+               const Schedule& schedule,
+               std::vector<double>& centroids,
+               EclipseState& eclState1,
+               EclGenericVanguard::ParallelWellStruct& parallelWells,
+               const int numJacobiBlocks)
 {
-    int mpiSize = 1;
-    MPI_Comm_size(grid_->comm(), &mpiSize);
+    const auto mpiSize = this->grid_->comm().size();
+
+    const auto partitionJacobiBlocks =
+        (numJacobiBlocks > 1) && (mpiSize == 1);

-    if (mpiSize > 1 || numJacobiBlocks > 1) {
-        // the CpGrid's loadBalance() method likes to have the transmissibilities as
-        // its edge weights. since this is (kind of) a layering violation and
-        // transmissibilities are relatively expensive to compute, we only do it if
-        // more than a single process is involved in the simulation.
-        if (grid_->size(0))
-        {
+    if ((mpiSize > 1) || (numJacobiBlocks > 1)) {
+        if (this->grid_->size(0) > 0) {
+            // Generally needed in parallel runs both when there is and when
+            // there is not an externally defined load-balancing function.
+            // In addition to being used in CpGrid::loadBalance(), the
+            // transmissibilities are also output to the .INIT file.  Thus,
+            // transmissiblity values must exist on the I/O rank for derived
+            // classes such as EclCpGridVanguard<>.
             this->allocTrans();
         }

-        // convert to transmissibility for faces
-        // TODO: grid_->numFaces() is not generic. use grid_->size(1) instead? (might
-        // not work)
-        unsigned numFaces = grid_->numFaces();
-        std::vector<double> faceTrans;
-        int loadBalancerSet = externalLoadBalancer.has_value();
-        grid_->comm().broadcast(&loadBalancerSet, 1, 0);
-        if (!loadBalancerSet){
-            faceTrans.resize(numFaces, 0.0);
-            ElementMapper elemMapper(gridView, Dune::mcmgElementLayout());
-            auto elemIt = gridView.template begin</*codim=*/0>();
-            const auto& elemEndIt = gridView.template end</*codim=*/0>();
-            for (; elemIt != elemEndIt; ++ elemIt) {
-                const auto& elem = *elemIt;
-                auto isIt = gridView.ibegin(elem);
-                const auto& isEndIt = gridView.iend(elem);
-                for (; isIt != isEndIt; ++ isIt) {
-                    const auto& is = *isIt;
-                    if (!is.neighbor())
-                        continue;
-
-                    unsigned I = elemMapper.index(is.inside());
-                    unsigned J = elemMapper.index(is.outside());
-
-                    // FIXME (?): this is not portable!
-                    unsigned faceIdx = is.id();
-
-                    faceTrans[faceIdx] = this->getTransmissibility(I,J);
-                }
-            }
-        }
+        // CpGrid's loadBalance() method uses transmissibilities as edge
+        // weights.  This is arguably a layering violation and extracting
+        // the per-face transmissibilities as a linear array is relatively
+        // expensive.  We therefore extract transmissibility values only if
+        // the values are actually needed.
+        auto loadBalancerSet = static_cast<int>(externalLoadBalancer.has_value());
+        this->grid_->comm().broadcast(&loadBalancerSet, 1, 0);
+
+        const auto faceTrans = ((loadBalancerSet == 0) || partitionJacobiBlocks)
+            ? this->extractFaceTrans(gridView)
+            : std::vector<double>{};
+
+        const auto wells = ((mpiSize > 1) || partitionJacobiBlocks)
+            ? schedule.getWellsatEnd()
+            : std::vector<Well>{};

-        //distribute the grid and switch to the distributed view.
+        // Distribute the grid and switch to the distributed view.
         if (mpiSize > 1) {
-            {
-                const auto wells = schedule.getWellsatEnd();
-
-                try
-                {
-                    auto& eclState = dynamic_cast<ParallelEclipseState&>(eclState1);
-                    const EclipseGrid* eclGrid = nullptr;
-
-                    if (grid_->comm().rank() == 0)
-                    {
-                        eclGrid = &eclState.getInputGrid();
-                    }
-
-                    PropsCentroidsDataHandle<Dune::CpGrid> handle(*grid_, eclState, eclGrid, centroids,
-                                                                  cartesianIndexMapper());
-                    if (loadBalancerSet)
-                    {
-                        std::vector<int> parts;
-                        if (grid_->comm().rank() == 0)
-                        {
-                            parts = (*externalLoadBalancer)(*grid_);
-                        }
-                        parallelWells = std::get<1>(grid_->loadBalance(handle, parts, &wells, ownersFirst, false, 1));
-                    }
-                    else
-                    {
-                        parallelWells =
-                            std::get<1>(grid_->loadBalance(handle, edgeWeightsMethod, &wells, serialPartitioning,
-                                                           faceTrans.data(), ownersFirst, false, 1, true, zoltanImbalanceTol,
-                                                           enableDistributedWells));
-                    }
-                }
-                catch(const std::bad_cast& e)
-                {
-                    std::ostringstream message;
-                    message << "Parallel simulator setup is incorrect as it does not use ParallelEclipseState ("
-                            << e.what() <<")"<<std::flush;
-                    OpmLog::error(message.str());
-                    std::rethrow_exception(std::current_exception());
-                }
-            }
-            grid_->switchToDistributedView();
+            this->distributeGrid(edgeWeightsMethod, ownersFirst,
+                                 serialPartitioning, enableDistributedWells,
+                                 zoltanImbalanceTol, loadBalancerSet != 0,
+                                 faceTrans, wells, centroids,
+                                 eclState1, parallelWells);
         }

         // Calling Schedule::filterConnections would remove any perforated
-        // cells that exist only on other ranks even in the case of distributed wells
-        // But we need all connections to figure out the first cell of a well (e.g. for
-        // pressure). Hence this is now skipped. Rank 0 had everything even before.
+        // cells that exist only on other ranks even in the case of
+        // distributed wells.  But we need all connections to figure out the
+        // first cell of a well (e.g. for pressure).  Hence this is now
+        // skipped.  Rank 0 had everything even before.

-        if (numJacobiBlocks > 1 && mpiSize == 1) {
-            const auto wells = schedule.getWellsatEnd();
-            cell_part_.resize(grid_->numCells());
-            cell_part_ = grid_->zoltanPartitionWithoutScatter(&wells, faceTrans.data(), numJacobiBlocks, zoltanImbalanceTol);
+#if HAVE_OPENCL
+        if (partitionJacobiBlocks) {
+            this->cell_part_ = this->grid_->
+                zoltanPartitionWithoutScatter(&wells, faceTrans.data(),
+                                              numJacobiBlocks,
+                                              zoltanImbalanceTol);
         }
+#endif // HAVE_OPENCL
     }
 }
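
The rewrite replaces the old inline extraction loop with a pair of conditionally evaluated expressions driven by one named predicate. Spelled out (condensed from the new code above):

    // Jacobi-block partitioning only applies to serial runs with >1 block.
    const bool partitionJacobiBlocks = (numJacobiBlocks > 1) && (mpiSize == 1);

    // faceTrans is needed by loadBalance() when no external load balancer
    // is installed, and by zoltanPartitionWithoutScatter() for the blocks.
    const bool needFaceTrans = (loadBalancerSet == 0) || partitionJacobiBlocks;

    // The well list is needed whenever the grid is distributed or the
    // Block-Jacobi partition is computed; otherwise both stay empty.
    const bool needWells = (mpiSize > 1) || partitionJacobiBlocks;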

 template<class ElementMapper, class GridView, class Scalar>
 void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::distributeFieldProps_(EclipseState& eclState1)
 {
-    int mpiSize = 1;
-    MPI_Comm_size(grid_->comm(), &mpiSize);
+    const auto mpiSize = this->grid_->comm().size();

-    if (mpiSize > 1) {
-        try
-        {
-            auto& parallelEclState = dynamic_cast<ParallelEclipseState&>(eclState1);
-            // reset cartesian index mapper for auto creation of field properties
-            parallelEclState.resetCartesianMapper(cartesianIndexMapper_.get());
-            parallelEclState.switchToDistributedProps();
-        }
-        catch(const std::bad_cast& e)
-        {
-            std::ostringstream message;
-            message << "Parallel simulator setup is incorrect as it does not use ParallelEclipseState ("
-                    << e.what() <<")"<<std::flush;
-            OpmLog::error(message.str());
-            std::rethrow_exception(std::current_exception());
-        }
+    if (mpiSize == 1) {
+        return;
+    }
+
+    if (auto* parallelEclState = dynamic_cast<ParallelEclipseState*>(&eclState1);
+        parallelEclState != nullptr)
+    {
+        // Reset Cartesian index mapper for automatic creation of field
+        // properties
+        parallelEclState->resetCartesianMapper(this->cartesianIndexMapper_.get());
+        parallelEclState->switchToDistributedProps();
+    }
+    else {
+        const auto message = std::string {
+            "Parallel simulator setup is incorrect as "
+            "it does not use ParallelEclipseState"
+        };
+
+        OpmLog::error(message);
+        throw std::invalid_argument { message };
     }
 }
-#endif
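
Error handling changes shape here: instead of a dynamic_cast to a reference with a catch of std::bad_cast, the new code casts to a pointer inside a C++17 if-with-initializer and throws std::invalid_argument itself. A self-contained illustration of the pattern:

    #include <stdexcept>

    struct Base { virtual ~Base() = default; };
    struct Derived : Base { void specialOp() {} };

    void useDerived(Base& b)
    {
        // The pointer's scope is confined to the if/else; no exception
        // machinery is needed for the type check itself.
        if (auto* d = dynamic_cast<Derived*>(&b); d != nullptr) {
            d->specialOp();
        }
        else {
            throw std::invalid_argument { "wrong dynamic type" };
        }
    }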
+template <class ElementMapper, class GridView, class Scalar>
+std::vector<double>
+EclGenericCpGridVanguard<ElementMapper, GridView, Scalar>::
+extractFaceTrans(const GridView& gridView) const
+{
+    auto faceTrans = std::vector<double>(this->grid_->numFaces(), 0.0);
+
+    const auto elemMapper = ElementMapper { gridView, Dune::mcmgElementLayout() };
+
+    for (const auto& elem : elements(gridView, Dune::Partitions::interiorBorder)) {
+        for (const auto& is : intersections(gridView, elem)) {
+            if (!is.neighbor()) {
+                continue;
+            }
+
+            const auto I = static_cast<unsigned int>(elemMapper.index(is.inside()));
+            const auto J = static_cast<unsigned int>(elemMapper.index(is.outside()));
+
+            faceTrans[is.id()] = this->getTransmissibility(I, J);
+        }
+    }
+
+    return faceTrans;
+}
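
extractFaceTrans() also modernizes the traversal: the manual begin</*codim=*/0>() iterator pairs become dune-grid's range generators, and the element loop is restricted to Dune::Partitions::interiorBorder rather than visiting every element. The idiom in isolation (gridView is assumed to be a Dune grid view):

    for (const auto& elem : elements(gridView, Dune::Partitions::interiorBorder)) {
        for (const auto& is : intersections(gridView, elem)) {
            if (!is.neighbor()) {
                continue; // boundary intersection: no outside cell to index
            }
            // is.inside() / is.outside() give the two adjacent cells.
        }
    }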
+template <class ElementMapper, class GridView, class Scalar>
+void
+EclGenericCpGridVanguard<ElementMapper, GridView, Scalar>::
+distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+               const bool ownersFirst,
+               const bool serialPartitioning,
+               const bool enableDistributedWells,
+               const double zoltanImbalanceTol,
+               const bool loadBalancerSet,
+               const std::vector<double>& faceTrans,
+               const std::vector<Well>& wells,
+               std::vector<double>& centroids,
+               EclipseState& eclState1,
+               EclGenericVanguard::ParallelWellStruct& parallelWells)
+{
+    if (auto* eclState = dynamic_cast<ParallelEclipseState*>(&eclState1);
+        eclState != nullptr)
+    {
+        this->distributeGrid(edgeWeightsMethod, ownersFirst,
+                             serialPartitioning, enableDistributedWells,
+                             zoltanImbalanceTol, loadBalancerSet, faceTrans,
+                             wells, centroids, eclState, parallelWells);
+    }
+    else {
+        const auto message = std::string {
+            "Parallel simulator setup is incorrect as "
+            "it does not use ParallelEclipseState"
+        };
+
+        OpmLog::error(message);
+        throw std::invalid_argument { message };
+    }
+
+    this->grid_->switchToDistributedView();
+}
+
+template <class ElementMapper, class GridView, class Scalar>
+void
+EclGenericCpGridVanguard<ElementMapper, GridView, Scalar>::
+distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+               const bool ownersFirst,
+               const bool serialPartitioning,
+               const bool enableDistributedWells,
+               const double zoltanImbalanceTol,
+               const bool loadBalancerSet,
+               const std::vector<double>& faceTrans,
+               const std::vector<Well>& wells,
+               std::vector<double>& centroids,
+               ParallelEclipseState* eclState,
+               EclGenericVanguard::ParallelWellStruct& parallelWells)
+{
+    const auto isIORank = this->grid_->comm().rank() == 0;
+
+    const auto* eclGrid = isIORank
+        ? &eclState->getInputGrid()
+        : nullptr;
+
+    PropsCentroidsDataHandle<Dune::CpGrid> handle {
+        *this->grid_, *eclState, eclGrid, centroids,
+        this->cartesianIndexMapper()
+    };
+
+    const auto addCornerCells = false;
+    const auto overlapLayers = 1;
+
+    if (loadBalancerSet) {
+        auto parts = isIORank
+            ? (*externalLoadBalancer)(*this->grid_)
+            : std::vector<int>{};
+
+        parallelWells =
+            std::get<1>(this->grid_->loadBalance(handle, parts, &wells, ownersFirst,
+                                                 addCornerCells, overlapLayers));
+    }
+    else {
+        const auto useZoltan = true;
+
+        parallelWells =
+            std::get<1>(this->grid_->loadBalance(handle, edgeWeightsMethod,
+                                                 &wells, serialPartitioning,
+                                                 faceTrans.data(), ownersFirst,
+                                                 addCornerCells, overlapLayers,
+                                                 useZoltan, zoltanImbalanceTol,
+                                                 enableDistributedWells));
+    }
+}
+
+#endif // HAVE_MPI
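
The distribution logic is now split into a validating overload taking EclipseState& and a worker overload taking ParallelEclipseState*, so the dynamic-type check happens exactly once. The shape of the dispatch, condensed (parameter lists abbreviated):

    void distributeGrid(EclipseState& eclState /*, ...*/)
    {
        if (auto* p = dynamic_cast<ParallelEclipseState*>(&eclState); p != nullptr) {
            distributeGrid(p /*, ...*/);   // forward to the worker overload
        }
        else {
            throw std::invalid_argument { "ParallelEclipseState required" };
        }
        grid_->switchToDistributedView(); // runs only on success
    }

    void distributeGrid(ParallelEclipseState* eclState /*, ...*/)
    {
        // The actual CpGrid::loadBalance() call, with named boolean
        // constants (addCornerCells, overlapLayers, useZoltan) replacing
        // the old bare literal arguments for readability.
    }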

 template<class ElementMapper, class GridView, class Scalar>
 void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doCreateGrids_(EclipseState& eclState)
@@ -326,45 +410,47 @@ void EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::doFilterConnections_
     // is done after load balancing as in the future the other processes
     // will hold an empty partition for the global grid and hence filtering
     // here would remove all well connections.
-    if (equilGrid_)
-    {
+    if (this->equilGrid_ != nullptr) {
         ActiveGridCells activeCells(equilGrid().logicalCartesianSize(),
                                     equilGrid().globalCell().data(),
                                     equilGrid().size(0));
         schedule.filterConnections(activeCells);
     }

 #if HAVE_MPI
-    try
-    {
+    try {
         // Broadcast another time to remove inactive peforations on
         // slave processors.
         eclBroadcast(EclGenericVanguard::comm(), schedule);
     }
-    catch(const std::exception& broadcast_error)
-    {
+    catch (const std::exception& broadcast_error) {
         OpmLog::error(fmt::format("Distributing properties to all processes failed\n"
                                   "Internal error message: {}", broadcast_error.what()));
         MPI_Finalize();
         std::exit(EXIT_FAILURE);
     }
-#endif
+#endif // HAVE_MPI
 }

 template<class ElementMapper, class GridView, class Scalar>
-const Dune::CpGrid& EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilGrid() const
+const Dune::CpGrid&
+EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilGrid() const
 {
     assert(mpiRank == 0);
     return *equilGrid_;
 }

 template<class ElementMapper, class GridView, class Scalar>
-const Dune::CartesianIndexMapper<Dune::CpGrid>& EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::cartesianIndexMapper() const
+const Dune::CartesianIndexMapper<Dune::CpGrid>&
+EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::cartesianIndexMapper() const
 {
     return *cartesianIndexMapper_;
 }

 template<class ElementMapper, class GridView, class Scalar>
-const Dune::CartesianIndexMapper<Dune::CpGrid>& EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilCartesianIndexMapper() const
+const Dune::CartesianIndexMapper<Dune::CpGrid>&
+EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilCartesianIndexMapper() const
 {
     assert(mpiRank == 0);
     assert(equilCartesianIndexMapper_);
@@ -372,7 +458,9 @@ const Dune::CartesianIndexMapper<Dune::CpGrid>& EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::equilCartesianIndexMapper() const
 }

 template<class ElementMapper, class GridView, class Scalar>
-Scalar EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::computeCellThickness(const typename GridView::template Codim<0>::Entity& element) const
+Scalar
+EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::
+computeCellThickness(const typename GridView::template Codim<0>::Entity& element) const
 {
     typedef typename Element::Geometry Geometry;
     static constexpr int zCoord = Element::dimension - 1;
@@ -396,30 +484,42 @@ Scalar EclGenericCpGridVanguard<ElementMapper,GridView,Scalar>::computeCellThickness
 }

 #if HAVE_DUNE_FEM
-template class EclGenericCpGridVanguard<Dune::MultipleCodimMultipleGeomTypeMapper<
-    Dune::GridView<
-        Dune::Fem::GridPart2GridViewTraits<
-            Dune::Fem::AdaptiveLeafGridPart<Dune::CpGrid, Dune::PartitionIteratorType(4), false>>>>,
-    Dune::GridView<
-        Dune::Fem::GridPart2GridViewTraits<
-            Dune::Fem::AdaptiveLeafGridPart<
-                Dune::CpGrid, Dune::PartitionIteratorType(4), false>>>,
-    double>;
-template class EclGenericCpGridVanguard<Dune::MultipleCodimMultipleGeomTypeMapper<
-    Dune::Fem::GridPart2GridViewImpl<
-        Dune::Fem::AdaptiveLeafGridPart<
-            Dune::CpGrid,
-            Dune::PartitionIteratorType(4),
-            false>>>,
-    Dune::Fem::GridPart2GridViewImpl<
-        Dune::Fem::AdaptiveLeafGridPart<
-            Dune::CpGrid,
-            Dune::PartitionIteratorType(4),
-            false> >,
-    double>;
+template class EclGenericCpGridVanguard<
+    Dune::MultipleCodimMultipleGeomTypeMapper<
+        Dune::GridView<
+            Dune::Fem::GridPart2GridViewTraits<
+                Dune::Fem::AdaptiveLeafGridPart<
+                    Dune::CpGrid,
+                    Dune::PartitionIteratorType(4),
+                    false>>>>,
+    Dune::GridView<
+        Dune::Fem::GridPart2GridViewTraits<
+            Dune::Fem::AdaptiveLeafGridPart<
+                Dune::CpGrid,
+                Dune::PartitionIteratorType(4),
+                false>>>,
+    double>;
+
+template class EclGenericCpGridVanguard<
+    Dune::MultipleCodimMultipleGeomTypeMapper<
+        Dune::Fem::GridPart2GridViewImpl<
+            Dune::Fem::AdaptiveLeafGridPart<
+                Dune::CpGrid,
+                Dune::PartitionIteratorType(4),
+                false>>>,
+    Dune::Fem::GridPart2GridViewImpl<
+        Dune::Fem::AdaptiveLeafGridPart<
+            Dune::CpGrid,
+            Dune::PartitionIteratorType(4),
+            false> >,
+    double>;
 #else
-template class EclGenericCpGridVanguard<Dune::MultipleCodimMultipleGeomTypeMapper<Dune::GridView<Dune::DefaultLeafGridViewTraits<Dune::CpGrid>>>,
-                                        Dune::GridView<Dune::DefaultLeafGridViewTraits<Dune::CpGrid>>,
-                                        double>;
+template class EclGenericCpGridVanguard<
+    Dune::MultipleCodimMultipleGeomTypeMapper<
+        Dune::GridView<
+            Dune::DefaultLeafGridViewTraits<Dune::CpGrid>>>,
+    Dune::GridView<
+        Dune::DefaultLeafGridViewTraits<Dune::CpGrid>>,
+    double>;
 #endif

 } // namespace Opm

View File

@@ -31,6 +31,15 @@
 #include <opm/grid/CpGrid.hpp>

 #include <functional>
+#include <optional>
+#include <vector>
+
+namespace Opm {
+    class EclipseState;
+    class Schedule;
+    class Well;
+    class ParallelEclipseState;
+}

 namespace Opm {
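
The new forward declarations suffice because the private declarations added below use EclipseState, Schedule, Well, and ParallelEclipseState only behind references, pointers, or as std::vector element types in function parameters; none of that requires the complete class definitions in this header. A sketch of the idea:

    namespace Opm { class Well; }            // declaration only

    // Fine in a header: Well appears only as a vector element type in a
    // parameter; the full definition is needed in the .cc file instead.
    void distributeGrid(const std::vector<Opm::Well>& wells);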
@@ -101,10 +110,11 @@ public:
     */
    const CartesianIndexMapper& equilCartesianIndexMapper() const;

-    std::vector<int> cellPartition() const
+    const std::vector<int>& cellPartition() const
     {
-        return cell_part_;
+        return this->cell_part_;
     }

 protected:
     /*!
     * \brief Distribute the simulation grid over multiple processes
@@ -112,25 +122,56 @@ protected:
     * (For parallel simulation runs.)
     */
 #if HAVE_MPI
-    void doLoadBalance_(Dune::EdgeWeightMethod edgeWeightsMethod,
-                        bool ownersFirst, bool serialPartitioning,
-                        bool enableDistributedWells, double zoltanImbalanceTol,
-                        const GridView& gridv,
+    void doLoadBalance_(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                        const bool ownersFirst,
+                        const bool serialPartitioning,
+                        const bool enableDistributedWells,
+                        const double zoltanImbalanceTol,
+                        const GridView& gridView,
                         const Schedule& schedule,
                         std::vector<double>& centroids,
                         EclipseState& eclState,
                         EclGenericVanguard::ParallelWellStruct& parallelWells,
-                        int numJacobiBlocks);
+                        const int numJacobiBlocks);

     void distributeFieldProps_(EclipseState& eclState);
-#endif
+
+private:
+    std::vector<double> extractFaceTrans(const GridView& gridView) const;
+
+    void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                        const bool ownersFirst,
+                        const bool serialPartitioning,
+                        const bool enableDistributedWells,
+                        const double zoltanImbalanceTol,
+                        const bool loadBalancerSet,
+                        const std::vector<double>& faceTrans,
+                        const std::vector<Well>& wells,
+                        std::vector<double>& centroids,
+                        EclipseState& eclState,
+                        EclGenericVanguard::ParallelWellStruct& parallelWells);
+
+    void distributeGrid(const Dune::EdgeWeightMethod edgeWeightsMethod,
+                        const bool ownersFirst,
+                        const bool serialPartitioning,
+                        const bool enableDistributedWells,
+                        const double zoltanImbalanceTol,
+                        const bool loadBalancerSet,
+                        const std::vector<double>& faceTrans,
+                        const std::vector<Well>& wells,
+                        std::vector<double>& centroids,
+                        ParallelEclipseState* eclState,
+                        EclGenericVanguard::ParallelWellStruct& parallelWells);
+
+protected:
+#endif // HAVE_MPI

     void allocCartMapper();

     void doCreateGrids_(EclipseState& eclState);

     virtual void allocTrans() = 0;
-    virtual double getTransmissibility(unsigned I, unsigned J) = 0;
+    virtual double getTransmissibility(unsigned I, unsigned J) const = 0;

     // removing some connection located in inactive grid cells
     void doFilterConnections_(Schedule& schedule);
@@ -143,7 +184,7 @@ protected:
     std::unique_ptr<CartesianIndexMapper> equilCartesianIndexMapper_;
     int mpiRank;

-    std::vector<int> cell_part_;
+    std::vector<int> cell_part_{};
 };

 } // namespace Opm

View File

@@ -253,7 +253,13 @@ public:
      * \brief Number of blocks in the Block-Jacobi preconditioner.
      */
     int numJacobiBlocks() const
-    { return numJacobiBlocks_; }
+    {
+#if HAVE_OPENCL
+        return numJacobiBlocks_;
+#else
+        return 0;
+#endif
+    }

     /*!
      * \brief Parameter that decide if cells owned by rank are ordered before ghost cells.
@@ -329,7 +335,11 @@ protected:
     std::string caseName_;
     std::string fileName_;
     Dune::EdgeWeightMethod edgeWeightsMethod_;
-    int numJacobiBlocks_;
+#if HAVE_OPENCL
+    int numJacobiBlocks_{0};
+#endif // HAVE_OPENCL
     bool ownersFirst_;
     bool serialPartitioning_;
     double zoltanImbalanceTol_;

View File

@@ -213,7 +213,12 @@ namespace Opm
         matrix_ = const_cast<Matrix*>(&M.istlMatrix());

         // setup sparsity pattern for jacobi matrix for preconditioner (only used for openclSolver)
-        numJacobiBlocks_ = EWOMS_GET_PARAM(TypeTag, int, NumJacobiBlocks);
+#if HAVE_OPENCL
+        this->numJacobiBlocks_ = EWOMS_GET_PARAM(TypeTag, int, NumJacobiBlocks);
+#else
+        this->numJacobiBlocks_ = 0;
+#endif
         useWellConn_ = EWOMS_GET_PARAM(TypeTag, bool, MatrixAddWellContributions);
         if (numJacobiBlocks_ > 1) {
             const auto wellsForConn = simulator_.vanguard().schedule().getWellsatEnd();
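
With the fallback assignment in place, a build without OpenCL still compiles the sparsity-pattern setup that follows, but the guard condition is always false, so the openclSolver-only work is skipped. The net effect, condensed:

    // In a non-OpenCL build the #else branch pins the count to zero ...
    this->numJacobiBlocks_ = 0;
    // ... so the subsequent test is never true and the Jacobi-matrix
    // sparsity pattern for openclSolver is never built.
    if (numJacobiBlocks_ > 1) { /* skipped */ }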