mirror of https://github.com/OPM/opm-simulators.git
synced 2025-02-25 18:55:30 -06:00

Merge pull request #2919 from blattms/do-not-filter-distributed

Do not filter connections on the schedule of the loadbalanced grid.

Commit 5617a2ef5c
@@ -35,6 +35,7 @@
 #include <opm/grid/cpgrid/GridHelpers.hpp>
 #include <opm/simulators/utils/ParallelEclipseState.hpp>
 #include <opm/simulators/utils/PropsCentroidsDataHandle.hpp>
+#include <opm/simulators/utils/ParallelSerialization.hpp>
 
 #include <dune/grid/common/mcmgmapper.hh>
 
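(The added include makes the declaration of Opm::eclScheduleBroadcast, introduced in the ParallelSerialization hunks at the end of this diff, visible to the grid vanguard, which calls it after load balancing in the hunks below.)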
@@ -234,15 +235,10 @@ public:
         cartesianIndexMapper_.reset();
 
-        if ( ! equilGrid_ )
-        {
-            // for processes that do not hold the global grid we filter here using the local grid.
-            // If we would filter in filterConnection_ our partition would be empty and the connections of all
-            // wells would be removed.
-            ActiveGridCells activeCells(grid().logicalCartesianSize(),
-                                        grid().globalCell().data(), grid().size(0));
-            this->schedule().filterConnections(activeCells);
-        }
+        // Calling Schedule::filterConnections would remove any perforated
+        // cells that exist only on other ranks even in the case of distributed wells.
+        // But we need all connections to figure out the first cell of a well (e.g. for
+        // pressure). Hence this is now skipped. Rank 0 had everything even before.
     }
 }
 #endif
 
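Why the filtering had to go: on the load-balanced grid each rank's active-cell set contains only its local cells, so a filterConnections-style pass would erase every connection a distributed well has on other ranks. The following is a minimal sketch of that failure mode, with hypothetical reduced types (the real Schedule::filterConnections operates on ActiveGridCells, not on a plain set):

    #include <algorithm>
    #include <unordered_set>
    #include <vector>

    // Hypothetical, reduced stand-ins for the well/connection objects.
    struct Connection { int cartesianCell; };
    struct Well { std::vector<Connection> connections; };

    // Sketch of a filterConnections-style pass: drop every connection whose
    // cell is not in the given active-cell set. If the set only holds the
    // local partition's cells, connections living on other ranks disappear,
    // and connections.front() may no longer be the true first cell of the
    // well (needed e.g. for the well's reference pressure).
    void filterConnectionsSketch(Well& well,
                                 const std::unordered_set<int>& activeCells)
    {
        auto& conns = well.connections;
        conns.erase(std::remove_if(conns.begin(), conns.end(),
                                   [&](const Connection& c) {
                                       return activeCells.count(c.cartesianCell) == 0;
                                   }),
                    conns.end());
    }

Skipping this step keeps all connections on every rank; rank 0, which holds the global grid, still filters against the full active-cell set (see the context of the next hunk).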
@@ -351,6 +347,21 @@ protected:
                                         equilGrid().size(0));
             this->schedule().filterConnections(activeCells);
         }
+#if HAVE_MPI
+        try
+        {
+            // Broadcast another time to remove inactive perforations on
+            // slave processors.
+            Opm::eclScheduleBroadcast(this->schedule());
+        }
+        catch (const std::exception& broadcast_error)
+        {
+            OpmLog::error(fmt::format("Distributing properties to all processes failed\n"
+                                      "Internal error message: {}", broadcast_error.what()));
+            MPI_Finalize();
+            std::exit(EXIT_FAILURE);
+        }
+#endif
     }
 
     std::unique_ptr<Grid> grid_;
 
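The re-broadcast makes rank 0's schedule authoritative: rank 0 filtered it against the global (equilibration) grid, so broadcasting the result leaves every rank with the same schedule, with inactive perforations removed. Below is a generic sketch of the serialize-and-broadcast pattern that a call like Opm::eclScheduleBroadcast presumably performs via EclMpiSerializer; a std::string stands in for the Schedule, and the packing details are assumptions:

    #include <mpi.h>
    #include <algorithm>
    #include <string>
    #include <vector>

    // Broadcast a serialized object from rank 0 to all ranks of comm.
    void broadcastSketch(std::string& obj, MPI_Comm comm)
    {
        int rank = 0;
        MPI_Comm_rank(comm, &rank);

        // Step 1: every rank learns the payload size from rank 0.
        unsigned long long size = (rank == 0) ? obj.size() : 0ULL;
        MPI_Bcast(&size, 1, MPI_UNSIGNED_LONG_LONG, 0, comm);

        // Step 2: broadcast the payload bytes.
        std::vector<char> buffer(size);
        if (rank == 0)
            std::copy(obj.begin(), obj.end(), buffer.begin());
        MPI_Bcast(buffer.data(), static_cast<int>(size), MPI_CHAR, 0, comm);

        // Step 3: non-root ranks replace their copy with rank 0's version.
        if (rank != 0)
            obj.assign(buffer.begin(), buffer.end());
    }

Catching the exception locally and calling MPI_Finalize() before std::exit() is a sensible choice here: a C++ exception cannot propagate across ranks, and a rank that simply dies would leave the others blocked in the collective broadcast.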
opm/simulators/utils/ParallelSerialization.cpp:

@@ -49,4 +49,9 @@ void eclStateBroadcast(EclipseState& eclState, Schedule& schedule,
     ser.broadcast(summaryConfig);
 }
 
+void eclScheduleBroadcast(Schedule& schedule)
+{
+    Opm::EclMpiSerializer ser(Dune::MPIHelper::getCollectiveCommunication());
+    ser.broadcast(schedule);
+}
 }
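Like eclStateBroadcast above it, the new function is collective: every rank of the communicator must call it, or the ranks that do will block inside the broadcast. A minimal, hypothetical call site:

    #include <opm/simulators/utils/ParallelSerialization.hpp>

    // Hypothetical wrapper: every rank participates; afterwards all ranks
    // hold rank 0's (fully filtered) schedule.
    void syncSchedule(Opm::Schedule& schedule)
    {
    #if HAVE_MPI
        Opm::eclScheduleBroadcast(schedule);
    #endif
    }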
opm/simulators/utils/ParallelSerialization.hpp:

@@ -33,6 +33,9 @@ class SummaryConfig;
 void eclStateBroadcast(EclipseState& eclState, Schedule& schedule,
                        SummaryConfig& summaryConfig);
 
+/// \brief Broadcasts a schedule from the root node in parallel runs.
+void eclScheduleBroadcast(Schedule& schedule);
+
 } // end namespace Opm
 
 #endif // PARALLEL_SERIALIZATION_HPP