Remove writeInit()

The EGRID and INIT files are now written by ebos.
Tor Harald Sandve 2018-01-15 13:06:42 +01:00
parent 386ade39f4
commit 36f6b7ad00


@@ -108,7 +108,6 @@ namespace Opm
printPRTHeader();
extractMessages();
runDiagnostics();
writeInit();
setupOutputWriter();
setupLinearSolver();
createSimulator();
@@ -420,10 +419,6 @@ namespace Opm
ebosSimulator_.reset(new EbosSimulator(/*verbose=*/false));
ebosSimulator_->model().applyInitialSolution();
// Create a grid with a global view.
globalGrid_.reset(new Grid(grid()));
globalGrid_->switchToGlobalView();
try {
if (output_cout_) {
MissingFeatures::checkKeywords(deck());
@@ -464,9 +459,6 @@ namespace Opm
const Schedule& schedule() const
{ return ebosSimulator_->gridManager().schedule(); }
const SummaryConfig& summaryConfig() const
{ return ebosSimulator_->gridManager().summaryConfig(); }
// Extract messages from parser.
// Writes to:
// OpmLog singleton.
@@ -513,27 +505,6 @@ namespace Opm
diagnostic.diagnosis(eclState(), deck(), this->grid());
}
void writeInit()
{
bool output = ( output_ > OUTPUT_LOG_ONLY );
bool output_ecl = param_.getDefault("output_ecl", true);
auto int_vectors = computeCellRanks(output, output_ecl);
if( output && output_ecl && grid().comm().rank() == 0 )
{
exportNncStructure_();
const EclipseGrid& inputGrid = eclState().getInputGrid();
eclIO_.reset(new EclipseIO(eclState(),
UgGridHelpers::createEclipseGrid( this->globalGrid() , inputGrid ),
schedule(),
summaryConfig()));
eclIO_->writeInitial(computeLegacySimProps_(), int_vectors, nnc_);
Problem& problem = ebosProblem();
problem.setEclIO(std::move(eclIO_));
}
}
// Setup output writer.
// Writes to:
// output_writer_
@@ -679,232 +650,6 @@ namespace Opm
Grid& grid()
{ return ebosSimulator_->gridManager().grid(); }
const Grid& globalGrid()
{ return *globalGrid_; }
Problem& ebosProblem()
{ return ebosSimulator_->problem(); }
const Problem& ebosProblem() const
{ return ebosSimulator_->problem(); }
std::shared_ptr<MaterialLawManager> materialLawManager()
{ return ebosProblem().materialLawManager(); }
Scalar gravity() const
{ return ebosProblem().gravity()[2]; }
std::map<std::string, std::vector<int> > computeCellRanks(bool output, bool output_ecl)
{
std::map<std::string, std::vector<int> > integerVectors;
if( output && output_ecl && grid().comm().size() > 1 )
{
typedef typename Grid::LeafGridView GridView;
#if DUNE_VERSION_NEWER(DUNE_GEOMETRY, 2, 6)
using ElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GridView>;
#else
using ElementMapper = Dune::MultipleCodimMultipleGeomTypeMapper<GridView, Dune::MCMGElementLayout>;
#endif
// Get the owner rank number for each cell
using Handle = CellOwnerDataHandle<ElementMapper>;
const Grid& globalGrid = this->globalGrid();
const auto& globalGridView = globalGrid.leafGridView();
#if DUNE_VERSION_NEWER(DUNE_GEOMETRY, 2, 6)
ElementMapper globalMapper(globalGridView, Dune::mcmgElementLayout());
#else
ElementMapper globalMapper(globalGridView);
#endif
const auto globalSize = globalGrid.size(0);
std::vector<int> ranks(globalSize, -1);
Handle handle(globalMapper, ranks);
this->grid().gatherData(handle);
integerVectors.emplace("MPI_RANK", ranks);
}
return integerVectors;
}
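// Note: computeCellRanks() relies on Dune's grid communication mechanism:
// each process contributes the rank of the cells it owns, and
// grid().gatherData() collects the result onto the global view. A minimal
// sketch of the shape such a data handle takes (illustrative only; the real
// CellOwnerDataHandle in opm-simulators may differ in detail):
//
//   template <class Mapper>
//   struct OwnerRankHandle
//       : Dune::CommDataHandleIF<OwnerRankHandle<Mapper>, int>
//   {
//       OwnerRankHandle(const Mapper& m, std::vector<int>& r, int myRank)
//           : mapper_(m), ranks_(r), myRank_(myRank) {}
//       bool contains(int /*dim*/, int codim) const { return codim == 0; }
//       bool fixedsize(int /*dim*/, int /*codim*/) const { return true; }
//       template <class Entity>
//       std::size_t size(const Entity&) const { return 1; }
//       template <class Buffer, class Entity>
//       void gather(Buffer& buf, const Entity&) const { buf.write(myRank_); }
//       template <class Buffer, class Entity>
//       void scatter(Buffer& buf, const Entity& e, std::size_t /*n*/)
//       { buf.read(ranks_[mapper_.index(e)]); }
//       const Mapper& mapper_;
//       std::vector<int>& ranks_;
//       int myRank_;
//   };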
data::Solution computeLegacySimProps_()
{
const int* dims = UgGridHelpers::cartDims(grid());
const int globalSize = dims[0]*dims[1]*dims[2];
data::CellData tranx = {UnitSystem::measure::transmissibility, std::vector<double>( globalSize ), data::TargetType::INIT};
data::CellData trany = {UnitSystem::measure::transmissibility, std::vector<double>( globalSize ), data::TargetType::INIT};
data::CellData tranz = {UnitSystem::measure::transmissibility, std::vector<double>( globalSize ), data::TargetType::INIT};
for (size_t i = 0; i < tranx.data.size(); ++i) {
tranx.data[i] = 0.0;
trany.data[i] = 0.0;
tranz.data[i] = 0.0;
}
const Grid& globalGrid = this->globalGrid();
const auto& globalGridView = globalGrid.leafGridView();
typedef typename Grid::LeafGridView GridView;
#if DUNE_VERSION_NEWER(DUNE_GEOMETRY, 2, 6)
typedef Dune::MultipleCodimMultipleGeomTypeMapper<GridView> ElementMapper;
ElementMapper globalElemMapper(globalGridView, Dune::mcmgElementLayout());
#else
typedef Dune::MultipleCodimMultipleGeomTypeMapper<GridView, Dune::MCMGElementLayout> ElementMapper;
ElementMapper globalElemMapper(globalGridView);
#endif
const auto& cartesianCellIdx = globalGrid.globalCell();
const auto* globalTrans = &(ebosSimulator_->gridManager().globalTransmissibility());
if (grid().comm().size() < 2) {
// In the sequential case we must use the transmissibilities defined by
// the problem, because the grid manager does not compute "global"
// transmissibilities for performance reasons. In the parallel case, the
// problem's transmissibilities cannot be used because they refer to the
// distributed grid and we need the sequential version here.
globalTrans = &ebosSimulator_->problem().eclTransmissibilities();
}
auto elemIt = globalGridView.template begin</*codim=*/0>();
const auto& elemEndIt = globalGridView.template end</*codim=*/0>();
for (; elemIt != elemEndIt; ++ elemIt) {
const auto& elem = *elemIt;
auto isIt = globalGridView.ibegin(elem);
const auto& isEndIt = globalGridView.iend(elem);
for (; isIt != isEndIt; ++ isIt) {
const auto& is = *isIt;
if (!is.neighbor())
{
continue; // intersection is on the domain boundary
}
unsigned c1 = globalElemMapper.index(is.inside());
unsigned c2 = globalElemMapper.index(is.outside());
if (c1 > c2)
{
continue; // we only need to handle each connection once, thank you.
}
int gc1 = std::min(cartesianCellIdx[c1], cartesianCellIdx[c2]);
int gc2 = std::max(cartesianCellIdx[c1], cartesianCellIdx[c2]);
if (gc2 - gc1 == 1) {
tranx.data[gc1] = globalTrans->transmissibility(c1, c2);
}
if (gc2 - gc1 == dims[0]) {
trany.data[gc1] = globalTrans->transmissibility(c1, c2);
}
if (gc2 - gc1 == dims[0]*dims[1]) {
tranz.data[gc1] = globalTrans->transmissibility(c1, c2);
}
}
}
return {{"TRANX" , tranx},
{"TRANY" , trany} ,
{"TRANZ" , tranz}};
}
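// Note: the gc2 - gc1 tests above follow from the natural Cartesian cell
// ordering g = i + nx*j + nx*ny*k (nx = dims[0], ny = dims[1]): direct
// neighbours in the X, Y and Z directions differ by exactly 1, nx and nx*ny
// respectively. A minimal worked example (illustrative only):
//
//   const int nx = 3, ny = 2;
//   auto g = [=](int i, int j, int k) { return i + nx*j + nx*ny*k; };
//   assert(g(1,0,0) - g(0,0,0) == 1);      // X neighbours
//   assert(g(0,1,0) - g(0,0,0) == nx);     // Y neighbours (3)
//   assert(g(0,0,1) - g(0,0,0) == nx*ny);  // Z neighbours (6)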
void exportNncStructure_()
{
nnc_ = eclState().getInputNNC();
int nx = eclState().getInputGrid().getNX();
int ny = eclState().getInputGrid().getNY();
//int nz = eclState().getInputGrid().getNZ()
const Grid& globalGrid = this->globalGrid();
const auto& globalGridView = globalGrid.leafGridView();
typedef typename Grid::LeafGridView GridView;
#if DUNE_VERSION_NEWER(DUNE_GEOMETRY, 2, 6)
typedef Dune::MultipleCodimMultipleGeomTypeMapper<GridView> ElementMapper;
ElementMapper globalElemMapper(globalGridView, Dune::mcmgElementLayout());
#else
typedef Dune::MultipleCodimMultipleGeomTypeMapper<GridView, Dune::MCMGElementLayout> ElementMapper;
ElementMapper globalElemMapper(globalGridView);
#endif
const auto* globalTrans = &(ebosSimulator_->gridManager().globalTransmissibility());
if (grid().comm().size() < 2) {
// In the sequential case we must use the transmissibilities defined by
// the problem, because the grid manager does not compute "global"
// transmissibilities for performance reasons. In the parallel case, the
// problem's transmissibilities cannot be used because they refer to the
// distributed grid and we need the sequential version here.
globalTrans = &ebosSimulator_->problem().eclTransmissibilities();
}
auto elemIt = globalGridView.template begin</*codim=*/0>();
const auto& elemEndIt = globalGridView.template end</*codim=*/0>();
for (; elemIt != elemEndIt; ++ elemIt) {
const auto& elem = *elemIt;
auto isIt = globalGridView.ibegin(elem);
const auto& isEndIt = globalGridView.iend(elem);
for (; isIt != isEndIt; ++ isIt) {
const auto& is = *isIt;
if (!is.neighbor())
{
continue; // intersection is on the domain boundary
}
unsigned c1 = globalElemMapper.index(is.inside());
unsigned c2 = globalElemMapper.index(is.outside());
if (c1 > c2)
{
continue; // we only need to handle each connection once, thank you.
}
// TODO (?): use the Cartesian index mapper to make this code work
// with grids other than Dune::CpGrid. The problem is that we need
// a mapper for the sequential grid, not for the distributed one.
int cc1 = globalGrid.globalCell()[c1];
int cc2 = globalGrid.globalCell()[c2];
if (std::abs(cc1 - cc2) != 1 &&
std::abs(cc1 - cc2) != nx &&
std::abs(cc1 - cc2) != nx*ny)
{
nnc_.addNNC(cc1, cc2, globalTrans->transmissibility(c1, c2));
}
}
}
}
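// Note: exportNncStructure_() therefore records a connection as a
// non-neighbour connection (NNC) exactly when the two Cartesian indices are
// not X, Y or Z neighbours. A stand-alone sketch of that predicate (name and
// signature are illustrative, not part of the codebase):
//
//   bool isNnc(int cc1, int cc2, int nx, int ny)
//   {
//       const int d = std::abs(cc1 - cc2);
//       return d != 1 && d != nx && d != nx*ny;
//   }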
/// Convert saturations from a vector of individual phase saturation vectors
/// to an interleaved format where all values for a given cell come before all
/// values for the next cell, all in a single vector.
template <class FluidSystem>
void convertSats(std::vector<double>& sat_interleaved, const std::vector< std::vector<double> >& sat, const PhaseUsage& pu)
{
assert(sat.size() == 3);
const auto nc = sat[0].size();
const auto np = sat_interleaved.size() / nc;
for (size_t c = 0; c < nc; ++c) {
if ( FluidSystem::phaseIsActive(FluidSystem::oilPhaseIdx)) {
const int opos = pu.phase_pos[BlackoilPhases::Liquid];
const std::vector<double>& sat_p = sat[ FluidSystem::oilPhaseIdx];
sat_interleaved[np*c + opos] = sat_p[c];
}
if ( FluidSystem::phaseIsActive(FluidSystem::waterPhaseIdx)) {
const int wpos = pu.phase_pos[BlackoilPhases::Aqua];
const std::vector<double>& sat_p = sat[ FluidSystem::waterPhaseIdx];
sat_interleaved[np*c + wpos] = sat_p[c];
}
if ( FluidSystem::phaseIsActive(FluidSystem::gasPhaseIdx)) {
const int gpos = pu.phase_pos[BlackoilPhases::Vapour];
const std::vector<double>& sat_p = sat[ FluidSystem::gasPhaseIdx];
sat_interleaved[np*c + gpos] = sat_p[c];
}
}
}
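// Note: layout produced by convertSats(): the np phase values of cell 0
// precede those of cell 1, and so on, with the value of phase p for cell c
// stored at sat_interleaved[np*c + pu.phase_pos[p]]. Illustrative example
// for two cells and three active phases (water, oil, gas):
//
//   per-phase input:     sat[water] = {sw0, sw1}, sat[oil] = {so0, so1},
//                        sat[gas]   = {sg0, sg1}
//   interleaved output:  {sw0, so0, sg0, sw1, so1, sg1}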
std::unique_ptr<EbosSimulator> ebosSimulator_;
int mpi_rank_ = 0;
bool output_cout_ = false;
@@ -913,15 +658,11 @@ namespace Opm
ParameterGroup param_;
bool output_to_files_ = false;
std::string output_dir_ = std::string(".");
NNC nnc_;
std::unique_ptr<EclipseIO> eclIO_;
std::unique_ptr<OutputWriter> output_writer_;
boost::any parallel_information_;
std::unique_ptr<NewtonIterationBlackoilInterface> fis_solver_;
std::unique_ptr<Simulator> simulator_;
std::string logFile_;
// Needs to be a shared pointer because it gets initialized before MPI_Init.
std::shared_ptr<Grid> globalGrid_;
};
} // namespace Opm