// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
/*!
 * \file
 *
 * \copydoc Ewoms::EclWriter
 */
#ifndef EWOMS_ECL_WRITER_HH
#define EWOMS_ECL_WRITER_HH

#include "collecttoiorank.hh"
#include "ecloutputblackoilmodule.hh"

#include <ewoms/disc/ecfv/ecfvdiscretization.hh>
#include <ewoms/io/baseoutputwriter.hh>

#include <ebos/threadhandle.hh>

#if HAVE_ECL_OUTPUT
#include <opm/output/eclipse/EclipseIO.hpp>
#endif

#include <opm/material/common/Valgrind.hpp>
#include <opm/material/common/Exceptions.hpp>

#include <boost/algorithm/string.hpp>

#include <list>
#include <utility>
#include <string>
#include <limits>
#include <sstream>
#include <fstream>
#include <type_traits>
namespace Ewoms {
namespace Properties {
NEW_PROP_TAG(EnableEclOutput);
NEW_PROP_TAG(EclOutputDoublePrecision);
}

template <class TypeTag>
class EclWriter;
/*!
 * \ingroup EclBlackOilSimulator
 *
 * \brief Collects the necessary output values and passes them to opm-output.
 *
 * Caveats:
 * - For this class to do anything meaningful, the OPM module opm-output
 *   is required.
 * - The only DUNE grid which is currently supported is Dune::CpGrid from
 *   the OPM module "opm-grid". Using another grid won't fail at compile
 *   time, but it will provoke a fatal exception as soon as you try to
 *   write an ECL output file.
 * - This class requires the black-oil model together with the
 *   element-centered finite volume discretization.
 */
template <class TypeTag>
class EclWriter
{
    typedef typename GET_PROP_TYPE(TypeTag, Simulator) Simulator;
    typedef typename GET_PROP_TYPE(TypeTag, Vanguard) Vanguard;
    typedef typename GET_PROP_TYPE(TypeTag, GridView) GridView;
    typedef typename GET_PROP_TYPE(TypeTag, Grid) Grid;
    typedef typename GET_PROP_TYPE(TypeTag, Scalar) Scalar;
    typedef typename GET_PROP_TYPE(TypeTag, ElementContext) ElementContext;
    typedef typename GET_PROP_TYPE(TypeTag, FluidSystem) FluidSystem;
    typedef typename GridView::template Codim<0>::Entity Element;
    typedef typename GridView::template Codim<0>::Iterator ElementIterator;

    typedef CollectDataToIORank<Vanguard> CollectDataToIORankType;

    typedef std::vector<Scalar> ScalarBuffer;
public:
    EclWriter(const Simulator& simulator)
        : simulator_(simulator)
        , collectToIORank_(simulator_.vanguard())
        , eclOutputModule_(simulator, collectToIORank_)
        , asyncOutput_()
    {
        globalGrid_ = simulator_.vanguard().grid();
        globalGrid_.switchToGlobalView();
        eclIO_.reset(new Opm::EclipseIO(simulator_.vanguard().eclState(),
                                        Opm::UgGridHelpers::createEclipseGrid(globalGrid_, simulator_.vanguard().eclState().getInputGrid()),
                                        simulator_.vanguard().schedule(),
                                        simulator_.vanguard().summaryConfig()));

        // create the output thread if asynchronous output is enabled and this is
        // the I/O rank. Asynchronous output is enabled by default if pthreads are
        // available.
#if HAVE_PTHREAD
        const bool asyncOutputDefault = true;
#else
        const bool asyncOutputDefault = false;
#endif

        // TODO: add a parameter to control this
        const bool isIORank = collectToIORank_.isParallel() && collectToIORank_.isIORank();
        if (asyncOutputDefault && isIORank) {
#if HAVE_PTHREAD
            asyncOutput_.reset(new Opm::ThreadHandle(isIORank));
#else
            throw std::runtime_error("Pthreads were not found, cannot enable async_output");
#endif
        }
    }

    ~EclWriter()
    { }
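    /*!
     * \brief Return a reference to the Opm::EclipseIO object used for writing.
     */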
    const Opm::EclipseIO& eclIO() const
    { return *eclIO_; }
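    /*!
     * \brief Write the static ECL INIT file.
     *
     * On the I/O rank this passes the transmissibilities, the per-cell MPI rank
     * (for parallel runs) and the NNC structure to the eclIO writer.
     */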
    void writeInit()
    {
#if !HAVE_ECL_OUTPUT
        throw std::runtime_error("Eclipse output support not available in opm-common, unable to write ECL output!");
#else
        if (collectToIORank_.isIORank()) {
            std::map<std::string, std::vector<int>> integerVectors;
            if (collectToIORank_.isParallel())
                integerVectors.emplace("MPI_RANK", collectToIORank_.globalRanks());
            eclIO_->writeInitial(computeTrans_(), integerVectors, exportNncStructure_());
        }
#endif
    }
    /*!
     * \brief Collect the data of the current time step and pass it to the eclIO writer.
     */
    void writeOutput(Opm::data::Wells& localWellData, Scalar t, bool substep, Scalar totalSolverTime, Scalar nextstep)
    {
#if !HAVE_ECL_OUTPUT
        throw std::runtime_error("Eclipse output support not available in opm-common, unable to write ECL output!");
#else
        int episodeIdx = simulator_.episodeIndex() + 1;
        const auto& gridView = simulator_.vanguard().gridView();
        int numElements = gridView.size(/*codim=*/0);
        bool log = collectToIORank_.isIORank();
        eclOutputModule_.allocBuffers(numElements, episodeIdx, substep, log);

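        // iterate over the grid and let the output module extract the relevant
        // quantities of each cell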
        ElementContext elemCtx(simulator_);
        ElementIterator elemIt = gridView.template begin</*codim=*/0>();
        const ElementIterator& elemEndIt = gridView.template end</*codim=*/0>();
        for (; elemIt != elemEndIt; ++elemIt) {
            const Element& elem = *elemIt;
            elemCtx.updatePrimaryStencil(elem);
            elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
            eclOutputModule_.processElement(elemCtx);
        }
        eclOutputModule_.outputErrorLog();

        // collect all data on the I/O rank and assign it to the solution container
        Opm::data::Solution localCellData = {};
        if (!substep)
            eclOutputModule_.assignToSolution(localCellData);

        // add cell data to well perforations for RFT output
        if (!substep)
            eclOutputModule_.addRftDataToWells(localWellData, episodeIdx);

        if (collectToIORank_.isParallel())
            collectToIORank_.collect(localCellData, eclOutputModule_.getBlockData(), localWellData);

        std::map<std::string, double> miscSummaryData;
        std::map<std::string, std::vector<double>> regionData;
        eclOutputModule_.outputFipLog(miscSummaryData, regionData, substep);

        // write output on the I/O rank
        if (collectToIORank_.isIORank()) {
            std::map<std::string, std::vector<double>> extraRestartData;

            // add the suggested next time step size to the extra data
            if (!substep)
                extraRestartData["OPMEXTRA"] = std::vector<double>(1, nextstep);

            // add TCPU
            if (totalSolverTime != 0.0) {
                miscSummaryData["TCPU"] = totalSolverTime;
            }

            bool enableDoublePrecisionOutput = EWOMS_GET_PARAM(TypeTag, bool, EclOutputDoublePrecision);
            const Opm::data::Solution& cellData = collectToIORank_.isParallel() ? collectToIORank_.globalCellData() : localCellData;
            const Opm::data::Wells& wellData = collectToIORank_.isParallel() ? collectToIORank_.globalWellData() : localWellData;
            const std::map<std::pair<std::string, int>, double>& blockData = collectToIORank_.isParallel() ? collectToIORank_.globalBlockData() : eclOutputModule_.getBlockData();

            if (asyncOutput_) {
                // dispatch the write call to the extra output thread
                asyncOutput_->dispatch(WriterCall(*eclIO_,
                                                  episodeIdx,
                                                  substep,
                                                  t,
                                                  cellData,
                                                  wellData,
                                                  miscSummaryData,
                                                  regionData,
                                                  blockData,
                                                  extraRestartData,
                                                  enableDoublePrecisionOutput));
            }
            else {
                eclIO_->writeTimeStep(episodeIdx,
                                      substep,
                                      t,
                                      cellData,
                                      wellData,
                                      miscSummaryData,
                                      regionData,
                                      blockData,
                                      extraRestartData,
                                      enableDoublePrecisionOutput);
            }
        }
#endif
    }
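    /*!
     * \brief Load the solution of a restart time step from the ECL restart file
     *        and hand it over to the output module.
     */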
    void restartBegin()
    {
        bool enableHysteresis = simulator_.problem().materialLawManager()->enableHysteresis();
        std::map<std::string, Opm::RestartKey> solution_keys {{"PRESSURE", Opm::RestartKey(Opm::UnitSystem::measure::pressure)},
                                                              {"SWAT", Opm::RestartKey(Opm::UnitSystem::measure::identity, FluidSystem::phaseIsActive(FluidSystem::waterPhaseIdx))},
                                                              {"SGAS", Opm::RestartKey(Opm::UnitSystem::measure::identity, FluidSystem::phaseIsActive(FluidSystem::gasPhaseIdx))},
                                                              {"TEMP", Opm::RestartKey(Opm::UnitSystem::measure::temperature)}, // always required for now
                                                              {"RS", Opm::RestartKey(Opm::UnitSystem::measure::gas_oil_ratio, FluidSystem::enableDissolvedGas())},
                                                              {"RV", Opm::RestartKey(Opm::UnitSystem::measure::oil_gas_ratio, FluidSystem::enableVaporizedOil())},
                                                              {"SOMAX", {Opm::UnitSystem::measure::identity, simulator_.problem().vapparsActive()}},
                                                              {"PCSWM_OW", {Opm::UnitSystem::measure::identity, enableHysteresis}},
                                                              {"KRNSW_OW", {Opm::UnitSystem::measure::identity, enableHysteresis}},
                                                              {"PCSWM_GO", {Opm::UnitSystem::measure::identity, enableHysteresis}},
                                                              {"KRNSW_GO", {Opm::UnitSystem::measure::identity, enableHysteresis}}};

        std::map<std::string, bool> extra_keys {
            {"OPMEXTRA", false}
        };

        unsigned episodeIdx = simulator_.episodeIndex();
        const auto& gridView = simulator_.vanguard().gridView();
        unsigned numElements = gridView.size(/*codim=*/0);
        eclOutputModule_.allocBuffers(numElements, episodeIdx, /*substep=*/false, /*log=*/false);

        auto restart_values = eclIO_->loadRestart(solution_keys, extra_keys);
        for (unsigned elemIdx = 0; elemIdx < numElements; ++elemIdx) {
            unsigned globalIdx = collectToIORank_.localIdxToGlobalIdx(elemIdx);
            eclOutputModule_.setRestart(restart_values.solution, elemIdx, globalIdx);
        }
    }
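    /*!
     * \brief Return a const reference to the module which assembles the
     *        cell-wise output quantities.
     */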
    const EclOutputBlackOilModule<TypeTag>& eclOutputModule() const
    { return eclOutputModule_; }

private:
    static bool enableEclOutput_()
    { return EWOMS_GET_PARAM(TypeTag, bool, EnableEclOutput); }
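    /*!
     * \brief Compute the global TRANX, TRANY and TRANZ arrays for the INIT file
     *        from the simulator's transmissibilities.
     */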
    Opm::data::Solution computeTrans_() const
    {
        const auto& cartMapper = simulator_.vanguard().cartesianIndexMapper();
        const auto& cartDims = cartMapper.cartesianDimensions();
        const int globalSize = cartDims[0]*cartDims[1]*cartDims[2];

        Opm::data::CellData tranx = {Opm::UnitSystem::measure::transmissibility, std::vector<double>(globalSize), Opm::data::TargetType::INIT};
        Opm::data::CellData trany = {Opm::UnitSystem::measure::transmissibility, std::vector<double>(globalSize), Opm::data::TargetType::INIT};
        Opm::data::CellData tranz = {Opm::UnitSystem::measure::transmissibility, std::vector<double>(globalSize), Opm::data::TargetType::INIT};

        for (size_t i = 0; i < tranx.data.size(); ++i) {
            tranx.data[i] = 0.0;
            trany.data[i] = 0.0;
            tranz.data[i] = 0.0;
        }

        const auto& globalGridView = globalGrid_.leafGridView();
        typedef typename Grid::LeafGridView GridView;
#if DUNE_VERSION_NEWER(DUNE_GRID, 2,6)
        typedef Dune::MultipleCodimMultipleGeomTypeMapper<GridView> ElementMapper;
        ElementMapper globalElemMapper(globalGridView, Dune::mcmgElementLayout());
#else
        typedef Dune::MultipleCodimMultipleGeomTypeMapper<GridView, Dune::MCMGElementLayout> ElementMapper;
        ElementMapper globalElemMapper(globalGridView);
#endif

        const auto& cartesianCellIdx = globalGrid_.globalCell();

        const auto* globalTrans = &(simulator_.vanguard().globalTransmissibility());
        if (!collectToIORank_.isParallel()) {
            // in the sequential case we must use the transmissibilities defined by
            // the problem. (in the sequential case, the grid manager does not
            // compute "global" transmissibilities for performance reasons. in the
            // parallel case, the problem's transmissibilities can't be used because
            // they refer to the distributed grid and we need the sequential version
            // here.)
            globalTrans = &simulator_.problem().eclTransmissibilities();
        }

        auto elemIt = globalGridView.template begin</*codim=*/0>();
        const auto& elemEndIt = globalGridView.template end</*codim=*/0>();
        for (; elemIt != elemEndIt; ++elemIt) {
            const auto& elem = *elemIt;

            auto isIt = globalGridView.ibegin(elem);
            const auto& isEndIt = globalGridView.iend(elem);
            for (; isIt != isEndIt; ++isIt) {
                const auto& is = *isIt;

                if (!is.neighbor())
                    continue; // intersection is on the domain boundary

                unsigned c1 = globalElemMapper.index(is.inside());
                unsigned c2 = globalElemMapper.index(is.outside());

                if (c1 > c2)
                    continue; // we only need to handle each connection once

                int gc1 = std::min(cartesianCellIdx[c1], cartesianCellIdx[c2]);
                int gc2 = std::max(cartesianCellIdx[c1], cartesianCellIdx[c2]);
                if (gc2 - gc1 == 1) {
                    tranx.data[gc1] = globalTrans->transmissibility(c1, c2);
                }

                if (gc2 - gc1 == cartDims[0]) {
                    trany.data[gc1] = globalTrans->transmissibility(c1, c2);
                }

                if (gc2 - gc1 == cartDims[0]*cartDims[1]) {
                    tranz.data[gc1] = globalTrans->transmissibility(c1, c2);
                }
            }
        }

        return {{"TRANX", tranx},
                {"TRANY", trany},
                {"TRANZ", tranz}};
    }
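    /*!
     * \brief Determine the non-neighbor connections (NNCs) of the global grid
     *        and their transmissibilities for the INIT file.
     */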
    Opm::NNC exportNncStructure_() const
    {
        Opm::NNC nnc = eclState().getInputNNC();
        int nx = eclState().getInputGrid().getNX();
        int ny = eclState().getInputGrid().getNY();

        const auto& globalGridView = globalGrid_.leafGridView();
        typedef typename Grid::LeafGridView GridView;
#if DUNE_VERSION_NEWER(DUNE_GRID, 2,6)
        typedef Dune::MultipleCodimMultipleGeomTypeMapper<GridView> ElementMapper;
        ElementMapper globalElemMapper(globalGridView, Dune::mcmgElementLayout());
#else
        typedef Dune::MultipleCodimMultipleGeomTypeMapper<GridView, Dune::MCMGElementLayout> ElementMapper;
        ElementMapper globalElemMapper(globalGridView);
#endif

        const auto* globalTrans = &(simulator_.vanguard().globalTransmissibility());
        if (!collectToIORank_.isParallel()) {
            // in the sequential case we must use the transmissibilities defined by
            // the problem. (in the sequential case, the grid manager does not
            // compute "global" transmissibilities for performance reasons. in the
            // parallel case, the problem's transmissibilities can't be used because
            // they refer to the distributed grid and we need the sequential version
            // here.)
            globalTrans = &simulator_.problem().eclTransmissibilities();
        }

        auto elemIt = globalGridView.template begin</*codim=*/0>();
        const auto& elemEndIt = globalGridView.template end</*codim=*/0>();
        for (; elemIt != elemEndIt; ++elemIt) {
            const auto& elem = *elemIt;

            auto isIt = globalGridView.ibegin(elem);
            const auto& isEndIt = globalGridView.iend(elem);
            for (; isIt != isEndIt; ++isIt) {
                const auto& is = *isIt;

                if (!is.neighbor())
                    continue; // intersection is on the domain boundary

                unsigned c1 = globalElemMapper.index(is.inside());
                unsigned c2 = globalElemMapper.index(is.outside());

                if (c1 > c2)
                    continue; // we only need to handle each connection once

                // TODO (?): use the cartesian index mapper to make this code work
                // with grids other than Dune::CpGrid. The problem is that we need
                // a mapper for the sequential grid, not for the distributed one.
                int cc1 = globalGrid_.globalCell()[c1];
                int cc2 = globalGrid_.globalCell()[c2];

                if (std::abs(cc1 - cc2) != 1 &&
                    std::abs(cc1 - cc2) != nx &&
                    std::abs(cc1 - cc2) != nx*ny)
                {
                    nnc.addNNC(cc1, cc2, globalTrans->transmissibility(c1, c2));
                }
            }
        }
        return nnc;
    }
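    /*!
     * \brief Payload object dispatched to the asynchronous output thread; it stores
     *        the data required by Opm::EclipseIO::writeTimeStep() for one output step.
     */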
    struct WriterCall : public Opm::ThreadHandle::ObjectInterface
    {
        Opm::EclipseIO& eclIO_;
        int episodeIdx_;
        bool isSubstep_;
        double secondsElapsed_;
        Opm::data::Solution cellData_;
        Opm::data::Wells wellData_;
        std::map<std::string, double> singleSummaryValues_;
        std::map<std::string, std::vector<double>> regionSummaryValues_;
        std::map<std::pair<std::string, int>, double> blockSummaryValues_;
        std::map<std::string, std::vector<double>> extraRestartData_;
        bool writeDoublePrecision_;

        explicit WriterCall(Opm::EclipseIO& eclIO,
                            int episodeIdx,
                            bool isSubstep,
                            double secondsElapsed,
                            Opm::data::Solution cellData,
                            Opm::data::Wells wellData,
                            const std::map<std::string, double>& singleSummaryValues,
                            const std::map<std::string, std::vector<double>>& regionSummaryValues,
                            const std::map<std::pair<std::string, int>, double>& blockSummaryValues,
                            const std::map<std::string, std::vector<double>>& extraRestartData,
                            bool writeDoublePrecision)
            : eclIO_(eclIO)
            , episodeIdx_(episodeIdx)
            , isSubstep_(isSubstep)
            , secondsElapsed_(secondsElapsed)
            , cellData_(cellData)
            , wellData_(wellData)
            , singleSummaryValues_(singleSummaryValues)
            , regionSummaryValues_(regionSummaryValues)
            , blockSummaryValues_(blockSummaryValues)
            , extraRestartData_(extraRestartData)
            , writeDoublePrecision_(writeDoublePrecision)
        { }

        // callback which invokes the serial writeTimeStep() method of eclIO
        void run()
        {
            // write the data for the current time step
            eclIO_.writeTimeStep(episodeIdx_,
                                 isSubstep_,
                                 secondsElapsed_,
                                 cellData_,
                                 wellData_,
                                 singleSummaryValues_,
                                 regionSummaryValues_,
                                 blockSummaryValues_,
                                 extraRestartData_,
                                 writeDoublePrecision_);
        }
    };

    const Opm::EclipseState& eclState() const
    { return simulator_.vanguard().eclState(); }
    const Simulator& simulator_;
    CollectDataToIORankType collectToIORank_;
    EclOutputBlackOilModule<TypeTag> eclOutputModule_;
    std::unique_ptr<Opm::EclipseIO> eclIO_;
    Grid globalGrid_;
    std::unique_ptr<Opm::ThreadHandle> asyncOutput_;
};
} // namespace Ewoms

#endif