// -*- mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
// vi: set et ts=4 sw=4 sts=4:
/*
  Copyright 2022 SINTEF Digital, Mathematics and Cybernetics.
  Copyright 2023 Inria, Bretagne-Atlantique Research Center

  This file is part of the Open Porous Media project (OPM).

  OPM is free software: you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation, either version 2 of the License, or
  (at your option) any later version.

  OPM is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with OPM. If not, see <http://www.gnu.org/licenses/>.

  Consult the COPYING file in the top-level source directory of this
  module for the precise wording of the license and the list of
  copyright holders.
*/
/*!
 * \file
 *
 * \copydoc Opm::DamarisWriter
 */
#ifndef EWOMS_DAMARIS_WRITER_HH
#define EWOMS_DAMARIS_WRITER_HH

#include <dune/grid/common/partitionset.hh>

#include <ebos/damaris_properties.hh>
#include <ebos/eclbasevanguard.hh>
#include <ebos/eclgenericwriter.hh>
#include <ebos/ecloutputblackoilmodule.hh>

#include <opm/common/OpmLog/OpmLog.hpp>

#include <opm/simulators/flow/countGlobalCells.hpp>
#include <opm/simulators/utils/DamarisVar.hpp>
#include <opm/simulators/utils/DeferredLoggingErrorHelpers.hpp>
#include <opm/simulators/utils/GridDataOutput.hpp>

#include <fmt/format.h>

#include <Damaris.h>

#include <limits>
#include <memory>
#include <stdexcept>
#include <string>

namespace Opm {

/*!
 * \ingroup EclBlackOilSimulator
 *
 * \brief Collects the necessary output values and passes them to the Damaris server processes.
 *
 * Currently only PRESSURE, GLOBAL_CELL_INDEX and MPI_RANK information is passed through.
 * This class also passes the 3D mesh information to Damaris to enable in-situ visualization
 * via ParaView or Ascent, and is designed so that variables specified through the Eclipse
 * input deck will be made available to Damaris.
 */
template <class TypeTag>
class DamarisWriter : public EclGenericWriter<GetPropType<TypeTag, Properties::Grid>,
                                              GetPropType<TypeTag, Properties::EquilGrid>,
                                              GetPropType<TypeTag, Properties::GridView>,
                                              GetPropType<TypeTag, Properties::ElementMapper>,
                                              GetPropType<TypeTag, Properties::Scalar>>
{
    using Simulator = GetPropType<TypeTag, Properties::Simulator>;
    using GridView = GetPropType<TypeTag, Properties::GridView>;
    using Grid = GetPropType<TypeTag, Properties::Grid>;
    using EquilGrid = GetPropType<TypeTag, Properties::EquilGrid>;
    using Scalar = GetPropType<TypeTag, Properties::Scalar>;
    using ElementContext = GetPropType<TypeTag, Properties::ElementContext>;
    using Element = typename GridView::template Codim<0>::Entity;
    using ElementMapper = GetPropType<TypeTag, Properties::ElementMapper>;
    using BaseType = EclGenericWriter<Grid, EquilGrid, GridView, ElementMapper, Scalar>;

    using DamarisVarInt = Opm::DamarisOutput::DamarisVar<int>;
    using DamarisVarChar = Opm::DamarisOutput::DamarisVar<char>;
    using DamarisVarDbl = Opm::DamarisOutput::DamarisVar<double>;

public:
    static void registerParameters()
    {
        EWOMS_REGISTER_PARAM(TypeTag, bool, DamarisOutputHdfCollective,
                             "Write output via Damaris using parallel HDF5 to get single file and dataset per timestep instead of one per Damaris\n"
                             "core with multiple datasets.");
        EWOMS_REGISTER_PARAM(TypeTag, bool, DamarisSaveToHdf,
                             "Set to false to prevent output to HDF5. Uses collective output by default or set --enable-damaris-collective=false to\n"
                             "use file per core (file per Damaris server).");
        EWOMS_REGISTER_PARAM(TypeTag, bool, DamarisSaveMeshToHdf,
                             "Saves the mesh data to the HDF5 file (1st iteration only). Will set --damaris-output-hdf-collective to false,\n"
                             "so will use file per core (file per Damaris server) output (global sizes and offset values\n"
                             "of mesh variables are not being provided as yet).");
        EWOMS_REGISTER_PARAM(TypeTag, std::string, DamarisPythonScript,
                             "Set to the path and filename of a Python script to run on Damaris server resources with access to OPM flow data.");
        EWOMS_REGISTER_PARAM(TypeTag, std::string, DamarisPythonParaviewScript,
                             "Set to the path and filename of a Paraview Python script to run on Paraview Catalyst (1 or 2) on Damaris server\n"
                             "resources with access to OPM flow data.");
        EWOMS_REGISTER_PARAM(TypeTag, std::string, DamarisSimName,
                             "The name of the simulation to be used by Damaris. If empty (the default) then Damaris uses \"opm-sim-<random-number>\".\n"
                             "This name is used for the Damaris HDF5 file name prefix. Make unique if writing to the same output directory.");
        EWOMS_REGISTER_PARAM(TypeTag, std::string, DamarisLogLevel,
                             "The log level for the Damaris logging system (boost log based).\n"
                             "Levels are: [trace, debug, info, warning, error, fatal]. Currently debug and info are useful.");
        EWOMS_REGISTER_PARAM(TypeTag, std::string, DamarisDaskFile,
                             "The name of a Dask json configuration file (if using Dask for processing).");
        EWOMS_REGISTER_PARAM(TypeTag, int, DamarisDedicatedCores,
                             "Set the number of dedicated cores (MPI processes) that should be used for Damaris processing (per node).\n"
                             "Must divide evenly into the number of simulation ranks (client ranks).");
        EWOMS_REGISTER_PARAM(TypeTag, int, DamarisDedicatedNodes,
                             "Set the number of dedicated nodes (full nodes) that should be used for Damaris processing (per simulation).\n"
                             "Must divide evenly into the number of simulation nodes.");
        EWOMS_REGISTER_PARAM(TypeTag, long, DamarisSharedMemorySizeBytes,
                             "Set the size of the shared memory buffer used for IPC between the simulation and the Damaris resources.\n"
                             "Needs to hold all the variables published, possibly over multiple simulation iterations.");
        EWOMS_REGISTER_PARAM(TypeTag, std::string, DamarisSharedMemoryName,
                             "The name of the shared memory area to be used by Damaris for the current simulation. If empty (the default)\n"
                             "then Damaris uses \"opm-damaris-<random-string>\". This name should be unique if multiple simulations are\n"
                             "running on the same node/server as it is used for the Damaris shmem name and by the Python Dask\n"
                             "library to locate sections of variables.");
    }
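
    // Example (for orientation only): the parameters registered above typically surface as
    // command-line flags of the simulator. The flag spellings below are inferred from the
    // parameter names and the help strings, so treat them as assumptions, not a definitive list:
    //   flow CASE.DATA --enable-damaris-output=true \
    //                  --damaris-output-hdf-collective=false \
    //                  --damaris-dedicated-cores=1 \
    //                  --damaris-log-level=info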
    // The Simulator object should preferably have been const - the
    // only reason that is not the case is due to the SummaryState
    // object owned deep down by the vanguard.
    DamarisWriter(Simulator& simulator)
        : BaseType(simulator.vanguard().schedule(),
                   simulator.vanguard().eclState(),
                   simulator.vanguard().summaryConfig(),
                   simulator.vanguard().grid(),
                   ((simulator.vanguard().grid().comm().rank() == 0)
                    ? &simulator.vanguard().equilGrid() : nullptr),
                   simulator.vanguard().gridView(),
                   simulator.vanguard().cartesianIndexMapper(),
                   ((simulator.vanguard().grid().comm().rank() == 0)
                    ? &simulator.vanguard().equilCartesianIndexMapper() : nullptr),
                   false, false)
        , simulator_(simulator)
    {
        this->damarisUpdate_ = true;

        rank_ = simulator_.vanguard().grid().comm().rank();
        nranks_ = simulator_.vanguard().grid().comm().size();

        const auto& gridView = simulator_.gridView();
        const auto& interior_elements = elements(gridView, Dune::Partitions::interior);
        // Get the size of the unique vector elements (excludes the shared 'ghost' elements)
        numElements_ = std::distance(interior_elements.begin(), interior_elements.end());

        this->elements_rank_offsets_.resize(nranks_);
        this->damarisOutputModule_ = std::make_unique<EclOutputBlackOilModule<TypeTag>>(simulator, this->collectToIORank_);
    }

    ~DamarisWriter()
    { }
    /*!
     * \brief Writes localCellData through to the Damaris servers. Sets up the unstructured mesh which is passed to Damaris.
     */
    void writeOutput(data::Solution& localCellData, bool isSubStep)
    {
        OPM_TIMEBLOCK(writeOutput);
        const int reportStepNum = simulator_.episodeIndex() + 1;

        // added this as localCellData was not being written
        if (!isSubStep)
            this->damarisOutputModule_->invalidateLocalData();

        this->prepareLocalCellData(isSubStep, reportStepNum);
        this->damarisOutputModule_->outputErrorLog(simulator_.gridView().comm());

        // The DamarisWriter is not outputting well or aquifer data (yet)
        auto localWellData = simulator_.problem().wellModel().wellData(); // data::Well

        if (!isSubStep) {
            if (localCellData.size() == 0) {
                this->damarisOutputModule_->assignToSolution(localCellData);
            }

            // add cell data to perforations for RFT output
            this->damarisOutputModule_->addRftDataToWells(localWellData, reportStepNum);

            // On the first call, and whenever the mesh or variable sizes change, damarisUpdate_ should be true
            if (damarisUpdate_ == true) {
                // Sets the Damaris parameter values "n_elements_local" and "n_elements_total",
                // which define the sizes of the Damaris variables, per-rank and globally (over all ranks).
                // Also sets the offsets to where a rank's array data sits within the global array.
                // This is useful for HDF5 output and for defining distributed arrays in Dask.
                this->setupDamarisWritingPars(simulator_.vanguard().grid().comm(), numElements_, elements_rank_offsets_);

                // sets data for non-time-varying variables MPI_RANK and GLOBAL_CELL_INDEX
                this->setGlobalIndexForDamaris();

                // Set the geometry data for the mesh model.
                // This function writes the mesh data directly to Damaris shared memory using Opm::DamarisOutput::DamarisVar objects.
                this->writeDamarisGridOutput();

                // Currently, by default, we assume a static mesh grid (the geometry is unchanging through the simulation).
                // Set damarisUpdate_ to true if we want to update the geometry sent to Damaris.
                this->damarisUpdate_ = false;
            }

            if (this->damarisOutputModule_->getPRESSURE_ptr() != nullptr) {
                int64_t temp_int64_t[1];
                temp_int64_t[0] = static_cast<int64_t>(this->elements_rank_offsets_[rank_]);
                dam_err_ = damaris_set_position("PRESSURE", temp_int64_t);
                if (dam_err_ != DAMARIS_OK) {
                    OpmLog::error(fmt::format("damariswriter::writeOutput() : (rank:{}) "
                                              "damaris_set_position(PRESSURE, ...), Damaris Error: {}",
                                              rank_, damaris_error_string(dam_err_)));
                }

                dam_err_ = damaris_write("PRESSURE", (void*)this->damarisOutputModule_->getPRESSURE_ptr());
                if (dam_err_ != DAMARIS_OK) {
                    OpmLog::error(fmt::format("damariswriter::writeOutput() : (rank:{}) "
                                              "damaris_write(PRESSURE, ...), Damaris Error: {}",
                                              rank_, damaris_error_string(dam_err_)));
                }

                dam_err_ = damaris_end_iteration();
                if (dam_err_ != DAMARIS_OK) {
                    OpmLog::error(fmt::format("damariswriter::writeOutput() : (rank:{}) "
                                              "damaris_end_iteration(), Damaris Error: {}",
                                              rank_, damaris_error_string(dam_err_)));
                }
            }
        } // end of !isSubStep
    }
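
    // Note on the Damaris call sequence used in writeOutput() above (descriptive summary only):
    // damaris_set_position() publishes this rank's offset into the global array,
    // damaris_write() copies the field data into Damaris-managed shared memory, and
    // damaris_end_iteration() signals the Damaris server cores that this iteration's data is complete.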

private:
    int dam_err_;
    int rank_;
    int nranks_;
    int numElements_;  ///< size of the unique vector elements
    Simulator& simulator_;
    std::unique_ptr<EclOutputBlackOilModule<TypeTag>> damarisOutputModule_;
    std::vector<unsigned long long> elements_rank_offsets_;
    bool damarisUpdate_ = false;  ///< Whenever this is true, writeOutput() will set up the Damaris mesh information and offsets of model fields

    static bool enableDamarisOutput_()
    {
        return EWOMS_GET_PARAM(TypeTag, bool, EnableDamarisOutput);
    }
    void setGlobalIndexForDamaris()
    {
        // GLOBAL_CELL_INDEX is used to reorder variable data when writing to disk.
        // This is enabled using select-file="GLOBAL_CELL_INDEX" in the <variable> XML tag.
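        // Illustrative sketch of a variable definition that uses it (an assumption based on the
        // comment above, not copied from the actual generated XML):
        //   <variable name="PRESSURE" layout="..." select-file="GLOBAL_CELL_INDEX" ... />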
        if (this->collectToIORank_.isParallel()) {
            const std::vector<int>& local_to_global = this->collectToIORank_.localIdxToGlobalIdxMapping();
            dam_err_ = damaris_write("GLOBAL_CELL_INDEX", local_to_global.data());
        } else {
            std::vector<int> local_to_global_filled;
            local_to_global_filled.resize(this->numElements_);
            for (int i = 0; i < this->numElements_; i++) {
                local_to_global_filled[i] = i;
            }
            dam_err_ = damaris_write("GLOBAL_CELL_INDEX", local_to_global_filled.data());
        }

        if (dam_err_ != DAMARIS_OK) {
            OpmLog::error(fmt::format("damariswriter::writeOutput() : "
                                      "(rank:{}) damaris_write(GLOBAL_CELL_INDEX, ...), Damaris Error: {}",
                                      rank_, damaris_error_string(dam_err_)));
        }

        // This is an example of writing to the Damaris shared memory directly (i.e. not using
        // damaris_write() to copy data there).
        // We will add the MPI rank value directly into shared memory using the DamarisVar
        // wrapper of the C-based Damaris API.
        // The shared memory is given back to Damaris when the DamarisVarInt goes out of scope.
        DamarisVarInt mpi_rank_var_test(1, {std::string("n_elements_local")}, std::string("MPI_RANK"), rank_);
        mpi_rank_var_test.setDamarisParameterAndShmem({this->numElements_});
        // Fill the created memory area
        for (int i = 0; i < this->numElements_; i++) {
            mpi_rank_var_test.data()[i] = rank_;  // write the rank value to the shared memory area.
        }
    }

    void setupDamarisWritingPars(Parallel::Communication comm, const int n_elements_local_grid, std::vector<unsigned long long>& elements_rank_offsets)
    {
        // one for each rank -- to be gathered from each client rank
        std::vector<unsigned long long> elements_rank_sizes(nranks_);
        // n_elements_local_grid should be the full model size
        const unsigned long long n_elements_local = n_elements_local_grid;

        // This gathers n_elements_local from all ranks and copies the values into a std::vector
        // present on every rank (elements_rank_sizes[]).
        comm.allgather(&n_elements_local, 1, elements_rank_sizes.data());
        elements_rank_offsets[0] = 0ULL;
        // This scan computes the offsets to the start of each rank's grid section as if each local
        // grid's data were concatenated (in rank order).
        for (int t1 = 1; t1 < nranks_; t1++) {
            elements_rank_offsets[t1] = elements_rank_offsets[t1 - 1] + elements_rank_sizes[t1 - 1];
        }
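
        // Worked example (illustrative numbers only): with nranks_ == 3 and
        // elements_rank_sizes == {100, 120, 90}, the exclusive scan above gives
        // elements_rank_offsets == {0, 100, 220}; the global total computed below is 220 + 90 = 310.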
        // find the global/total size
        unsigned long long n_elements_global_max = elements_rank_offsets[nranks_ - 1];
        n_elements_global_max += elements_rank_sizes[nranks_ - 1];  // add the last rank's size to the already accumulated offset values

        if (rank_ == 0) {
            OpmLog::debug(fmt::format("In setupDamarisWritingPars(): n_elements_global_max = {}", n_elements_global_max));
        }

        // Set the parameter so that the Damaris servers can allocate the correct amount of memory for the variable.
        // Damaris parameters only support int data types. This limits models to fewer than 2^31-1 elements.
        // ToDo: Do we need to check that local ranks are 0 based ?
        int temp_int = static_cast<int>(elements_rank_sizes[rank_]);
        dam_err_ = damaris_parameter_set("n_elements_local", &temp_int, sizeof(int));
        if (dam_err_ != DAMARIS_OK) {
            OpmLog::error("(rank:" + std::to_string(rank_) + ") Damaris library produced an error result for "
                          "damaris_parameter_set(\"n_elements_local\", &temp_int, sizeof(int));");
        }
        // Damaris parameters only support int data types. This limits models to fewer than 2^31-1 elements.
        // ToDo: Do we need to check that n_elements_global_max will fit in a C int type (INT_MAX)
        if (n_elements_global_max <= std::numeric_limits<int>::max()) {
            temp_int = static_cast<int>(n_elements_global_max);
            dam_err_ = damaris_parameter_set("n_elements_total", &temp_int, sizeof(int));
            if (dam_err_ != DAMARIS_OK) {
                OpmLog::error("(rank:" + std::to_string(rank_) + ") Damaris library produced an error result for "
                              "damaris_parameter_set(\"n_elements_total\", &temp_int, sizeof(int));");
            }
        } else {
            OpmLog::error(fmt::format("(rank:{}) The size of the global array ({}) is "
                                      "greater than what a Damaris parameter type supports ({}).",
                                      rank_, n_elements_global_max, std::numeric_limits<int>::max()));
            // assert( n_elements_global_max <= std::numeric_limits<int>::max() );
            OPM_THROW(std::runtime_error, "setupDamarisWritingPars() n_elements_global_max > std::numeric_limits<int>::max() " + std::to_string(dam_err_));
        }

        // Use damaris_set_position to set the offset in the global size of the array.
        // This is used so that output functionality (e.g. HDF5Store) knows the global offsets of each rank's data.
        int64_t temp_int64_t[1];
        temp_int64_t[0] = static_cast<int64_t>(elements_rank_offsets[rank_]);
        dam_err_ = damaris_set_position("PRESSURE", temp_int64_t);
        if (dam_err_ != DAMARIS_OK) {
            OpmLog::error("(rank:" + std::to_string(rank_) + ") Damaris library produced an error result for "
                          "damaris_set_position(\"PRESSURE\", temp_int64_t);");
        }
        dam_err_ = damaris_set_position("GLOBAL_CELL_INDEX", temp_int64_t);
        if (dam_err_ != DAMARIS_OK) {
            OpmLog::error("(rank:" + std::to_string(rank_) + ") Damaris library produced an error result for "
                          "damaris_set_position(\"GLOBAL_CELL_INDEX\", temp_int64_t);");
        }

        // Set the position (offset) of the MPI_RANK variable
        DamarisVarInt mpi_rank_var(1, {std::string("n_elements_local")}, std::string("MPI_RANK"), rank_);
        mpi_rank_var.setDamarisPosition({*temp_int64_t});
    }

    void writeDamarisGridOutput(void)
    {
        const auto& gridView = simulator_.gridView();
        Opm::GridDataOutput::SimMeshDataAccessor geomData(gridView, Dune::Partitions::interior);

        try {
            const bool hasPolyCells = geomData.polyhedralCellPresent();
            if (hasPolyCells) {
                OpmLog::error(fmt::format("ERROR: rank {} The DUNE geometry grid has polyhedral elements - These elements are currently not supported.", rank_));
            }

            // This is the template XML model for x,y,z coordinates defined in initDamarisXmlFile.cpp which is used to
            // build the internally generated Damaris XML configuration file.
            // <parameter name="n_coords_local" type="int" value="1" />
            // <parameter name="n_coords_global" type="int" value="1" comment="only needed if we need to write to HDF5 in Collective mode"/>
            // <layout name="n_coords_layout" type="double" dimensions="n_coords_local" comment="For the individual x, y and z coordinates of the mesh vertices" />
            // <group name="coordset/coords/values">
            //     <variable name="x" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            //     <variable name="y" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            //     <variable name="z" layout="n_coords_layout" type="scalar" visualizable="false" unit="m" script="PythonConduitTest" time-varying="false" />
            // </group>
            DamarisVarDbl var_x(1, {std::string("n_coords_local")}, std::string("coordset/coords/values/x"), rank_);
            // N.B. We have not set any position/offset values (using DamarisVar::SetDamarisPosition).
            // They are not needed for mesh data as each process has a local geometric model.
            // However, HDF5 collective and Dask arrays cannot be used for this data.
            var_x.setDamarisParameterAndShmem({geomData.getNVertices()});

            DamarisVarDbl var_y(1, {std::string("n_coords_local")}, std::string("coordset/coords/values/y"), rank_);
            var_y.setDamarisParameterAndShmem({geomData.getNVertices()});

            DamarisVarDbl var_z(1, {std::string("n_coords_local")}, std::string("coordset/coords/values/z"), rank_);
            var_z.setDamarisParameterAndShmem({geomData.getNVertices()});

            // Now we can use the shared memory area that Damaris has allocated and use it to write the x,y,z coordinates
            if (geomData.writeGridPoints(var_x, var_y, var_z) < 0)
                DUNE_THROW(Dune::IOError, geomData.getError());

            // This is the template XML model for connectivity, offsets and types, as defined in initDamarisXmlFile.cpp which is used to
            // build the internally generated Damaris XML configuration file.
            // <parameter name="n_connectivity_ph" type="int" value="1" />
            // <layout name="n_connections_layout_ph" type="int" dimensions="n_connectivity_ph" comment="Layout for connectivities" />
            // <parameter name="n_offsets_types_ph" type="int" value="1" />
            // <layout name="n_offsets_layout_ph" type="int" dimensions="n_offsets_types_ph+1" comment="Layout for the offsets_ph" />
            // <layout name="n_types_layout_ph" type="char" dimensions="n_offsets_types_ph" comment="Layout for the types_ph" />
            // <group name="topologies/topo/elements">
            //     <variable name="connectivity" layout="n_connections_layout_ph" type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            //     <variable name="offsets" layout="n_offsets_layout_ph" type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            //     <variable name="types" layout="n_types_layout_ph" type="scalar" visualizable="false" unit="" script="PythonConduitTest" time-varying="false" />
            // </group>
            DamarisVarInt var_connectivity(1, {std::string("n_connectivity_ph")}, std::string("topologies/topo/elements/connectivity"), rank_);
            var_connectivity.setDamarisParameterAndShmem({geomData.getNCorners()});
            DamarisVarInt var_offsets(1, {std::string("n_offsets_types_ph")}, std::string("topologies/topo/elements/offsets"), rank_);
            var_offsets.setDamarisParameterAndShmem({geomData.getNCells()});
            DamarisVarChar var_types(1, {std::string("n_offsets_types_ph")}, std::string("topologies/topo/elements/types"), rank_);
            var_types.setDamarisParameterAndShmem({geomData.getNCells()});
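
            // Added note on the layout (the standard VTK/Conduit unstructured-topology convention,
            // matching the VTK vertex ordering requested below): 'connectivity' stores the vertex
            // indices of all cells back to back, 'offsets' marks where each cell's vertex list ends,
            // and 'types' holds one VTK cell-type id per cell.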
            // Copy the mesh data from the Dune grid
            long i = 0;
            Opm::GridDataOutput::ConnectivityVertexOrder vtkorder = Opm::GridDataOutput::VTK;

            i = geomData.writeConnectivity(var_connectivity, vtkorder);
            if (i != geomData.getNCorners())
                DUNE_THROW(Dune::IOError, geomData.getError());

            i = geomData.writeOffsetsCells(var_offsets);
            if (i != geomData.getNCells() + 1)
                DUNE_THROW(Dune::IOError, geomData.getError());

            i = geomData.writeCellTypes(var_types);
            if (i != geomData.getNCells())
                DUNE_THROW(Dune::IOError, geomData.getError());
        }
        catch (std::exception& e) {
            OpmLog::error(e.what());
        }
    }

    void prepareLocalCellData(const bool isSubStep,
                              const int reportStepNum)
    {
        OPM_TIMEBLOCK(prepareLocalCellData);
        if (damarisOutputModule_->localDataValid()) {
            return;
        }

        const auto& gridView = simulator_.vanguard().gridView();
        const int num_interior = detail::
            countLocalInteriorCellsGridView(gridView);
        const bool log = this->collectToIORank_.isIORank();

        damarisOutputModule_->allocBuffers(num_interior, reportStepNum,
                                           isSubStep, log, /*isRestart*/ false);

        ElementContext elemCtx(simulator_);
        OPM_BEGIN_PARALLEL_TRY_CATCH();
        {
            OPM_TIMEBLOCK(prepareCellBasedData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
                damarisOutputModule_->processElement(elemCtx);
            }
        }
        if (!simulator_.model().linearizer().getFlowsInfo().empty()) {
            OPM_TIMEBLOCK(prepareFlowsData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
                damarisOutputModule_->processElementFlows(elemCtx);
            }
        }
        {
            OPM_TIMEBLOCK(prepareBlockData);
            for (const auto& elem : elements(gridView, Dune::Partitions::interior)) {
                elemCtx.updatePrimaryStencil(elem);
                elemCtx.updatePrimaryIntensiveQuantities(/*timeIdx=*/0);
                damarisOutputModule_->processElementBlockData(elemCtx);
            }
        }
        {
            OPM_TIMEBLOCK(prepareFluidInPlace);
#ifdef _OPENMP
#pragma omp parallel for
#endif
            for (int dofIdx = 0; dofIdx < num_interior; ++dofIdx) {
                const auto& intQuants = *(simulator_.model().cachedIntensiveQuantities(dofIdx, /*timeIdx=*/0));
                const auto totVolume = simulator_.model().dofTotalVolume(dofIdx);
                damarisOutputModule_->updateFluidInPlace(dofIdx, intQuants, totVolume);
            }
        }

        damarisOutputModule_->validateLocalData();
        OPM_END_PARALLEL_TRY_CATCH("DamarisWriter::prepareLocalCellData() failed: ", simulator_.vanguard().grid().comm());
    }
};

} // namespace Opm

#endif