Update 1 for code after code review of PR 4889

This commit is contained in:
Josh Bowden
2023-12-08 21:22:55 +01:00
parent 0f1d31c88a
commit 54d6db6f35
10 changed files with 476 additions and 457 deletions

View File

@@ -595,6 +595,7 @@ if (Damaris_FOUND AND MPI_FOUND)
list (APPEND PUBLIC_HEADER_FILES opm/simulators/utils/DamarisKeywords.hpp)
list (APPEND PUBLIC_HEADER_FILES ebos/damariswriter.hh)
list (APPEND PUBLIC_HEADER_FILES opm/simulators/utils/DamarisVar.hpp)
list (APPEND PUBLIC_HEADER_FILES opm/simulators/utils/GridDataOutput.hpp)
endif()
if(HDF5_FOUND)

View File

@@ -92,7 +92,7 @@ struct DamarisDedicatedNodes {
using type = UndefinedProperty;
};
template<class TypeTag, class MyTypeTag>
struct DamarisSharedMemeorySizeBytes {
struct DamarisSharedMemorySizeBytes {
using type = UndefinedProperty;
};
template<class TypeTag, class MyTypeTag>
@@ -136,6 +136,8 @@ class DamarisWriter : public EclGenericWriter<GetPropType<TypeTag, Properties::G
using ElementMapper = GetPropType<TypeTag, Properties::ElementMapper>;
using BaseType = EclGenericWriter<Grid,EquilGrid,GridView,ElementMapper,Scalar>;
typedef Opm::DamarisOutput::DamarisVar<int> DamarisVarInt ;
typedef Opm::DamarisOutput::DamarisVar<double> DamarisVarDbl ;
public:
static void registerParameters()
@@ -166,7 +168,7 @@ public:
EWOMS_REGISTER_PARAM(TypeTag, int, DamarisDedicatedNodes,
"Set the number of dedicated nodes (full nodes) that should be used for Damaris processing (per simulation). \n \
Must divide evenly into the number of simulation nodes.");
EWOMS_REGISTER_PARAM(TypeTag, long, DamarisSharedMemeorySizeBytes,
EWOMS_REGISTER_PARAM(TypeTag, long, DamarisSharedMemorySizeBytes,
"Set the size of the shared memory buffer used for IPC between the simulation and the Damaris resources. \n \
Needs to hold all the variables published, possibly over multiple simulation iterations.");
@@ -256,21 +258,21 @@ public:
temp_int64_t[0] = static_cast<int64_t>(this->elements_rank_offsets_[rank_]);
dam_err_ = damaris_set_position("PRESSURE", temp_int64_t);
if (dam_err_ != DAMARIS_OK && rank_ == 0) {
OpmLog::error(fmt::format("ERORR: damariswriter::writeOutput() : ( rank:{})"
OpmLog::error(fmt::format("damariswriter::writeOutput() : ( rank:{})"
"damaris_set_position(PRESSURE, ...), Damaris Error: {} ",
rank_, damaris_error_string(dam_err_) ));
}
dam_err_ = damaris_write("PRESSURE", (void*)this->damarisOutputModule_->getPRESSURE_ptr());
if (dam_err_ != DAMARIS_OK) {
OpmLog::error(fmt::format("ERORR: damariswriter::writeOutput() : ( rank:{}) "
OpmLog::error(fmt::format("damariswriter::writeOutput() : ( rank:{}) "
"damaris_write(PRESSURE, ...), Damaris Error: {} ",
rank_, damaris_error_string(dam_err_) ));
}
dam_err_ = damaris_end_iteration();
if (dam_err_ != DAMARIS_OK) {
OpmLog::error(fmt::format("ERORR: damariswriter::writeOutput() : ( rank:{}) "
OpmLog::error(fmt::format("damariswriter::writeOutput() : ( rank:{}) "
"damaris_end_iteration(), Damaris Error: {} ",
rank_, damaris_error_string(dam_err_) ));
}
@@ -312,16 +314,21 @@ private:
}
if (dam_err_ != DAMARIS_OK) {
OpmLog::error(fmt::format("ERORR: damariswriter::writeOutput() :"
OpmLog::error(fmt::format("damariswriter::writeOutput() :"
"( rank:{}) damaris_write(GLOBAL_CELL_INDEX, ...), Damaris Error: {} ",
rank_, damaris_error_string(dam_err_) ));
}
// This is an example of writing to the Damaris shared memory directly (i.e. not using damaris_write() to copy data there)
// We will add the MPI rank value directly into shared memory using the DamarisVar vrapper C based Damaris API
// We will add the MPI rank value directly into shared memory using the DamarisVar wrapper of the C based Damaris API
// The shared memory is given back to Damaris on object deletion - i.e. when the unique_ptr goes out of scope.
std::unique_ptr<Opm::DamarisOutput::DamarisVar<int>> mpi_rank_var(new Opm::DamarisOutput::DamarisVar<int>(1, {std::string("n_elements_local")}, std::string("MPI_RANK"), rank_)) ;
// N.B. we have not set any offset values, so HDF5 collective nad Dask arrays cannot be used.
//auto mpi_rank_var = std::make_unique<Opm::DamarisOutput::DamarisVar<int>>(
// 1, {std::string("n_elements_local")}, std::string("MPI_RANK"), rank_)) ;
// std::unique_ptr<Opm::DamarisOutput::DamarisVar<int>>
std::unique_ptr<DamarisVarInt> mpi_rank_var( new DamarisVarInt(1,
{std::string("n_elements_local")},
std::string("MPI_RANK"), rank_) ) ;
// N.B. we have not set any offset values, so HDF5 collective and Dask arrays cannot be used.
mpi_rank_var->SetDamarisParameterAndShmem( {this->numElements_ } ) ;
int* shmem_mpi_ptr = mpi_rank_var->data_ptr() ;
// Fill the created memory area
@@ -361,8 +368,8 @@ private:
// ToDo: Do we need to check that local ranks are 0 based ?
int temp_int = static_cast<int>(elements_rank_sizes[rank_]);
dam_err_ = damaris_parameter_set("n_elements_local", &temp_int, sizeof(int));
if (dam_err_ != DAMARIS_OK && rank_ == 0) {
OpmLog::error("Damaris library produced an error result for "
if (dam_err_ != DAMARIS_OK) {
OpmLog::error("( rank:" + std::to_string(rank_)+") Damaris library produced an error result for "
"damaris_parameter_set(\"n_elements_local\", &temp_int, sizeof(int));");
}
// Damaris parameters only support int data types. This will limit models to be under size of 2^32-1 elements
@@ -370,13 +377,16 @@ private:
if( n_elements_global_max <= std::numeric_limits<int>::max() ) {
temp_int = static_cast<int>(n_elements_global_max);
dam_err_ = damaris_parameter_set("n_elements_total", &temp_int, sizeof(int));
if (dam_err_ != DAMARIS_OK && rank_ == 0) {
OpmLog::error("Damaris library produced an error result for "
if (dam_err_ != DAMARIS_OK) {
OpmLog::error("( rank:" + std::to_string(rank_)+") Damaris library produced an error result for "
"damaris_parameter_set(\"n_elements_total\", &temp_int, sizeof(int));");
}
} else {
OpmLog::error(fmt::format("The size of the global array ({}) is greater than what a Damaris paramater type supports ({}). ", n_elements_global_max, std::numeric_limits<int>::max() ));
assert( n_elements_global_max <= std::numeric_limits<int>::max() ) ;
OpmLog::error(fmt::format("( rank:{} ) The size of the global array ({}) is "
"greater than what a Damaris parameter type supports ({}). ",
rank_, n_elements_global_max, std::numeric_limits<int>::max() ));
// assert( n_elements_global_max <= std::numeric_limits<int>::max() ) ;
OPM_THROW(std::runtime_error, "setupDamarisWritingPars() n_elements_global_max > std::numeric_limits<int>::max() " + std::to_string(dam_err_));
}
// Use damaris_set_position to set the offset in the global size of the array.
@@ -384,17 +394,24 @@ private:
int64_t temp_int64_t[1];
temp_int64_t[0] = static_cast<int64_t>(elements_rank_offsets[rank_]);
dam_err_ = damaris_set_position("PRESSURE", temp_int64_t);
if (dam_err_ != DAMARIS_OK && rank_ == 0) {
OpmLog::error("Damaris library produced an error result for "
if (dam_err_ != DAMARIS_OK) {
OpmLog::error("( rank:" + std::to_string(rank_)+") Damaris library produced an error result for "
"damaris_set_position(\"PRESSURE\", temp_int64_t);");
}
dam_err_ = damaris_set_position("GLOBAL_CELL_INDEX", temp_int64_t);
if (dam_err_ != DAMARIS_OK && rank_ == 0) {
OpmLog::error("Damaris library produced an error result for "
if (dam_err_ != DAMARIS_OK) {
OpmLog::error("( rank:" + std::to_string(rank_)+") Damaris library produced an error result for "
"damaris_set_position(\"GLOBAL_CELL_INDEX\", temp_int64_t);");
}
std::unique_ptr<Opm::DamarisOutput::DamarisVar<int>> mpi_rank_var(new Opm::DamarisOutput::DamarisVar<int>(1, {std::string("n_elements_local")}, std::string("MPI_RANK"), rank_)) ;
//auto mpi_rank_var = std::make_unique<Opm::DamarisOutput::DamarisVar<int>>(
// 1, {std::string("n_elements_local")}, std::string("MPI_RANK"), rank_)) ;
// std::unique_ptr<Opm::DamarisOutput::DamarisVar<int>>
// mpi_rank_var(new Opm::DamarisOutput::DamarisVar<int>(1, {std::string("n_elements_local")}, std::string("MPI_RANK"), rank_)) ;
std::unique_ptr<DamarisVarInt> mpi_rank_var( new DamarisVarInt(1,
{std::string("n_elements_local")},
std::string("MPI_RANK"), rank_) ) ;
mpi_rank_var->SetDamarisPosition({*temp_int64_t}) ;
}
@@ -480,7 +497,7 @@ private:
}
catch (std::exception& e)
{
std :: cout << e.what() << std::endl;
OpmLog::error(e.what());
}
}

View File

@@ -422,8 +422,8 @@ struct DamarisDedicatedNodes<TypeTag, TTag::EclBaseProblem> {
static constexpr int value = 0;
};
template<class TypeTag>
struct DamarisSharedMemeorySizeBytes<TypeTag, TTag::EclBaseProblem> {
static constexpr long value = 536870912;
struct DamarisSharedMemorySizeBytes<TypeTag, TTag::EclBaseProblem> {
static constexpr long value = 536870912; // 512 MB
};
template<class TypeTag>
struct DamarisLogLevel<TypeTag, TTag::EclBaseProblem> {

View File

@@ -240,18 +240,24 @@ void Main::setupDamaris(const std::string& outputDir )
ensureOutputDirExists(outputDir);
}
//const auto find_replace_map;
//const auto find_replace_map = Opm::DamarisOutput::DamarisKeywords<PreTypeTag>(EclGenericVanguard::comm(), outputDir);
std::map<std::string, std::string> find_replace_map;
find_replace_map = Opm::DamarisOutput::DamarisKeywords<PreTypeTag>(EclGenericVanguard::comm(), outputDir);
find_replace_map = Opm::DamarisOutput::getDamarisKeywords<PreTypeTag>(EclGenericVanguard::comm(), outputDir);
// By default EnableDamarisOutputCollective is true so all simulation results will
// be written into one single file for each iteration using Parallel HDF5.
// It set to false, FilePerCore mode is used in Damaris, then simulation results in each
// If set to false, FilePerCore mode is used in Damaris, then simulation results in each
// node are aggregated by dedicated Damaris cores and stored to separate files per Damaris core.
// Irrespective of mode, output is written asynchronously at the end of each timestep.
// Using the ModifyModel class to set the XML file for Damaris.
DamarisOutput::initializeDamaris(EclGenericVanguard::comm(), EclGenericVanguard::comm().rank(), find_replace_map);
DamarisOutput::initializeDamaris(EclGenericVanguard::comm(),
EclGenericVanguard::comm().rank(),
find_replace_map);
int is_client;
MPI_Comm new_comm;
// damaris_start() is where the Damaris Server ranks will block, until damaris_stop()
// is called from the client ranks
int err = damaris_start(&is_client);
isSimulationRank_ = (is_client > 0);
if (isSimulationRank_ && err == DAMARIS_OK) {

View File

@@ -344,7 +344,7 @@ private:
}
if (enableDamarisOutput_) {
this->setupDamaris(outputDir);
this->setupDamaris(outputDir); // Damaris server ranks will block here until damaris_stop() is called by client ranks
}
#endif // HAVE_DAMARIS

View File

@@ -1,5 +1,6 @@
/*
Copyright 2021 Equinor.
Copyright 2023 Inria.
This file is part of the Open Porous Media project (OPM).
@@ -82,7 +83,9 @@ DamarisSettings::getKeywords([[maybe_unused]] const Parallel::Communication& com
const std::string& OutputDir)
{
std::string saveToHDF5_str("MyStore");
if (! saveToDamarisHDF5 ) saveToHDF5_str = "#" ;
if (! saveToDamarisHDF5 ){
saveToHDF5_str = "#";
}
// These strings are used to comment out an XML element if it is not required
std::string disablePythonXMLstart("!--");
@@ -133,11 +136,11 @@ DamarisSettings::getKeywords([[maybe_unused]] const Parallel::Communication& com
{
// A work around of this issue is to remove the Paraview mpi4py library (use print(inspect.getfile(mpi4py)))
// and then possibly not use mpi4py in the Paraview script code. OR try to install paraview mpi4py with headers.
std::cerr << "ERROR: Both the Python (--damaris-python-script command line argument) and Paraview Python " <<
"(--damaris-python-paraview-script command line argument) scripts are valid, however only one type "
"of analysis is supported in a single simulation (due to Paraview installing mpi4py library locally and without header files)."
" Please choose one or the other method of analysis for now. Exiting." << std::endl ;
std::exit(-1) ;
OPM_THROW(std::runtime_error, "ERROR: Both the Python (--damaris-python-script command line argument) and Paraview Python "
"(--damaris-python-paraview-script command line argument) scripts are valid, however only one "
"type of analysis is supported in a single simulation (due to Paraview installing mpi4py library "
"locally and without header files). "
"Please choose one or the other method of analysis for now. Exiting.");
}
std::string damarisOutputCollective_str;
@@ -191,7 +194,7 @@ DamarisSettings::getKeywords([[maybe_unused]] const Parallel::Communication& com
if (shmemSizeBytes != 0) {
shmemSizeBytes_str = std::to_string(shmemSizeBytes);
} else {
shmemSizeBytes_str = "536870912" ;
shmemSizeBytes_str = "536870912"; // 512 MB
}
std::string logLevel_str(damarisLogLevel);

View File

@@ -42,7 +42,7 @@ namespace Opm::DamarisOutput
* Returns true if the file exists.
* Tests to see if filename string is empty
* or the "#" character and if so returns false.
* Tests for file existance on ranl 0 and
* Tests for file existence on rank 0 and
* passes result via MPI to all other ranks.
*/
bool FileExists(const std::string& filename_in,
@@ -59,7 +59,7 @@ struct DamarisSettings {
std::string damarisDaskFile = "";
int nDamarisCores = 1;
int nDamarisNodes = 0;
long shmemSizeBytes = 536870912 ;
long shmemSizeBytes = 536870912; // 512 MB
std::map<std::string, std::string>
getKeywords(const Parallel::Communication& comm,
@@ -76,7 +76,7 @@ struct DamarisSettings {
*/
template<class TypeTag>
std::map<std::string, std::string>
DamarisKeywords(const Parallel::Communication& comm, const std::string& OutputDir)
getDamarisKeywords(const Parallel::Communication& comm, const std::string& OutputDir)
{
DamarisSettings settings;
// Get all of the Damaris keywords (except for --enable-damaris, which is used in simulators/flow/Main.hpp)
@@ -88,7 +88,7 @@ DamarisKeywords(const Parallel::Communication& comm, const std::string& OutputDi
settings.damarisSimName = EWOMS_GET_PARAM(TypeTag, std::string, DamarisSimName);
settings.nDamarisCores = EWOMS_GET_PARAM(TypeTag, int, DamarisDedicatedCores);
settings.nDamarisNodes = EWOMS_GET_PARAM(TypeTag, int, DamarisDedicatedNodes);
settings.shmemSizeBytes = EWOMS_GET_PARAM(TypeTag, long, DamarisSharedMemeorySizeBytes);
settings.shmemSizeBytes = EWOMS_GET_PARAM(TypeTag, long, DamarisSharedMemorySizeBytes);
settings.damarisLogLevel = EWOMS_GET_PARAM(TypeTag, std::string, DamarisLogLevel);
settings.damarisDaskFile = EWOMS_GET_PARAM(TypeTag, std::string, DamarisDaskFile);
return settings.getKeywords(comm, OutputDir);

View File

@@ -35,29 +35,30 @@ namespace Opm::DamarisOutput
std::string initDamarisXmlFile(); // Defined in initDamarisXMLFile.cpp, to avoid messing up this file.
// Initialize Damaris by filling in th XML file and storing it in the chosen directory
/**
* Initialize Damaris by either reading a file specified by the environment variable FLOW_DAMARIS_XML_FILE or
* by filling in the XML file and storing it in the chosen directory
*/
void
initializeDamaris(MPI_Comm comm, int mpiRank, std::map<std::string, std::string>& find_replace_map )
{
int dam_err_;
int dam_err;
/* Get the name of the Damaris input file from an environment variable if available */
const char* cs_damaris_xml_file = getenv("FLOW_DAMARIS_XML_FILE");
if (cs_damaris_xml_file != NULL)
{
std::cout << "INFO: Initializing Damaris from environment variable FLOW_DAMARIS_XML_FILE: "
<< cs_damaris_xml_file << std::endl;
dam_err_ = damaris_initialize(cs_damaris_xml_file, MPI_COMM_WORLD);
if (dam_err_ != DAMARIS_OK) {
OpmLog::info(std::string("Initializing Damaris from environment variable FLOW_DAMARIS_XML_FILE: ") + cs_damaris_xml_file);
dam_err = damaris_initialize(cs_damaris_xml_file, comm);
if (dam_err != DAMARIS_OK) {
OpmLog::error(fmt::format("damariswriter::initializeDamaris() : ( rank:{}) "
"damaris_initialize({}, MPI_COMM_WORLD), Damaris Error: {} ",
mpiRank, cs_damaris_xml_file, damaris_error_string(dam_err_) ));
"damaris_initialize({}, comm), Damaris Error: {} ",
mpiRank, cs_damaris_xml_file, damaris_error_string(dam_err) ));
}
} else {
// Prepare the XML file
// Prepare the inbuilt XML file
std::string damaris_config_xml = initDamarisXmlFile(); // This is the template for a Damaris XML file
damaris::model::ModifyModel myMod = damaris::model::ModifyModel(damaris_config_xml);
// The map will make it precise the output directory and FileMode (either FilePerCore or Collective storage)
// The map is used to find all occurrences of the string in position 1 and replace it/them with the string in position 2
// std::map<std::string, std::string> find_replace_map = DamarisKeywords(outputDir, enableDamarisOutputCollective);
myMod.RepalceWithRegEx(find_replace_map);
@@ -68,12 +69,13 @@ initializeDamaris(MPI_Comm comm, int mpiRank, std::map<std::string, std::string>
if (mpiRank == 0) {
myMod.SaveXMLStringToFile(damaris_xml_filename_str);
}
std::cout << "INFO: Initializing Damaris using internally built file:" << damaris_xml_filename_str << " (N.B. use FLOW_DAMARIS_XML_FILE to override)" << std::endl;
dam_err_ = damaris_initialize(damaris_xml_filename_str.c_str(), comm);
if (dam_err_ != DAMARIS_OK) {
OpmLog::error(fmt::format("ERORR: damariswriter::initializeDamaris() : ( rank:{}) "
"damaris_initialize({}, MPI_COMM_WORLD), Damaris Error: {}. Error via OPM internally built file:",
mpiRank, cs_damaris_xml_file, damaris_error_string(dam_err_) ));
OpmLog::info("Initializing Damaris using internally built file: " + damaris_xml_filename_str + " (N.B. use environment variable FLOW_DAMARIS_XML_FILE to override)");
dam_err = damaris_initialize(damaris_xml_filename_str.c_str(), comm);
if (dam_err != DAMARIS_OK) {
OpmLog::error(fmt::format("damariswriter::initializeDamaris() : ( rank:{}) "
"damaris_initialize({}, comm), Damaris Error: {}. Error via OPM internally built file:",
mpiRank, damaris_xml_filename_str, damaris_error_string(dam_err) ));
}
}
}

View File

@@ -152,16 +152,12 @@ namespace Opm
int64_t * positions_; //!< The offsets into the array that the data in the Variable starts from for this rank.
int rank_; //!< Rank of process - used for error reporting.
bool paramaters_set_; //!< set to true after SetDamarisParameter() is called to ensure the variable has correct size for memory allocation in SetPointersToDamarisShmem()
std::vector<std::string> param_names_; //!< Contains one parameter name for each parameter that a variable depends on (via its Layout)
std::string variable_name_; //!< Reference string to the XML attribute name of the variable.
int dam_err_; //!< Set to != DAMARIS_OK if a Damaris error was returned by a Damaris API function call
bool has_error_;
std::ostringstream dam_err_sstr_; //!< Use dam_err_sstr.str() to return an error string describing detected error
DamarisVarXMLAttributes xml_attributes_; //!< The extra elements that need to be part of a Damaris <variable> type. They are simple string values that may reference other XML elements (and could be checked for existence etc.)
T * data_ptr_; //!< This pointer will be mapped to the Damaris shared memory area for the variable in the SetPointersToDamarisShmem() method. The type T will match the Layout type
public:

View File

@@ -19,8 +19,8 @@
along with OPM. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef OPM_SIM_MESH_DATA2_HPP
#define OPM_SIM_MESH_DATA2_HPP
#ifndef OPM_GRID_DATA_OUTPUT_HPP
#define OPM_GRID_DATA_OUTPUT_HPP
#include <sstream>
#include <dune/grid/common/rangegenerators.hh>
@@ -31,7 +31,7 @@
This data extractor provides the full set of vertices (corresponding to Dune::Partition::all) and then
allows a user to specify Dune sub-partitions to get the references into the vertex array and element
(aka cell) types for the sub-partition. This allows the full set of verticies to be reused for
(aka cell) types for the sub-partition. This allows the full set of vertices to be reused for
visualisation of the various sub-partitions, at the expense of copying all the vertices. Typically
a user is interested in the interiorBorder elements which make use of the bulk (~80%) of the vertices.
This saves having to renumber the indexes to the vertices for the sub-partitions.
@@ -57,17 +57,17 @@
... do something with vertex data x_vert, y_vert and z_vert ....
free [] x_vert;
free [] y_vert;
free [] z_vert;
delete [] x_vert;
delete [] y_vert;
delete [] z_vert;
// example using AOS
double * xyz_vert_aos = new double[nvert*3];
geomData.writeGridPoints_SOA(xyz_vert_aos) ;
geomData.writeGridPoints_AOS(xyz_vert_aos);
... do something with vertex data xyz_vert_aos....
free [] xyz_vert_aos;
delete [] xyz_vert_aos;
*/
@@ -75,7 +75,7 @@
namespace Opm::GridDataOutput
{
/**
* Allows selection of order of verticies in writeConnectivity()
* Allows selection of order of vertices in writeConnectivity()
*/
enum ConnectivityVertexOrder { DUNE = 0 , VTK = 1 };
@@ -120,11 +120,6 @@ namespace Opm::GridDataOutput
countEntities();
}
//! destructor
~SimMeshDataAccessor ()
{
}
/**
Checks for cells that have polyhedral type within the current partition of cells
@@ -154,22 +149,22 @@ namespace Opm::GridDataOutput
void countEntities( )
{
// We include all the vertices for this ranks partition
const auto& vert_partition_it = vertices(gridView_, Dune::Partitions::all);
nvertices_ = std::distance(vert_partition_it.begin(), vert_partition_it.end());
const auto& cell_partition_it = elements(gridView_, dunePartition_);
ncells_ = std::distance(cell_partition_it.begin(), cell_partition_it.end());
const auto& vert_partition = vertices(gridView_, Dune::Partitions::all);
nvertices_ = std::distance(vert_partition.begin(), vert_partition.end());
const auto& cell_partition = elements(gridView_, dunePartition_);
ncells_ = 0;
ncorners_ = 0;
for (const auto& cit : cell_partition_it)
for (const auto& cit : cell_partition)
{
auto corner_geom = cit.geometry();
ncorners_ += corner_geom.corners();
++ncells_;
}
}
/**
Write the positions of vertices - directly to the pointers given in paramaters
Write the positions of vertices - directly to the pointers given in parameters
Returns the number of vertices written
*/
@@ -180,8 +175,7 @@ namespace Opm::GridDataOutput
if (dimw_ == 3) {
for (const auto& vit : vertices(gridView_, Dune::Partitions::all) )
{
// if (i < nvertices_) // As we are templated on the Dune::PartitionSet<partitions>, this cannot change
auto xyz_local = vit.geometry().corner(0); // verticies only have one corner
auto xyz_local = vit.geometry().corner(0); // vertices only have one corner
x_inout[i] = static_cast<T>(xyz_local[0]);
y_inout[i] = static_cast<T>(xyz_local[1]);
z_inout[i] = static_cast<T>(xyz_local[2]);
@@ -190,14 +184,14 @@ namespace Opm::GridDataOutput
} else if (dimw_ == 2) {
for (const auto& vit : vertices(gridView_, Dune::Partitions::all) )
{
// if (i < nvertices_) // As we are templated on the Dune::PartitionSet<partitions>, this cannot change
auto xyz_local = vit.geometry().corner(0); // verticies only have one corner
auto xyz_local = vit.geometry().corner(0); // vertices only have one corner
x_inout[i] = static_cast<T>(xyz_local[0]);
y_inout[i] = static_cast<T>(xyz_local[1]);
z_inout[i] = static_cast<T>(0.0);
i++;
}
}
assert(i == nvertices_); // As we are templated on the Dune::PartitionSet<partitions>, this cannot change
return i;
}
@@ -267,7 +261,7 @@ namespace Opm::GridDataOutput
}
/**
* Write the connectivity array - directly to the pointer given in paramater 1
* Write the connectivity array - directly to the pointer given in parameter 1
Reorders the indices as selected either in DUNE order or VTK order.
Returns the number of corner indices written.
@@ -307,7 +301,7 @@ namespace Opm::GridDataOutput
}
/**
* Write the offsets values - directly to the pointer given in paramater 1
* Write the offsets values - directly to the pointer given in parameter 1
Returns the number of offset values written, which should be 1 greater than ncells_
or -1 if an error was detected
*/
@@ -326,7 +320,7 @@ namespace Opm::GridDataOutput
}
/**
* Write the Cell types array - directly to the pointer given in paramater 1
* Write the Cell types array - directly to the pointer given in parameter 1
*/
template <typename I>
long writeCellTypes( I* types_inout)