Fixing bug with analysis

Mark Berrill 2021-01-22 10:05:16 -05:00
parent c350e2b45e
commit 90ba7ed65a
15 changed files with 1229 additions and 926 deletions

108
.clang-format Normal file
View File

@ -0,0 +1,108 @@
# To run clang tools:
# cd to root directory
# To update format only:
# find . -name "*.cpp" -or -name "*.cc" -or -name "*.h" -or -name "*.hpp" -or -name "*.I" | xargs -I{} clang-format -i {}
# git status -s . | sed s/^...// | grep -E "(\.cpp|\.h|\.cc|\.hpp|\.I)" | xargs -I{} clang-format -i {}
# To run modernize
# export CLANG_PATH=/packages/llvm/build/llvm-60
# export PATH=${CLANG_PATH}/bin:${CLANG_PATH}/share/clang:$PATH
# find src -name "*.cpp" -or -name "*.cc" | xargs -I{} clang-tidy -checks=modernize* -p=/projects/AtomicModel/build/debug -fix {}
# find src -name "*.cpp" -or -name "*.cc" -or -name "*.h" -or -name "*.hpp" -or -name "*.I" | xargs -I{} clang-format -i {}
---
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -4
AlignAfterOpenBracket: DontAlign
AlignConsecutiveAssignments: true
AlignConsecutiveDeclarations: false
AlignEscapedNewlinesLeft: true
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: true
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: true
AfterControlStatement: false
AfterEnum: false
AfterFunction: true
AfterNamespace: false
AfterObjCDeclaration: true
AfterStruct: false
AfterUnion: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
BreakBeforeBinaryOperators: None
#BreakBeforeBraces: Stroustrup
BreakBeforeBraces: Custom
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
ColumnLimit: 100
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: false
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
IncludeCategories:
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
Priority: 2
- Regex: '^(<|"(gtest|isl|json)/)'
Priority: 3
- Regex: '.*'
Priority: 1
IndentCaseLabels: false
IndentWidth: 4
IndentWrappedFunctionNames: false
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 2
NamespaceIndentation: None
ObjCBlockIndentWidth: 4
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyExcessCharacter: 1000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: true
SpaceAfterTemplateKeyword: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeParens: ControlStatements
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: true
SpacesInSquareBrackets: false
Standard: Cpp11
TabWidth: 4
UseTab: Never
...

View File

@ -2,8 +2,8 @@
#define SILO_INTERFACE_HPP
#include "IO/silo.h"
#include "common/Utilities.h"
#include "common/MPI.h"
#include "common/Utilities.h"
#include "ProfilerApp.h"
@ -13,17 +13,29 @@
#include <silo.h>
namespace silo {
/****************************************************
* Helper functions *
****************************************************/
template<class TYPE> static constexpr int getType();
template<> constexpr int getType<double>() { return DB_DOUBLE; }
template<> constexpr int getType<float>() { return DB_FLOAT; }
template<> constexpr int getType<int>() { return DB_INT; }
template<class TYPE>
static constexpr int getType();
template<>
constexpr int getType<double>()
{
return DB_DOUBLE;
}
template<>
constexpr int getType<float>()
{
return DB_FLOAT;
}
template<>
constexpr int getType<int>()
{
return DB_INT;
}
template<class TYPE>
inline void copyData( Array<TYPE> &data, int type, const void *src )
{
@ -43,10 +55,23 @@ inline void copyData( Array<TYPE>& data, int type, const void *src )
/****************************************************
* Write/read an arbitrary vector *
****************************************************/
template<class TYPE> constexpr int getSiloType();
template<> constexpr int getSiloType<int>() { return DB_INT; }
template<> constexpr int getSiloType<float>() { return DB_FLOAT; }
template<> constexpr int getSiloType<double>() { return DB_DOUBLE; }
template<class TYPE>
constexpr int getSiloType();
template<>
constexpr int getSiloType<int>()
{
return DB_INT;
}
template<>
constexpr int getSiloType<float>()
{
return DB_FLOAT;
}
template<>
constexpr int getSiloType<double>()
{
return DB_DOUBLE;
}
template<class TYPE>
void write( DBfile *fid, const std::string &varname, const std::vector<TYPE> &data )
{
@ -144,7 +169,11 @@ void writeUniformMesh( DBfile* fid, const std::string& meshname,
z[N[2]] = range[5];
}
float *coords[] = { x, y, z };
int err = DBPutQuadmesh( fid, meshname.c_str(), nullptr, coords, dims, NDIM, DB_FLOAT, DB_COLLINEAR, nullptr );
int err = DBPutQuadmesh(
fid, meshname.c_str(), nullptr, coords, dims, NDIM, DB_FLOAT, DB_COLLINEAR, nullptr );
delete[] x;
delete[] y;
delete[] z;
ASSERT( err == 0 );
PROFILE_STOP( "writeUniformMesh", 2 );
}
@ -154,8 +183,9 @@ void writeUniformMesh( DBfile* fid, const std::string& meshname,
* Write a vector/tensor quad variable *
****************************************************/
template<int NDIM, class TYPE>
void writeUniformMeshVariable( DBfile* fid, const std::string& meshname, const std::array<int,NDIM>& N,
const std::string& varname, const Array<TYPE>& data, VariableType type )
void writeUniformMeshVariable( DBfile *fid, const std::string &meshname,
const std::array<int, NDIM> &N, const std::string &varname, const Array<TYPE> &data,
VariableType type )
{
PROFILE_START( "writeUniformMeshVariable", 2 );
int nvars = 1, dims[NDIM] = { 1 };
@ -197,8 +227,8 @@ void writeUniformMeshVariable( DBfile* fid, const std::string& meshname, const s
std::vector<char *> varnames( nvars, nullptr );
for ( int i = 0; i < nvars; i++ )
varnames[i] = const_cast<char *>( var_names[i].c_str() );
int err = DBPutQuadvar( fid, varname.c_str(), meshname.c_str(), nvars,
varnames.data(), vars, dims, NDIM, nullptr, 0, getType<TYPE>(), vartype, nullptr );
int err = DBPutQuadvar( fid, varname.c_str(), meshname.c_str(), nvars, varnames.data(), vars,
dims, NDIM, nullptr, 0, getType<TYPE>(), vartype, nullptr );
ASSERT( err == 0 );
PROFILE_STOP( "writeUniformMeshVariable", 2 );
}
@ -227,8 +257,8 @@ Array<TYPE> readUniformMeshVariable( DBfile* fid, const std::string& varname )
* Read/write a point mesh/variable to silo *
****************************************************/
template<class TYPE>
void writePointMesh( DBfile* fid, const std::string& meshname,
int ndim, int N, const TYPE *coords[] )
void writePointMesh(
DBfile *fid, const std::string &meshname, int ndim, int N, const TYPE *coords[] )
{
int err = DBPutPointmesh( fid, meshname.c_str(), ndim, coords, N, getType<TYPE>(), nullptr );
ASSERT( err == 0 );
@ -250,15 +280,16 @@ Array<TYPE> readPointMesh( DBfile* fid, const std::string& meshname )
return coords;
}
template<class TYPE>
void writePointMeshVariable( DBfile* fid, const std::string& meshname,
const std::string& varname, const Array<TYPE>& data )
void writePointMeshVariable(
DBfile *fid, const std::string &meshname, const std::string &varname, const Array<TYPE> &data )
{
int N = data.size( 0 );
int nvars = data.size( 1 );
std::vector<const TYPE *> vars( nvars );
for ( int i = 0; i < nvars; i++ )
vars[i] = &data( 0, i );
int err = DBPutPointvar( fid, varname.c_str(), meshname.c_str(), nvars, vars.data(), N, getType<TYPE>(), nullptr );
int err = DBPutPointvar(
fid, varname.c_str(), meshname.c_str(), nvars, vars.data(), N, getType<TYPE>(), nullptr );
ASSERT( err == 0 );
}
template<class TYPE>
@ -282,8 +313,8 @@ Array<TYPE> readPointMeshVariable( DBfile* fid, const std::string& varname )
* Read/write a triangle mesh *
****************************************************/
template<class TYPE>
void writeTriMesh( DBfile* fid, const std::string& meshName,
int ndim, int ndim_tri, int N, const TYPE *coords[], int N_tri, const int *tri[] )
void writeTriMesh( DBfile *fid, const std::string &meshName, int ndim, int ndim_tri, int N,
const TYPE *coords[], int N_tri, const int *tri[] )
{
auto zoneName = meshName + "_zones";
std::vector<int> nodelist( ( ndim_tri + 1 ) * N_tri );
@ -302,10 +333,10 @@ void writeTriMesh( DBfile* fid, const std::string& meshName,
ERROR( "Unknown shapetype" );
int shapesize = ndim_tri + 1;
int shapecnt = N_tri;
DBPutZonelist2( fid, zoneName.c_str(), N_tri, ndim_tri, nodelist.data(),
nodelist.size(), 0, 0, 0, &shapetype, &shapesize, &shapecnt, 1, nullptr );
DBPutUcdmesh( fid, meshName.c_str(), ndim, nullptr, coords, N,
nodelist.size(), zoneName.c_str(), nullptr, getType<TYPE>(), nullptr );
DBPutZonelist2( fid, zoneName.c_str(), N_tri, ndim_tri, nodelist.data(), nodelist.size(), 0, 0,
0, &shapetype, &shapesize, &shapecnt, 1, nullptr );
DBPutUcdmesh( fid, meshName.c_str(), ndim, nullptr, coords, N, nodelist.size(),
zoneName.c_str(), nullptr, getType<TYPE>(), nullptr );
}
template<class TYPE>
void readTriMesh( DBfile *fid, const std::string &meshname, Array<TYPE> &coords, Array<int> &tri )
@ -362,8 +393,8 @@ void writeTriMeshVariable( DBfile* fid, int ndim, const std::string& meshname,
std::vector<char *> varnames( nvars, nullptr );
for ( int i = 0; i < nvars; i++ )
varnames[i] = const_cast<char *>( var_names[i].c_str() );
DBPutUcdvar( fid, varname.c_str(), meshname.c_str(), nvars,
varnames.data(), vars, data.size(0), nullptr, 0, getType<TYPE>(), vartype, nullptr );
DBPutUcdvar( fid, varname.c_str(), meshname.c_str(), nvars, varnames.data(), vars,
data.size( 0 ), nullptr, 0, getType<TYPE>(), vartype, nullptr );
}
template<class TYPE>
Array<TYPE> readTriMeshVariable( DBfile *fid, const std::string &varname )
@ -382,7 +413,7 @@ Array<TYPE> readTriMeshVariable( DBfile* fid, const std::string& varname )
}
}; // silo namespace
}; // namespace silo
#endif

View File

@ -542,7 +542,6 @@ double MorphDrain(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain
}
// Extract only the connected part of NWP
BlobIDstruct new_index;
double vF=0.0; double vS=0.0;
ComputeGlobalBlobIDs(nx-2,ny-2,nz-2,Dm->rank_info,phase,SignDist,vF,vS,phase_label,Dm->Comm);
Dm->Comm.barrier();

View File

@ -15,10 +15,8 @@
AnalysisType &operator|=( AnalysisType &lhs, AnalysisType rhs )
{
lhs = static_cast<AnalysisType>(
static_cast<std::underlying_type<AnalysisType>::type>(lhs) |
static_cast<std::underlying_type<AnalysisType>::type>(rhs)
);
lhs = static_cast<AnalysisType>( static_cast<std::underlying_type<AnalysisType>::type>( lhs ) |
static_cast<std::underlying_type<AnalysisType>::type>( rhs ) );
return lhs;
}
bool matches( AnalysisType x, AnalysisType y )
@ -28,10 +26,11 @@ bool matches( AnalysisType x, AnalysisType y )
}
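As a reading aid (not part of the commit): the two helpers above implement a simple bit-flag pattern over AnalysisType, whose values are declared in the runAnalysis header shown later in this diff. A minimal sketch of the intended usage, assuming those declarations are in scope; the function name example_flag_usage is hypothetical:
// Illustrative only: combining analysis flags with operator|= and testing them with matches().
static void example_flag_usage()
{
    AnalysisType type = AnalysisType::AnalyzeNone;
    type |= AnalysisType::IdentifyBlobs; // set a flag via the operator|= defined above
    type |= AnalysisType::CreateRestart;
    if ( matches( type, AnalysisType::IdentifyBlobs ) ) {
        // ... spawn the blob-identification work items ...
    }
}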
// Create a shared_ptr to an array of values
template<class TYPE>
void DeleteArray( const TYPE *p )
static inline std::shared_ptr<TYPE> make_shared_array( size_t N )
{
delete [] p;
return std::shared_ptr<TYPE>( new TYPE[N], []( const TYPE *p ) { delete[] p; } );
}
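A brief aside, not from the commit itself: the lambda deleter is what makes this helper safe, since a plain std::shared_ptr<double> would otherwise call delete rather than delete[] on a new[] allocation. A hypothetical usage sketch (the size and function name are illustrative; the real call sites below use 2*d_Np and 19*d_Np):
// Illustrative only: holding a heap-allocated array in a shared_ptr with the correct deleter.
static void example_make_shared_array()
{
    const size_t N_example = 1024;                           // hypothetical size
    auto cDen = make_shared_array<double>( 2 * N_example );  // freed with delete[] on release
    cDen.get()[0] = 0.0;
}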
@ -39,9 +38,13 @@ void DeleteArray( const TYPE *p )
class WriteRestartWorkItem : public ThreadPool::WorkItemRet<void>
{
public:
WriteRestartWorkItem( const char* filename_, std::shared_ptr<double> cDen_, std::shared_ptr<double> cfq_, int N_ ):
filename(filename_), cfq(cfq_), cDen(cDen_), N(N_) {}
virtual void run() {
WriteRestartWorkItem( const std::string &filename_, std::shared_ptr<double> cDen_,
std::shared_ptr<double> cfq_, int N_ )
: filename( filename_ ), cfq( cfq_ ), cDen( cDen_ ), N( N_ )
{
}
virtual void run()
{
PROFILE_START( "Save Checkpoint", 1 );
double value;
ofstream File( filename, ios::binary );
@ -51,7 +54,6 @@ public:
File.write( (char *) &value, sizeof( value ) );
value = cDen.get()[N + n];
File.write( (char *) &value, sizeof( value ) );
}
for ( int n = 0; n < N; n++ ) {
// Write the distributions
@ -63,43 +65,58 @@ public:
File.close();
PROFILE_STOP( "Save Checkpoint", 1 );
};
private:
WriteRestartWorkItem();
const char* filename;
const std::string filename;
std::shared_ptr<double> cfq, cDen;
// const DoubleArray& phase;
//const DoubleArray& dist;
const int N;
};
// Helper class to compute the blob ids
typedef std::shared_ptr<std::pair<int, IntArray>> BlobIDstruct;
typedef std::shared_ptr<std::vector<BlobIDType>> BlobIDList;
static const std::string id_map_filename = "lbpm_id_map.txt";
class BlobIdentificationWorkItem1 : public ThreadPool::WorkItemRet<void>
{
public:
BlobIdentificationWorkItem1( int timestep_, int Nx_, int Ny_, int Nz_, const RankInfoStruct& rank_info_,
std::shared_ptr<const DoubleArray> phase_, const DoubleArray& dist_,
BlobIDstruct last_id_, BlobIDstruct new_index_, BlobIDstruct new_id_, BlobIDList new_list_, runAnalysis::commWrapper&& comm_ ):
timestep(timestep_), Nx(Nx_), Ny(Ny_), Nz(Nz_), rank_info(rank_info_),
phase(phase_), dist(dist_), last_id(last_id_), new_index(new_index_), new_id(new_id_), new_list(new_list_), comm(std::move(comm_))
BlobIdentificationWorkItem1( int timestep_, int Nx_, int Ny_, int Nz_,
const RankInfoStruct &rank_info_, std::shared_ptr<const DoubleArray> phase_,
const DoubleArray &dist_, BlobIDstruct last_id_, BlobIDstruct new_index_,
BlobIDstruct new_id_, BlobIDList new_list_, runAnalysis::commWrapper &&comm_ )
: timestep( timestep_ ),
Nx( Nx_ ),
Ny( Ny_ ),
Nz( Nz_ ),
rank_info( rank_info_ ),
phase( phase_ ),
dist( dist_ ),
last_id( last_id_ ),
new_index( new_index_ ),
new_id( new_id_ ),
new_list( new_list_ ),
comm( std::move( comm_ ) )
{
}
~BlobIdentificationWorkItem1() {}
virtual void run() {
virtual void run()
{
// Compute the global blob id and compare to the previous version
PROFILE_START( "Identify blobs", 1 );
double vF = 0.0;
double vS = -1.0; // one voxel buffer region around solid
IntArray &ids = new_index->second;
new_index->first = ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,rank_info,*phase,dist,vF,vS,ids,comm.comm);
new_index->first = ComputeGlobalBlobIDs(
Nx - 2, Ny - 2, Nz - 2, rank_info, *phase, dist, vF, vS, ids, comm.comm );
PROFILE_STOP( "Identify blobs", 1 );
}
private:
BlobIdentificationWorkItem1();
int timestep;
int Nx, Ny, Nz;
const RankInfoStruct& rank_info;
const RankInfoStruct rank_info;
std::shared_ptr<const DoubleArray> phase;
const DoubleArray &dist;
BlobIDstruct last_id, new_index, new_id;
@ -109,15 +126,27 @@ private:
class BlobIdentificationWorkItem2 : public ThreadPool::WorkItemRet<void>
{
public:
BlobIdentificationWorkItem2( int timestep_, int Nx_, int Ny_, int Nz_, const RankInfoStruct& rank_info_,
std::shared_ptr<const DoubleArray> phase_, const DoubleArray& dist_,
BlobIDstruct last_id_, BlobIDstruct new_index_, BlobIDstruct new_id_, BlobIDList new_list_ , runAnalysis::commWrapper&& comm_ ):
timestep(timestep_), Nx(Nx_), Ny(Ny_), Nz(Nz_), rank_info(rank_info_),
phase(phase_), dist(dist_), last_id(last_id_), new_index(new_index_), new_id(new_id_), new_list(new_list_), comm(std::move(comm_))
BlobIdentificationWorkItem2( int timestep_, int Nx_, int Ny_, int Nz_,
const RankInfoStruct &rank_info_, std::shared_ptr<const DoubleArray> phase_,
const DoubleArray &dist_, BlobIDstruct last_id_, BlobIDstruct new_index_,
BlobIDstruct new_id_, BlobIDList new_list_, runAnalysis::commWrapper &&comm_ )
: timestep( timestep_ ),
Nx( Nx_ ),
Ny( Ny_ ),
Nz( Nz_ ),
rank_info( rank_info_ ),
phase( phase_ ),
dist( dist_ ),
last_id( last_id_ ),
new_index( new_index_ ),
new_id( new_id_ ),
new_list( new_list_ ),
comm( std::move( comm_ ) )
{
}
~BlobIdentificationWorkItem2() {}
virtual void run() {
virtual void run()
{
// Compute the global blob id and compare to the previous version
PROFILE_START( "Identify blobs maps", 1 );
const IntArray &ids = new_index->second;
@ -140,11 +169,12 @@ public:
}
PROFILE_STOP( "Identify blobs maps", 1 );
}
private:
BlobIdentificationWorkItem2();
int timestep;
int Nx, Ny, Nz;
const RankInfoStruct& rank_info;
const RankInfoStruct rank_info;
std::shared_ptr<const DoubleArray> phase;
const DoubleArray &dist;
BlobIDstruct last_id, new_index, new_id;
@ -158,14 +188,23 @@ class WriteVisWorkItem: public ThreadPool::WorkItemRet<void>
{
public:
WriteVisWorkItem( int timestep_, std::vector<IO::MeshDataStruct> &visData_,
TwoPhase& Avgerages_, fillHalo<double>& fillData_, runAnalysis::commWrapper&& comm_ ):
timestep(timestep_), visData(visData_), Averages(Avgerages_), fillData(fillData_), comm(std::move(comm_))
TwoPhase &Avgerages_, std::array<int, 3> n_, RankInfoStruct rank_info_,
runAnalysis::commWrapper &&comm_ )
: timestep( timestep_ ),
visData( visData_ ),
Averages( Avgerages_ ),
n( std::move( n_ ) ),
rank_info( std::move( rank_info_ ) ),
comm( std::move( comm_ ) )
{
}
~WriteVisWorkItem() {}
virtual void run() {
virtual void run()
{
PROFILE_START( "Save Vis", 1 );
fillHalo<double> fillData( comm.comm, rank_info, n, { 1, 1, 1 }, 0, 1 );
ASSERT( visData[0].vars[0]->name == "phase" );
Array<double> &PhaseData = visData[0].vars[0]->data;
fillData.copy( Averages.SDn, PhaseData );
@ -195,12 +234,14 @@ public:
PROFILE_STOP( "Save Vis", 1 );
};
private:
WriteVisWorkItem();
int timestep;
std::array<int, 3> n;
RankInfoStruct rank_info;
std::vector<IO::MeshDataStruct> &visData;
TwoPhase &Averages;
fillHalo<double>& fillData;
runAnalysis::commWrapper comm;
};
@ -208,18 +249,28 @@ private:
class IOWorkItem : public ThreadPool::WorkItemRet<void>
{
public:
IOWorkItem(int timestep_, std::shared_ptr<Database> input_db_, std::vector<IO::MeshDataStruct>& visData_,
SubPhase& Averages_, fillHalo<double>& fillData_, runAnalysis::commWrapper&& comm_ ):
timestep(timestep_), input_db(input_db_), visData(visData_), Averages(Averages_), fillData(fillData_), comm(std::move(comm_))
IOWorkItem( int timestep_, std::shared_ptr<Database> input_db_,
std::vector<IO::MeshDataStruct> &visData_, SubPhase &Averages_, std::array<int, 3> n_,
RankInfoStruct rank_info_, runAnalysis::commWrapper &&comm_ )
: timestep( timestep_ ),
input_db( input_db_ ),
visData( visData_ ),
Averages( Averages_ ),
n( std::move( n_ ) ),
rank_info( std::move( rank_info_ ) ),
comm( std::move( comm_ ) )
{
}
~IOWorkItem() {}
virtual void run() {
virtual void run()
{
PROFILE_START( "Save Vis", 1 );
auto color_db = input_db->getDatabase( "Color" );
auto vis_db = input_db->getDatabase( "Visualization" );
// int timestep = color_db->getWithDefault<int>( "timestep", 0 );
PROFILE_START("Save Vis",1);
fillHalo<double> fillData( comm.comm, rank_info, n, { 1, 1, 1 }, 0, 1 );
if ( vis_db->getWithDefault<bool>( "save_phase_field", true ) ) {
ASSERT( visData[0].vars[0]->name == "phase" );
@ -268,13 +319,15 @@ public:
PROFILE_STOP( "Save Vis", 1 );
};
private:
IOWorkItem();
int timestep;
std::array<int, 3> n;
RankInfoStruct rank_info;
std::shared_ptr<Database> input_db;
std::vector<IO::MeshDataStruct> &visData;
SubPhase &Averages;
fillHalo<double>& fillData;
runAnalysis::commWrapper comm;
};
@ -284,12 +337,19 @@ private:
class AnalysisWorkItem : public ThreadPool::WorkItemRet<void>
{
public:
AnalysisWorkItem( AnalysisType type_, int timestep_, TwoPhase& Averages_,
BlobIDstruct ids, BlobIDList id_list_, double beta_ ):
type(type_), timestep(timestep_), Averages(Averages_),
blob_ids(ids), id_list(id_list_), beta(beta_) { }
AnalysisWorkItem( AnalysisType type_, int timestep_, TwoPhase &Averages_, BlobIDstruct ids,
BlobIDList id_list_, double beta_ )
: type( type_ ),
timestep( timestep_ ),
Averages( Averages_ ),
blob_ids( ids ),
id_list( id_list_ ),
beta( beta_ )
{
}
~AnalysisWorkItem() {}
virtual void run() {
virtual void run()
{
Averages.NumberComponents_NWP = blob_ids->first;
Averages.Label_NWP = blob_ids->second;
Averages.Label_NWP_map = *id_list;
@ -316,6 +376,7 @@ public:
PROFILE_STOP( "Compute dist", 1 );
}
}
private:
AnalysisWorkItem();
AnalysisType type;
@ -330,12 +391,19 @@ private:
class TCATWorkItem : public ThreadPool::WorkItemRet<void>
{
public:
TCATWorkItem( AnalysisType type_, int timestep_, TwoPhase& Averages_,
BlobIDstruct ids, BlobIDList id_list_, double beta_ ):
type(type_), timestep(timestep_), Averages(Averages_),
blob_ids(ids), id_list(id_list_), beta(beta_) { }
TCATWorkItem( AnalysisType type_, int timestep_, TwoPhase &Averages_, BlobIDstruct ids,
BlobIDList id_list_, double beta_ )
: type( type_ ),
timestep( timestep_ ),
Averages( Averages_ ),
blob_ids( ids ),
id_list( id_list_ ),
beta( beta_ )
{
}
~TCATWorkItem() {}
virtual void run() {
virtual void run()
{
Averages.NumberComponents_NWP = blob_ids->first;
Averages.Label_NWP = blob_ids->second;
Averages.Label_NWP_map = *id_list;
@ -358,6 +426,7 @@ public:
PROFILE_STOP( "Compute TCAT", 1 );
}
}
private:
TCATWorkItem();
AnalysisType type;
@ -373,11 +442,18 @@ class GanglionTrackingWorkItem: public ThreadPool::WorkItemRet<void>
{
public:
GanglionTrackingWorkItem( AnalysisType type_, int timestep_, TwoPhase &Averages_,
BlobIDstruct ids, BlobIDList id_list_, double beta_ ):
type(type_), timestep(timestep_), Averages(Averages_),
blob_ids(ids), id_list(id_list_), beta(beta_) { }
BlobIDstruct ids, BlobIDList id_list_, double beta_ )
: type( type_ ),
timestep( timestep_ ),
Averages( Averages_ ),
blob_ids( ids ),
id_list( id_list_ ),
beta( beta_ )
{
}
~GanglionTrackingWorkItem() {}
virtual void run() {
virtual void run()
{
Averages.NumberComponents_NWP = blob_ids->first;
Averages.Label_NWP = blob_ids->second;
Averages.Label_NWP_map = *id_list;
@ -400,6 +476,7 @@ public:
PROFILE_STOP( "Compute ganglion", 1 );
}
}
private:
GanglionTrackingWorkItem();
AnalysisType type;
@ -414,10 +491,13 @@ private:
class BasicWorkItem : public ThreadPool::WorkItemRet<void>
{
public:
BasicWorkItem( AnalysisType type_, int timestep_, SubPhase& Averages_ ):
type(type_), timestep(timestep_), Averages(Averages_){ }
BasicWorkItem( AnalysisType type_, int timestep_, SubPhase &Averages_ )
: type( type_ ), timestep( timestep_ ), Averages( Averages_ )
{
}
~BasicWorkItem() {}
virtual void run() {
virtual void run()
{
if ( matches( type, AnalysisType::CopyPhaseIndicator ) ) {
// Averages.ColorToSignedDistance(beta,Averages.Phase,Averages.Phase_tplus);
@ -428,6 +508,7 @@ public:
PROFILE_STOP( "Compute basic averages", 1 );
}
}
private:
BasicWorkItem();
AnalysisType type;
@ -439,16 +520,20 @@ private:
class SubphaseWorkItem : public ThreadPool::WorkItemRet<void>
{
public:
SubphaseWorkItem( AnalysisType type_, int timestep_, SubPhase& Averages_ ):
type(type_), timestep(timestep_), Averages(Averages_){ }
SubphaseWorkItem( AnalysisType type_, int timestep_, SubPhase &Averages_ )
: type( type_ ), timestep( timestep_ ), Averages( Averages_ )
{
}
~SubphaseWorkItem() {}
virtual void run() {
virtual void run()
{
PROFILE_START( "Compute subphase", 1 );
Averages.Full();
Averages.Write( timestep );
PROFILE_STOP( "Compute subphase", 1 );
}
private:
SubphaseWorkItem();
AnalysisType type;
@ -458,20 +543,16 @@ private:
};
/******************************************************************
* MPI comm wrapper for use with analysis *
******************************************************************/
runAnalysis::commWrapper::commWrapper( int tag_, const Utilities::MPI& comm_, runAnalysis* analysis_ ):
comm(comm_),
tag(tag_),
analysis(analysis_)
runAnalysis::commWrapper::commWrapper(
int tag_, const Utilities::MPI &comm_, runAnalysis *analysis_ )
: comm( comm_ ), tag( tag_ ), analysis( analysis_ )
{
}
runAnalysis::commWrapper::commWrapper( commWrapper &&rhs ):
comm(rhs.comm),
tag(rhs.tag),
analysis(rhs.analysis)
runAnalysis::commWrapper::commWrapper( commWrapper &&rhs )
: comm( rhs.comm ), tag( rhs.tag ), analysis( rhs.analysis )
{
rhs.tag = -1;
}
@ -507,14 +588,10 @@ runAnalysis::commWrapper runAnalysis::getComm( )
/******************************************************************
* Constructor/Destructors *
******************************************************************/
runAnalysis::runAnalysis( std::shared_ptr<Database> input_db,
const RankInfoStruct& rank_info,
std::shared_ptr<ScaLBL_Communicator> ScaLBL_Comm,
std::shared_ptr<Domain> Dm,
int Np,
bool Regular,
IntArray Map ):
d_Np( Np ),
runAnalysis::runAnalysis( std::shared_ptr<Database> input_db, const RankInfoStruct &rank_info,
std::shared_ptr<ScaLBL_Communicator> ScaLBL_Comm, std::shared_ptr<Domain> Dm, int Np,
bool Regular, IntArray Map )
: d_Np( Np ),
d_regular( Regular ),
d_rank_info( rank_info ),
d_Map( Map ),
@ -568,7 +645,8 @@ runAnalysis::runAnalysis( std::shared_ptr<Database> input_db,
d_meshData.resize( 1 );
d_meshData[0].meshName = "domain";
d_meshData[0].mesh = std::make_shared<IO::DomainMesh>( d_rank_info,d_n[0],d_n[1],d_n[2],Dm->Lx,Dm->Ly,Dm->Lz );
d_meshData[0].mesh = std::make_shared<IO::DomainMesh>(
d_rank_info, d_n[0], d_n[1], d_n[2], Dm->Lx, Dm->Ly, Dm->Lz );
auto PhaseVar = std::make_shared<IO::Variable>();
auto PressVar = std::make_shared<IO::Variable>();
auto VxVar = std::make_shared<IO::Variable>();
@ -678,7 +756,8 @@ void runAnalysis::createThreads( const std::string& method, int N_threads )
// Check if we have thread support
auto thread_support = Utilities::MPI::queryThreadSupport();
if ( thread_support != Utilities::MPI::ThreadSupport::MULTIPLE && N_threads > 0 )
std::cerr << "Warning: Failed to start MPI with necessary thread support, errors may occur\n";
std::cerr
<< "Warning: Failed to start MPI with necessary thread support, errors may occur\n";
// Create the threads
const auto cores = d_tpool.getProcessAffinity();
if ( N_threads == 0 ) {
@ -758,12 +837,11 @@ AnalysisType runAnalysis::computeAnalysisType( int timestep )
}
/******************************************************************
* Run the analysis *
******************************************************************/
void runAnalysis::run(int timestep, std::shared_ptr<Database> input_db, TwoPhase& Averages, const double *Phi,
double *Pressure, double *Velocity, double *fq, double *Den)
void runAnalysis::run( int timestep, std::shared_ptr<Database> input_db, TwoPhase &Averages,
const double *Phi, double *Pressure, double *Velocity, double *fq, double *Den )
{
int N = d_N[0] * d_N[1] * d_N[2];
NULL_USE( N );
@ -794,7 +872,7 @@ void runAnalysis::run(int timestep, std::shared_ptr<Database> input_db, TwoPhase
matches(type,AnalysisType::CopySimState) ||
matches(type,AnalysisType::IdentifyBlobs) )
{
phase = std::shared_ptr<DoubleArray>(new DoubleArray(d_N[0],d_N[1],d_N[2]));
phase = std::make_shared<DoubleArray>(d_N[0],d_N[1],d_N[2]);
//ScaLBL_CopyToHost(phase->data(),Phi,N*sizeof(double));
// try 2 d_ScaLBL_Comm.RegulLayout(d_Map,Phi,Averages.Phase);
// memcpy(Averages.Phase.data(),phase->data(),N*sizeof(double));
@ -862,8 +940,8 @@ void runAnalysis::run(int timestep, std::shared_ptr<Database> input_db, TwoPhase
// if ( matches(type,AnalysisType::CreateRestart) ) {
if ( timestep % d_restart_interval == 0 ) {
// Copy restart data to the CPU
cDen = std::shared_ptr<double>(new double[2*d_Np],DeleteArray<double>);
cfq = std::shared_ptr<double>(new double[19*d_Np],DeleteArray<double>);
cDen = make_shared_array<double>( 2 * d_Np );
cfq = make_shared_array<double>( 19 * d_Np );
ScaLBL_CopyToHost( cfq.get(), fq, 19 * d_Np * sizeof( double ) );
ScaLBL_CopyToHost( cDen.get(), Den, 2 * d_Np * sizeof( double ) );
}
@ -871,15 +949,15 @@ void runAnalysis::run(int timestep, std::shared_ptr<Database> input_db, TwoPhase
// Spawn threads to do blob identification work
if ( matches( type, AnalysisType::IdentifyBlobs ) ) {
phase = std::shared_ptr<DoubleArray>(new DoubleArray(d_N[0],d_N[1],d_N[2]));
phase = std::make_shared<DoubleArray>( d_N[0], d_N[1], d_N[2] );
if ( d_regular )
d_ScaLBL_Comm->RegularLayout( d_Map, Phi, *phase );
else
ScaLBL_CopyToHost( phase->data(), Phi, N * sizeof( double ) );
BlobIDstruct new_index(new std::pair<int,IntArray>(0,IntArray()));
BlobIDstruct new_ids(new std::pair<int,IntArray>(0,IntArray()));
BlobIDList new_list(new std::vector<BlobIDType>());
auto new_index = std::make_shared<std::pair<int, IntArray>>( 0, IntArray() );
auto new_ids = std::make_shared<std::pair<int, IntArray>>( 0, IntArray() );
auto new_list = std::make_shared<std::vector<BlobIDType>>();
auto work1 = new BlobIdentificationWorkItem1( timestep, d_N[0], d_N[1], d_N[2], d_rank_info,
phase, Averages.SDs, d_last_ids, new_index, new_ids, new_list, getComm() );
auto work2 = new BlobIdentificationWorkItem2( timestep, d_N[0], d_N[1], d_N[2], d_rank_info,
@ -896,7 +974,8 @@ void runAnalysis::run(int timestep, std::shared_ptr<Database> input_db, TwoPhase
// if (timestep%d_restart_interval==0){
// if ( matches(type,AnalysisType::ComputeAverages) ) {
if ( timestep % d_analysis_interval == 0 ) {
auto work = new AnalysisWorkItem(type,timestep,Averages,d_last_index,d_last_id_map,d_beta);
auto work =
new AnalysisWorkItem( type, timestep, Averages, d_last_index, d_last_id_map, d_beta );
work->add_dependency( d_wait_blobID );
work->add_dependency( d_wait_analysis );
work->add_dependency( d_wait_vis ); // Make sure we are done using analysis before modifying
@ -923,9 +1002,8 @@ void runAnalysis::run(int timestep, std::shared_ptr<Database> input_db, TwoPhase
// if ( matches(type,AnalysisType::CreateRestart) ) {
if ( timestep % d_restart_interval == 0 ) {
// Write the vis files
commWrapper comm = getComm();
fillHalo<double> fillData( comm.comm, d_rank_info, d_n, {1,1,1}, 0, 1 );
auto work = new WriteVisWorkItem( timestep, d_meshData, Averages, fillData, std::move( comm ) );
auto work =
new WriteVisWorkItem( timestep, d_meshData, Averages, d_n, d_rank_info, getComm() );
work->add_dependency( d_wait_blobID );
work->add_dependency( d_wait_analysis );
work->add_dependency( d_wait_vis );
@ -938,7 +1016,8 @@ void runAnalysis::run(int timestep, std::shared_ptr<Database> input_db, TwoPhase
/******************************************************************
* Run the analysis *
******************************************************************/
void runAnalysis::basic(int timestep, std::shared_ptr<Database> input_db, SubPhase &Averages, const double *Phi, double *Pressure, double *Velocity, double *fq, double *Den)
void runAnalysis::basic( int timestep, std::shared_ptr<Database> input_db, SubPhase &Averages,
const double *Phi, double *Pressure, double *Velocity, double *fq, double *Den )
{
int Nx = d_N[0];
int Ny = d_N[1];
@ -998,7 +1077,8 @@ void runAnalysis::basic(int timestep, std::shared_ptr<Database> input_db, SubPha
// if ( matches(type,AnalysisType::ComputeAverages) ) {
if ( timestep % d_analysis_interval == 0 ) {
auto work = new BasicWorkItem( type, timestep, Averages );
work->add_dependency(d_wait_subphase); // Make sure we are done using analysis before modifying
work->add_dependency(
d_wait_subphase ); // Make sure we are done using analysis before modifying
work->add_dependency( d_wait_analysis );
work->add_dependency( d_wait_vis );
d_wait_analysis = d_tpool.add_work( work );
@ -1006,7 +1086,8 @@ void runAnalysis::basic(int timestep, std::shared_ptr<Database> input_db, SubPha
if ( timestep % d_subphase_analysis_interval == 0 ) {
auto work = new SubphaseWorkItem( type, timestep, Averages );
work->add_dependency(d_wait_subphase); // Make sure we are done using analysis before modifying
work->add_dependency(
d_wait_subphase ); // Make sure we are done using analysis before modifying
work->add_dependency( d_wait_analysis );
work->add_dependency( d_wait_vis );
d_wait_subphase = d_tpool.add_work( work );
@ -1015,8 +1096,8 @@ void runAnalysis::basic(int timestep, std::shared_ptr<Database> input_db, SubPha
if ( timestep % d_restart_interval == 0 ) {
std::shared_ptr<double> cfq, cDen;
// Copy restart data to the CPU
cDen = std::shared_ptr<double>(new double[2*d_Np],DeleteArray<double>);
cfq = std::shared_ptr<double>(new double[19*d_Np],DeleteArray<double>);
cDen = make_shared_array<double>( 2 * d_Np );
cfq = make_shared_array<double>( 19 * d_Np );
ScaLBL_CopyToHost( cfq.get(), fq, 19 * d_Np * sizeof( double ) );
ScaLBL_CopyToHost( cDen.get(), Den, 2 * d_Np * sizeof( double ) );
@ -1027,20 +1108,17 @@ void runAnalysis::basic(int timestep, std::shared_ptr<Database> input_db, SubPha
std::ofstream OutStream( "Restart.db" );
input_db->print( OutStream, "" );
OutStream.close();
}
// Write the restart file (using a seperate thread)
auto work1 = new WriteRestartWorkItem( d_restartFile.c_str(), cDen, cfq, d_Np );
work1->add_dependency( d_wait_restart );
d_wait_restart = d_tpool.add_work( work1 );
}
if ( timestep % d_visualization_interval == 0 ) {
// Write the vis files
commWrapper comm = getComm();
fillHalo<double> fillData( comm.comm, d_rank_info, {Nx-2,Ny-2,Nz-2}, {1,1,1}, 0, 1 );
auto work = new IOWorkItem( timestep, input_db, d_meshData, Averages, fillData, std::move( comm ) );
auto work =
new IOWorkItem( timestep, input_db, d_meshData, Averages, d_n, d_rank_info, getComm() );
work->add_dependency( d_wait_analysis );
work->add_dependency( d_wait_subphase );
work->add_dependency( d_wait_vis );
@ -1050,7 +1128,9 @@ void runAnalysis::basic(int timestep, std::shared_ptr<Database> input_db, SubPha
PROFILE_STOP( "basic" );
}
void runAnalysis::WriteVisData(int timestep, std::shared_ptr<Database> input_db, SubPhase &Averages, const double *Phi, double *Pressure, double *Velocity, double *fq, double *Den)
void runAnalysis::WriteVisData( int timestep, std::shared_ptr<Database> input_db,
SubPhase &Averages, const double *Phi, double *Pressure, double *Velocity, double *fq,
double *Den )
{
auto color_db = input_db->getDatabase( "Color" );
auto vis_db = input_db->getDatabase( "Visualization" );
@ -1073,9 +1153,8 @@ void runAnalysis::WriteVisData(int timestep, std::shared_ptr<Database> input_db,
PROFILE_START( "write vis", 1 );
// if (Averages.WriteVis == true){
commWrapper comm = getComm();
fillHalo<double> fillData( comm.comm, d_rank_info, d_n, {1,1,1}, 0, 1 );
auto work2 = new IOWorkItem(timestep, input_db, d_meshData, Averages, fillData, std::move( comm ) );
auto work2 =
new IOWorkItem( timestep, input_db, d_meshData, Averages, d_n, d_rank_info, getComm() );
work2->add_dependency( d_wait_vis );
d_wait_vis = d_tpool.add_work( work2 );

View File

@ -1,31 +1,36 @@
#ifndef RunAnalysis_H_INC
#define RunAnalysis_H_INC
#include "analysis/analysis.h"
#include "analysis/TwoPhase.h"
#include "analysis/SubPhase.h"
#include "analysis/TwoPhase.h"
#include "analysis/analysis.h"
#include "common/Communication.h"
#include "common/ScaLBL.h"
#include "threadpool/thread_pool.h"
#include <limits.h>
typedef std::shared_ptr<std::pair<int,IntArray>> BlobIDstruct;
typedef std::shared_ptr<std::vector<BlobIDType>> BlobIDList;
// Types of analysis
enum class AnalysisType : uint64_t { AnalyzeNone=0, IdentifyBlobs=0x01, CopyPhaseIndicator=0x02,
CopySimState=0x04, ComputeAverages=0x08, CreateRestart=0x10, WriteVis=0x20, ComputeSubphase=0x40 };
enum class AnalysisType : uint64_t {
AnalyzeNone = 0,
IdentifyBlobs = 0x01,
CopyPhaseIndicator = 0x02,
CopySimState = 0x04,
ComputeAverages = 0x08,
CreateRestart = 0x10,
WriteVis = 0x20,
ComputeSubphase = 0x40
};
//! Class to run the analysis in multiple threads
class runAnalysis
{
public:
//! Constructor
runAnalysis( std::shared_ptr<Database> db, const RankInfoStruct &rank_info,
std::shared_ptr<ScaLBL_Communicator> ScaLBL_Comm, std::shared_ptr <Domain> dm, int Np, bool Regular, IntArray Map );
std::shared_ptr<ScaLBL_Communicator> ScaLBL_Comm, std::shared_ptr<Domain> dm, int Np,
bool Regular, IntArray Map );
//! Destructor
~runAnalysis();
@ -34,8 +39,10 @@ public:
void run( int timestep, std::shared_ptr<Database> db, TwoPhase &Averages, const double *Phi,
double *Pressure, double *Velocity, double *fq, double *Den );
void basic( int timestep, std::shared_ptr<Database> db, SubPhase &Averages, const double *Phi, double *Pressure, double *Velocity, double *fq, double *Den );
void WriteVisData(int timestep, std::shared_ptr<Database> vis_db, SubPhase &Averages, const double *Phi, double *Pressure, double *Velocity, double *fq, double *Den);
void basic( int timestep, std::shared_ptr<Database> db, SubPhase &Averages, const double *Phi,
double *Pressure, double *Velocity, double *fq, double *Den );
void WriteVisData( int timestep, std::shared_ptr<Database> vis_db, SubPhase &Averages,
const double *Phi, double *Pressure, double *Velocity, double *fq, double *Den );
//! Finish all active analysis
void finish();
@ -44,7 +51,8 @@ public:
* \brief Set the affinities
* \details This function will create the analysis threads and set the affinity
* of this thread and all analysis threads. If MPI_THREAD_MULTIPLE is not
* enabled, the analysis threads will be disabled and the analysis will run in the current thread.
* enabled, the analysis threads will be disabled and the analysis will run in the current
* thread.
* @param[in] method Method used to control the affinities:
* none - Don't use threads (runs all analysis in the current thread)
* default - Create the specified number of threads, but don't load balance
@ -57,14 +65,12 @@ public:
private:
runAnalysis();
// Determine the analysis to perform
AnalysisType computeAnalysisType( int timestep );
public:
class commWrapper
{
public:
@ -83,9 +89,8 @@ public:
commWrapper getComm();
private:
std::array<int, 3> d_n; // Number of local cells
std::array<int,3> d_N; // NNumber of local cells with ghosts
std::array<int, 3> d_N; // Number of local cells with ghosts
int d_Np;
int d_rank;
int d_restart_interval, d_analysis_interval, d_blobid_interval, d_visualization_interval;
@ -95,9 +100,9 @@ private:
ThreadPool d_tpool;
RankInfoStruct d_rank_info;
IntArray d_Map;
BlobIDstruct d_last_ids;
BlobIDstruct d_last_index;
BlobIDList d_last_id_map;
std::shared_ptr<std::pair<int, IntArray>> d_last_ids;
std::shared_ptr<std::pair<int, IntArray>> d_last_index;
std::shared_ptr<std::vector<BlobIDType>> d_last_id_map;
std::vector<IO::MeshDataStruct> d_meshData;
std::string d_restartFile;
Utilities::MPI d_comm;
@ -114,8 +119,6 @@ private:
// Friends
friend commWrapper::~commWrapper();
};
#endif

View File

@ -67,6 +67,10 @@ public:
//! Destructor
~fillHalo( );
fillHalo() = delete;
fillHalo(const fillHalo&) = delete;
fillHalo& operator=(const fillHalo&) = delete;
/*!
* @brief Communicate the halos
* @param[in] array The array on which we fill the halos
@ -93,9 +97,6 @@ private:
TYPE *mem;
TYPE *send[3][3][3], *recv[3][3][3];
MPI_Request send_req[3][3][3], recv_req[3][3][3];
fillHalo(); // Private empty constructor
fillHalo(const fillHalo&); // Private copy constructor
fillHalo& operator=(const fillHalo&); // Private assignment operator
void pack( const Array<TYPE>& array, int i, int j, int k, TYPE *buffer );
void unpack( Array<TYPE>& array, int i, int j, int k, const TYPE *buffer );
};
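A side note, not part of the commit: the change above swaps the old pre-C++11 idiom (declare the copy constructor and assignment operator private and leave them undefined) for explicit = delete, which turns any accidental copy into a clear compile-time error, even from within the class. A generic sketch with a hypothetical class name, not taken from these sources:
// Hypothetical example class illustrating the = delete idiom.
class NonCopyable {
public:
    NonCopyable() = default;
    NonCopyable( const NonCopyable & ) = delete;            // copying is a compile error
    NonCopyable &operator=( const NonCopyable & ) = delete; // assignment too
};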

View File

@ -558,17 +558,13 @@ void Domain::Decomp( const std::string& Filename )
int64_t z_transition_size = (nprocz*nz - (global_Nz - zStart))/2;
if (z_transition_size < 0) z_transition_size=0;
char LocalRankFilename[40];
char *loc_id;
loc_id = new char [(nx+2)*(ny+2)*(nz+2)];
// Set up the sub-domains
if (RANK==0){
printf("Distributing subdomains across %i processors \n",nprocs);
printf("Process grid: %i x %i x %i \n",nprocx,nprocy,nprocz);
printf("Subdomain size: %i x %i x %i \n",nx,ny,nz);
printf("Size of transition region: %ld \n", z_transition_size);
auto loc_id = new char [(nx+2)*(ny+2)*(nz+2)];
for (int kp=0; kp<nprocz; kp++){
for (int jp=0; jp<nprocy; jp++){
for (int ip=0; ip<nprocx; ip++){
@ -609,6 +605,7 @@ void Domain::Decomp( const std::string& Filename )
Comm.send(loc_id,N,rnk,15);
}
// Write the data for this rank data
char LocalRankFilename[40];
sprintf(LocalRankFilename,"ID.%05i",rnk+rank_offset);
FILE *ID = fopen(LocalRankFilename,"wb");
fwrite(loc_id,1,(nx+2)*(ny+2)*(nz+2),ID);
@ -616,9 +613,8 @@ void Domain::Decomp( const std::string& Filename )
}
}
}
}
else{
delete [] loc_id;
} else {
// Recieve the subdomain from rank = 0
//printf("Ready to recieve data %i at process %i \n", N,rank);
Comm.recv(id.data(),N,0,15);
@ -645,6 +641,7 @@ void Domain::Decomp( const std::string& Filename )
porosity = sum*iVol_global;
if (rank()==0) printf("Media porosity = %f \n",porosity);
//.........................................................
delete [] SegData;
}
void Domain::AggregateLabels( const std::string& filename ){
@ -669,8 +666,7 @@ void Domain::AggregateLabels( const std::string& filename ){
int local_size = (nx-2)*(ny-2)*(nz-2);
long int full_size = long(full_nx)*long(full_ny)*long(full_nz);
signed char *LocalID;
LocalID = new signed char [local_size];
auto LocalID = new signed char [local_size];
//printf("aggregate labels: local size=%i, global size = %i",local_size, full_size);
// assign the ID for the local sub-region
@ -687,8 +683,7 @@ void Domain::AggregateLabels( const std::string& filename ){
// populate the FullID
if (rank() == 0){
signed char *FullID;
FullID = new signed char [full_size];
auto FullID = new signed char [full_size];
// first handle local ID for rank 0
for (int k=1; k<nz-1; k++){
for (int j=1; j<ny-1; j++){
@ -727,6 +722,7 @@ void Domain::AggregateLabels( const std::string& filename ){
FILE *OUTFILE = fopen(filename.c_str(),"wb");
fwrite(FullID,1,full_size,OUTFILE);
fclose(OUTFILE);
delete [] FullID;
}
else{
// send LocalID to rank=0
@ -734,8 +730,8 @@ void Domain::AggregateLabels( const std::string& filename ){
int dstrank = 0;
Comm.send(LocalID,local_size,dstrank,tag);
}
delete [] LocalID;
Comm.barrier();
}
/********************************************************
@ -1232,11 +1228,10 @@ void Domain::ReadFromFile(const std::string& Filename,const std::string& Datatyp
// user needs to modify the input file accordingly before LBPM simulator read
// the input file.
//........................................................................................
int rank_offset = 0;
int RANK = rank();
int nprocs, nprocx, nprocy, nprocz, nx, ny, nz;
int64_t global_Nx,global_Ny,global_Nz;
int64_t i,j,k,n;
int64_t i,j,k;
//TODO These offset we may still need them
int64_t xStart,yStart,zStart;
xStart=yStart=zStart=0;
@ -1393,7 +1388,6 @@ void Domain::AggregateLabels( const std::string& filename, DoubleArray &UserData
for (int k=1; k<nz-1; k++){
for (int j=1; j<ny-1; j++){
for (int i=1; i<nx-1; i++){
int n = k*nx*ny+j*nx+i;
double local_id_val = UserData(i,j,k);
LocalID[(k-1)*(nx-2)*(ny-2) + (j-1)*(nx-2) + i-1] = local_id_val;
}

View File

@ -401,7 +401,6 @@ MPI_CLASS::MPI_CLASS()
communicator = MPI_CLASS_COMM_NULL;
d_maxTag = mpi_max_tag;
#endif
d_ranks = nullptr;
d_count = nullptr;
d_manage = false;
comm_rank = 0;
@ -435,8 +434,6 @@ void MPI_CLASS::reset()
++N_MPI_Comm_destroyed;
#endif
}
if ( d_ranks != nullptr )
delete[] d_ranks;
delete d_count;
}
if ( d_currentTag == nullptr ) {
@ -448,7 +445,6 @@ void MPI_CLASS::reset()
}
d_manage = false;
d_count = nullptr;
d_ranks = nullptr;
comm_rank = 0;
comm_size = 1;
d_maxTag = 0;
@ -467,7 +463,6 @@ MPI_CLASS::MPI_CLASS( const MPI_CLASS &comm )
d_manage( comm.d_manage ),
comm_rank( comm.comm_rank ),
comm_size( comm.comm_size ),
d_ranks( comm.d_ranks ),
d_maxTag( comm.d_maxTag ),
d_currentTag( comm.d_currentTag )
{
@ -490,7 +485,6 @@ MPI_CLASS::MPI_CLASS( MPI_CLASS &&rhs ) : MPI_CLASS()
std::swap( profile_level, rhs.profile_level );
std::swap( comm_rank, rhs.comm_rank );
std::swap( comm_size, rhs.comm_size );
std::swap( d_ranks, rhs.d_ranks );
std::swap( d_maxTag, rhs.d_maxTag );
std::swap( d_currentTag, rhs.d_currentTag );
std::swap( d_count, rhs.d_count );
@ -511,7 +505,6 @@ MPI_CLASS &MPI_CLASS::operator=( const MPI_CLASS &comm )
this->communicator = comm.communicator;
this->comm_rank = comm.comm_rank;
this->comm_size = comm.comm_size;
this->d_ranks = comm.d_ranks;
this->d_isNull = comm.d_isNull;
this->d_manage = comm.d_manage;
this->d_maxTag = comm.d_maxTag;
@ -537,7 +530,6 @@ MPI_CLASS &MPI_CLASS::operator=( MPI_CLASS &&rhs )
std::swap( profile_level, rhs.profile_level );
std::swap( comm_rank, rhs.comm_rank );
std::swap( comm_size, rhs.comm_size );
std::swap( d_ranks, rhs.d_ranks );
std::swap( d_maxTag, rhs.d_maxTag );
std::swap( d_currentTag, rhs.d_currentTag );
std::swap( d_count, rhs.d_count );
@ -560,7 +552,6 @@ std::atomic_int d_global_count_self = { 1 };
MPI_CLASS::MPI_CLASS( MPI_Comm comm, bool manage )
{
d_count = nullptr;
d_ranks = nullptr;
d_manage = false;
tmp_alignment = -1;
// Check if we are using our version of comm_world
@ -623,11 +614,7 @@ MPI_CLASS::MPI_CLASS( MPI_Comm comm, bool manage )
}
if ( d_manage )
++N_MPI_Comm_created;
// Create d_ranks
if ( comm_size > 1 ) {
d_ranks = new int[comm_size];
d_ranks[0] = -1;
}
#else
// We are not using MPI, intialize based on the communicator
NULL_USE( manage );
@ -663,34 +650,32 @@ MPI_CLASS::MPI_CLASS( MPI_Comm comm, bool manage )
************************************************************************/
std::vector<int> MPI_CLASS::globalRanks() const
{
// Get my global rank if it has not been set
static int myGlobalRank = -1;
if ( myGlobalRank == -1 ) {
#ifdef USE_MPI
if ( MPI_active() )
MPI_Comm_rank( MPI_CLASS_COMM_WORLD, &myGlobalRank );
#else
myGlobalRank = 0;
#endif
}
// Check if we are dealing with a serial or null communicator
if ( comm_size == 1 )
return std::vector<int>( 1, myGlobalRank );
if ( d_ranks == nullptr || communicator == MPI_COMM_NULL )
if ( d_isNull )
return std::vector<int>();
// Fill d_ranks if necessary
if ( d_ranks[0] == -1 ) {
if ( communicator == MPI_CLASS_COMM_WORLD ) {
for ( int i = 0; i < comm_size; i++ )
d_ranks[i] = i;
} else {
MPI_ASSERT( myGlobalRank != -1 );
this->allGather( myGlobalRank, d_ranks );
#ifdef USE_MPI
// Get my global rank and size if it has not been set
static int globalRank = -1;
static int globalSize = -1;
if ( globalRank == -1 && MPI_active() ) {
MPI_Comm_rank( MPI_CLASS_COMM_WORLD, &globalRank );
MPI_Comm_size( MPI_CLASS_COMM_WORLD, &globalSize );
}
// Check if we are dealing with a serial or global communicator
if ( comm_size == 1 )
return std::vector<int>( 1, globalRank );
if ( comm_size == globalSize ) {
std::vector<int> ranks( globalSize );
for ( int i = 0; i < globalSize; i++ )
ranks[i] = i;
return ranks;
}
// Return d_ranks
return std::vector<int>( d_ranks, d_ranks + comm_size );
// Get the global rank from each rank in the communicator
auto ranks = allGather( globalRank );
std::sort( ranks.begin(), ranks.end() );
return ranks;
#else
return std::vector<int>( 1, 1 );
#endif
}
@ -2806,7 +2791,6 @@ MPI_Request MPI_CLASS::IrecvBytes(
}
/************************************************************************
* sendrecv *
************************************************************************/
@ -2816,9 +2800,8 @@ void MPI_CLASS::sendrecv<char>( const char* sendbuf, int sendcount, int dest, in
char *recvbuf, int recvcount, int source, int recvtag ) const
{
PROFILE_START( "sendrecv<char>", profile_level );
MPI_Sendrecv( sendbuf, sendcount, MPI_CHAR, dest, sendtag,
recvbuf, recvcount, MPI_CHAR, source, recvtag,
communicator, MPI_STATUS_IGNORE );
MPI_Sendrecv( sendbuf, sendcount, MPI_CHAR, dest, sendtag, recvbuf, recvcount, MPI_CHAR, source,
recvtag, communicator, MPI_STATUS_IGNORE );
PROFILE_STOP( "sendrecv<char>", profile_level );
}
template<>
@ -2826,9 +2809,8 @@ void MPI_CLASS::sendrecv<int>( const int* sendbuf, int sendcount, int dest, int
int *recvbuf, int recvcount, int source, int recvtag ) const
{
PROFILE_START( "sendrecv<int>", profile_level );
MPI_Sendrecv( sendbuf, sendcount, MPI_INT, dest, sendtag,
recvbuf, recvcount, MPI_INT, source, recvtag,
communicator, MPI_STATUS_IGNORE );
MPI_Sendrecv( sendbuf, sendcount, MPI_INT, dest, sendtag, recvbuf, recvcount, MPI_INT, source,
recvtag, communicator, MPI_STATUS_IGNORE );
PROFILE_STOP( "sendrecv<int>", profile_level );
}
template<>
@ -2836,9 +2818,8 @@ void MPI_CLASS::sendrecv<float>( const float* sendbuf, int sendcount, int dest,
float *recvbuf, int recvcount, int source, int recvtag ) const
{
PROFILE_START( "sendrecv<float>", profile_level );
MPI_Sendrecv( sendbuf, sendcount, MPI_FLOAT, dest, sendtag,
recvbuf, recvcount, MPI_FLOAT, source, recvtag,
communicator, MPI_STATUS_IGNORE );
MPI_Sendrecv( sendbuf, sendcount, MPI_FLOAT, dest, sendtag, recvbuf, recvcount, MPI_FLOAT,
source, recvtag, communicator, MPI_STATUS_IGNORE );
PROFILE_STOP( "sendrecv<float>", profile_level );
}
template<>
@ -2846,9 +2827,8 @@ void MPI_CLASS::sendrecv<double>( const double* sendbuf, int sendcount, int dest
double *recvbuf, int recvcount, int source, int recvtag ) const
{
PROFILE_START( "sendrecv<double>", profile_level );
MPI_Sendrecv( sendbuf, sendcount, MPI_DOUBLE, dest, sendtag,
recvbuf, recvcount, MPI_DOUBLE, source, recvtag,
communicator, MPI_STATUS_IGNORE );
MPI_Sendrecv( sendbuf, sendcount, MPI_DOUBLE, dest, sendtag, recvbuf, recvcount, MPI_DOUBLE,
source, recvtag, communicator, MPI_STATUS_IGNORE );
PROFILE_STOP( "sendrecv<double>", profile_level );
}
#endif
@ -3828,4 +3808,3 @@ MPI MPI::loadBalance( double local, std::vector<double> work )
} // namespace Utilities

View File

@ -8,10 +8,14 @@ Copyright (c) 2012 UT-Battelle, LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Collection of administrative costs for redistribution of the source code or binary form is allowed. However, collection of a royalty or other fee in excess of good faith amount for cost recovery for such redistribution is prohibited.
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met: Redistributions of source code must retain the above
copyright notice, this list of conditions and the following disclaimer. Redistributions in binary
form must reproduce the above copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the distribution. Collection of
administrative costs for redistribution of the source code or binary form is allowed. However,
collection of a royalty or other fee in excess of good faith amount for cost recovery for such
redistribution is prohibited.
*/
@ -274,6 +278,8 @@ public: // Member functions
* \brief Return the global ranks for the comm
* \details This returns a vector which contains the global ranks for each
* member of the communicator. The global ranks are defined according to WORLD comm.
* Note: this function is a blocking collective on the current communicator
* (unless the current communicator is global, self, or null)
*/
std::vector<int> globalRanks() const;
@ -796,7 +802,8 @@ public: // Member functions
* @brief This function sends and recieves data using a blocking call
*/
template<class type>
void sendrecv( const type *sendbuf, int sendcount, int dest, int sendtag, type *recvbuf, int recvcount, int source, int recvtag ) const;
void sendrecv( const type *sendbuf, int sendcount, int dest, int sendtag, type *recvbuf,
int recvcount, int source, int recvtag ) const;
/*!
@ -1126,9 +1133,6 @@ private: // data members
// The rank and size of the communicator
int comm_rank, comm_size;
// The ranks of the comm in the global comm
mutable int *volatile d_ranks;
// Some attributes
int d_maxTag;
int *volatile d_currentTag;

View File

@ -306,9 +306,99 @@ ScaLBL_Communicator::ScaLBL_Communicator(std::shared_ptr <Domain> Dm){
}
ScaLBL_Communicator::~ScaLBL_Communicator(){
// destrutor does nothing (bad idea)
// -- note that there needs to be a way to free memory allocated on the device!!!
ScaLBL_Communicator::~ScaLBL_Communicator()
{
ScaLBL_FreeDeviceMemory( sendbuf_x );
ScaLBL_FreeDeviceMemory( sendbuf_X );
ScaLBL_FreeDeviceMemory( sendbuf_y );
ScaLBL_FreeDeviceMemory( sendbuf_Y );
ScaLBL_FreeDeviceMemory( sendbuf_z );
ScaLBL_FreeDeviceMemory( sendbuf_Z );
ScaLBL_FreeDeviceMemory( sendbuf_xy );
ScaLBL_FreeDeviceMemory( sendbuf_xY );
ScaLBL_FreeDeviceMemory( sendbuf_Xy );
ScaLBL_FreeDeviceMemory( sendbuf_XY );
ScaLBL_FreeDeviceMemory( sendbuf_xz );
ScaLBL_FreeDeviceMemory( sendbuf_xZ );
ScaLBL_FreeDeviceMemory( sendbuf_Xz );
ScaLBL_FreeDeviceMemory( sendbuf_XZ );
ScaLBL_FreeDeviceMemory( sendbuf_yz );
ScaLBL_FreeDeviceMemory( sendbuf_yZ );
ScaLBL_FreeDeviceMemory( sendbuf_Yz );
ScaLBL_FreeDeviceMemory( sendbuf_YZ );
ScaLBL_FreeDeviceMemory( recvbuf_x );
ScaLBL_FreeDeviceMemory( recvbuf_X );
ScaLBL_FreeDeviceMemory( recvbuf_y );
ScaLBL_FreeDeviceMemory( recvbuf_Y );
ScaLBL_FreeDeviceMemory( recvbuf_z );
ScaLBL_FreeDeviceMemory( recvbuf_Z );
ScaLBL_FreeDeviceMemory( recvbuf_xy );
ScaLBL_FreeDeviceMemory( recvbuf_xY );
ScaLBL_FreeDeviceMemory( recvbuf_Xy );
ScaLBL_FreeDeviceMemory( recvbuf_XY );
ScaLBL_FreeDeviceMemory( recvbuf_xz );
ScaLBL_FreeDeviceMemory( recvbuf_xZ );
ScaLBL_FreeDeviceMemory( recvbuf_Xz );
ScaLBL_FreeDeviceMemory( recvbuf_XZ );
ScaLBL_FreeDeviceMemory( recvbuf_yz );
ScaLBL_FreeDeviceMemory( recvbuf_yZ );
ScaLBL_FreeDeviceMemory( recvbuf_Yz );
ScaLBL_FreeDeviceMemory( recvbuf_YZ );
ScaLBL_FreeDeviceMemory( dvcSendList_x );
ScaLBL_FreeDeviceMemory( dvcSendList_X );
ScaLBL_FreeDeviceMemory( dvcSendList_y );
ScaLBL_FreeDeviceMemory( dvcSendList_Y );
ScaLBL_FreeDeviceMemory( dvcSendList_z );
ScaLBL_FreeDeviceMemory( dvcSendList_Z );
ScaLBL_FreeDeviceMemory( dvcSendList_xy );
ScaLBL_FreeDeviceMemory( dvcSendList_xY );
ScaLBL_FreeDeviceMemory( dvcSendList_Xy );
ScaLBL_FreeDeviceMemory( dvcSendList_XY );
ScaLBL_FreeDeviceMemory( dvcSendList_xz );
ScaLBL_FreeDeviceMemory( dvcSendList_xZ );
ScaLBL_FreeDeviceMemory( dvcSendList_Xz );
ScaLBL_FreeDeviceMemory( dvcSendList_XZ );
ScaLBL_FreeDeviceMemory( dvcSendList_yz );
ScaLBL_FreeDeviceMemory( dvcSendList_yZ );
ScaLBL_FreeDeviceMemory( dvcSendList_Yz );
ScaLBL_FreeDeviceMemory( dvcSendList_YZ );
ScaLBL_FreeDeviceMemory( dvcRecvList_x );
ScaLBL_FreeDeviceMemory( dvcRecvList_X );
ScaLBL_FreeDeviceMemory( dvcRecvList_y );
ScaLBL_FreeDeviceMemory( dvcRecvList_Y );
ScaLBL_FreeDeviceMemory( dvcRecvList_z );
ScaLBL_FreeDeviceMemory( dvcRecvList_Z );
ScaLBL_FreeDeviceMemory( dvcRecvList_xy );
ScaLBL_FreeDeviceMemory( dvcRecvList_xY );
ScaLBL_FreeDeviceMemory( dvcRecvList_Xy );
ScaLBL_FreeDeviceMemory( dvcRecvList_XY );
ScaLBL_FreeDeviceMemory( dvcRecvList_xz );
ScaLBL_FreeDeviceMemory( dvcRecvList_xZ );
ScaLBL_FreeDeviceMemory( dvcRecvList_Xz );
ScaLBL_FreeDeviceMemory( dvcRecvList_XZ );
ScaLBL_FreeDeviceMemory( dvcRecvList_yz );
ScaLBL_FreeDeviceMemory( dvcRecvList_yZ );
ScaLBL_FreeDeviceMemory( dvcRecvList_Yz );
ScaLBL_FreeDeviceMemory( dvcRecvList_YZ );
ScaLBL_FreeDeviceMemory( dvcRecvDist_x );
ScaLBL_FreeDeviceMemory( dvcRecvDist_X );
ScaLBL_FreeDeviceMemory( dvcRecvDist_y );
ScaLBL_FreeDeviceMemory( dvcRecvDist_Y );
ScaLBL_FreeDeviceMemory( dvcRecvDist_z );
ScaLBL_FreeDeviceMemory( dvcRecvDist_Z );
ScaLBL_FreeDeviceMemory( dvcRecvDist_xy );
ScaLBL_FreeDeviceMemory( dvcRecvDist_xY );
ScaLBL_FreeDeviceMemory( dvcRecvDist_Xy );
ScaLBL_FreeDeviceMemory( dvcRecvDist_XY );
ScaLBL_FreeDeviceMemory( dvcRecvDist_xz );
ScaLBL_FreeDeviceMemory( dvcRecvDist_xZ );
ScaLBL_FreeDeviceMemory( dvcRecvDist_Xz );
ScaLBL_FreeDeviceMemory( dvcRecvDist_XZ );
ScaLBL_FreeDeviceMemory( dvcRecvDist_yz );
ScaLBL_FreeDeviceMemory( dvcRecvDist_yZ );
ScaLBL_FreeDeviceMemory( dvcRecvDist_Yz );
ScaLBL_FreeDeviceMemory( dvcRecvDist_YZ );
}
double ScaLBL_Communicator::GetPerformance(int *NeighborList, double *fq, int Np){
/* EACH MPI PROCESS GETS ITS OWN MEASUREMENT*/
@ -394,7 +484,7 @@ int ScaLBL_Communicator::MemoryOptimizedLayoutAA(IntArray &Map, int *neighborLis
int idx,i,j,k,n;
// Check that Map has size matching sub-domain
if (Map.size(0) != Nx)
if ( (int) Map.size(0) != Nx)
ERROR("ScaLBL_Communicator::MemoryOptimizedLayout: Map array dimensions do not match! \n");
// Initialize Map

View File

@ -10,14 +10,31 @@ color lattice boltzmann model
#include <time.h>
ScaLBL_ColorModel::ScaLBL_ColorModel(int RANK, int NP, const Utilities::MPI& COMM):
rank(RANK), nprocs(NP), Restart(0),timestep(0),timestepMax(0),tauA(0),tauB(0),rhoA(0),rhoB(0),alpha(0),beta(0),
Fx(0),Fy(0),Fz(0),flux(0),din(0),dout(0),inletA(0),inletB(0),outletA(0),outletB(0),
Nx(0),Ny(0),Nz(0),N(0),Np(0),nprocx(0),nprocy(0),nprocz(0),BoundaryCondition(0),Lx(0),Ly(0),Lz(0),comm(COMM)
rank(RANK), nprocs(NP), Restart(0), timestep(0), timestepMax(0),
tauA(0), tauB(0), rhoA(0), rhoB(0), alpha(0), beta(0),
Fx(0), Fy(0), Fz(0), flux(0), din(0), dout(0),
inletA(0), inletB(0), outletA(0), outletB(0),
Nx(0), Ny(0), Nz(0), N(0), Np(0), nprocx(0), nprocy(0), nprocz(0),
BoundaryCondition(0), Lx(0), Ly(0), Lz(0), id(nullptr),
NeighborList(nullptr), dvcMap(nullptr), fq(nullptr), Aq(nullptr), Bq(nullptr),
Den(nullptr), Phi(nullptr), ColorGrad(nullptr), Velocity(nullptr), Pressure(nullptr),
comm(COMM)
{
REVERSE_FLOW_DIRECTION = false;
}
ScaLBL_ColorModel::~ScaLBL_ColorModel(){
ScaLBL_ColorModel::~ScaLBL_ColorModel()
{
delete [] id;
ScaLBL_FreeDeviceMemory( NeighborList );
ScaLBL_FreeDeviceMemory( dvcMap );
ScaLBL_FreeDeviceMemory( fq );
ScaLBL_FreeDeviceMemory( Aq );
ScaLBL_FreeDeviceMemory( Bq );
ScaLBL_FreeDeviceMemory( Den );
ScaLBL_FreeDeviceMemory( Phi );
ScaLBL_FreeDeviceMemory( Pressure );
ScaLBL_FreeDeviceMemory( Velocity );
ScaLBL_FreeDeviceMemory( ColorGrad );
}
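Each ScaLBL_FreeDeviceMemory call in the destructor above is expected to match an allocation made during setup; a minimal sketch of that pairing, assuming the usual ScaLBL_AllocateDeviceMemory / ScaLBL_FreeDeviceMemory pair from ScaLBL.h (buffer name and size are placeholders):

// Sketch only: allocation during Create() ...
double *Phi = nullptr;
ScaLBL_AllocateDeviceMemory( (void **) &Phi, sizeof( double ) * N );
// ... and the matching release in the destructor, as above
ScaLBL_FreeDeviceMemory( Phi );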
/*void ScaLBL_ColorModel::WriteCheckpoint(const char *FILENAME, const double *cPhi, const double *cfq, int Np)
@ -408,11 +425,13 @@ void ScaLBL_ColorModel::Create(){
// copy the neighbor list
ScaLBL_CopyToDevice(NeighborList, neighborList, neighborSize);
delete [] neighborList;
// initialize phi based on PhaseLabel (include solid component labels)
double *PhaseLabel;
PhaseLabel = new double[N];
AssignComponentLabels(PhaseLabel);
ScaLBL_CopyToDevice(Phi, PhaseLabel, N*sizeof(double));
delete [] PhaseLabel;
}
/********************************************************
@ -1097,7 +1116,6 @@ double ScaLBL_ColorModel::MorphOpenConnected(double target_volume_change){
ScaLBL_CopyToHost(phase.data(), Phi, N*sizeof(double));
// Extract only the connected part of NWP
BlobIDstruct new_index;
double vF=0.0; double vS=0.0;
ComputeGlobalBlobIDs(nx-2,ny-2,nz-2,Dm->rank_info,phase,Averages->SDs,vF,vS,phase_label,Dm->Comm);
comm.barrier();
@ -1334,7 +1352,6 @@ double ScaLBL_ColorModel::MorphInit(const double beta, const double target_delta
double volume_connected = 0.0;
double second_biggest = 0.0;
if (USE_CONNECTED_NWP){
BlobIDstruct new_index;
ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,rank_info,phase,Averages->SDs,vF,vS,phase_label,comm);
comm.barrier();

View File

@ -1,7 +1,7 @@
// Test reading/writing netcdf files
#include "IO/netcdf.h"
#include "common/MPI_Helpers.h"
#include "common/MPI.h"
#include "common/Communication.h"
#include "common/UnitTest.h"
@ -13,7 +13,8 @@ void load( const std::string& );
void test_NETCDF( UnitTest& ut )
{
const int rank = comm_rank( MPI_COMM_WORLD );
Utilities::MPI comm( MPI_COMM_WORLD );
int rank = comm.getRank();
int nprocx = 2;
int nprocy = 2;
int nprocz = 2;
@ -26,11 +27,11 @@ void test_NETCDF( UnitTest& ut )
size_t z = info.kz*data.size(2);
const char* filename = "test.nc";
std::vector<int> dim = { (int) data.size(0)*nprocx, (int) data.size(1)*nprocy, (int) data.size(2)*nprocz };
int fid = netcdf::open( filename, netcdf::CREATE, MPI_COMM_WORLD );
int fid = netcdf::open( filename, netcdf::CREATE, comm );
auto dims = netcdf::defDim( fid, {"X", "Y", "Z"}, dim );
netcdf::write( fid, "tmp", dims, data, info );
netcdf::close( fid );
MPI_Barrier( MPI_COMM_WORLD );
comm.barrier();
// Read the contents of the file we created
fid = netcdf::open( filename, netcdf::READ );
Array<float> tmp = netcdf::getVar<float>( fid, "tmp" );
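The recurring change in this commit is the move from raw MPI calls to the Utilities::MPI wrapper; a minimal sketch of the pattern, restricted to the methods that appear in this diff (getRank, getSize, barrier, sumReduce):

#include <cstdio>
#include "common/MPI.h"

void report( double localValue )
{
    Utilities::MPI comm( MPI_COMM_WORLD );
    int rank   = comm.getRank();                 // replaces comm_rank( MPI_COMM_WORLD )
    int nprocs = comm.getSize();
    comm.barrier();                              // replaces MPI_Barrier( comm )
    double total = comm.sumReduce( localValue ); // replaces sumReduce( comm, localValue )
    if ( rank == 0 )
        printf( "nprocs=%i, total=%f \n", nprocs, total );
}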

View File

@ -1,13 +1,13 @@
#include <exception>
#include <fstream>
#include <iostream>
#include <stdexcept>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <iostream>
#include <exception>
#include <stdexcept>
#include <fstream>
#include "models/ColorModel.h"
#include "common/Utilities.h"
#include "models/ColorModel.h"
//#define WRE_SURFACES
@ -24,22 +24,18 @@
int main( int argc, char **argv )
{
// Initialize MPI
// Initialize
Utilities::startup( argc, argv );
Utilities::MPI comm( MPI_COMM_WORLD );
int rank = comm.getRank();
int nprocs = comm.getSize();
// Load the input database
auto db = std::make_shared<Database>( argv[1] );
// Initialize MPI and error handlers
auto multiple = db->getWithDefault<bool>( "MPI_THREAD_MULTIPLE", true );
//Utilities::startup( argc, argv, multiple );
//Utilities::MPI::changeProfileLevel( 1 );
{ // Limit scope so variables that contain communicators are freed before MPI_Finalize
Utilities::MPI comm( MPI_COMM_WORLD );
int rank = comm.getRank();
int nprocs = comm.getSize();
if ( rank == 0 ) {
printf( "********************************************************\n" );
printf( "Running Color LBM \n" );
@ -63,7 +59,8 @@ int main(int argc, char **argv)
ColorModel.ReadParams( filename );
ColorModel.SetDomain();
ColorModel.ReadInput();
ColorModel.Create(); // creating the model will create data structure to match the pore structure and allocate variables
ColorModel.Create(); // creating the model will create data structures to match the pore
// structure and allocate variables
ColorModel.Initialize(); // initializing the model will set initial conditions for variables
ColorModel.Run();
// ColorModel.WriteDebug();
@ -78,4 +75,5 @@ int main(int argc, char **argv)
} // Limit scope so variables that contain communicators are freed before MPI_Finalize
Utilities::shutdown();
return 0;
}
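The brace-delimited block in main() above exists so that any object holding an MPI communicator is destroyed before Utilities::shutdown() finalizes MPI; a minimal sketch of that ordering (the body of the block is elided):

int main( int argc, char **argv )
{
    Utilities::startup( argc, argv );
    {
        // objects that hold communicators live only inside this scope
        Utilities::MPI comm( MPI_COMM_WORLD );
        // ... set up and run the model ...
    } // comm is destroyed here, before MPI is finalized
    Utilities::shutdown();
    return 0;
}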

View File

@ -128,7 +128,6 @@ int main(int argc, char **argv)
comm.barrier();
// Extract only the connected part of NWP
BlobIDstruct new_index;
double vF=0.0; double vS=0.0;
ComputeGlobalBlobIDs(nx-2,ny-2,nz-2,Dm->rank_info,phase,SignDist,vF,vS,phase_label,Dm->Comm);
Dm->Comm.barrier();

View File

@ -14,7 +14,7 @@
#include "common/Array.h"
#include "common/Domain.h"
#include "common/Communication.h"
#include "common/MPI_Helpers.h"
#include "common/MPI.h"
#include "IO/MeshDatabase.h"
#include "IO/Mesh.h"
#include "IO/Writer.h"
@ -192,7 +192,7 @@ int main(int argc, char **argv)
fillFloat[0]->fill( LOCVOL[0] );
}
netcdf::close( fid );
MPI_Barrier(comm);
comm.barrier();
PROFILE_STOP("ReadVolume");
if (rank==0) printf("Read complete\n");
@ -255,15 +255,15 @@ int main(int argc, char **argv)
}
}
}
count_plus=sumReduce( Dm[0]->Comm, count_plus);
count_minus=sumReduce( Dm[0]->Comm, count_minus);
count_plus = Dm[0]->Comm.sumReduce( count_plus);
count_minus = Dm[0]->Comm.sumReduce( count_minus);
if (rank==0) printf("minimum value=%f, max value=%f \n",min_value,max_value);
if (rank==0) printf("plus=%i, minus=%i \n",count_plus,count_minus);
ASSERT( count_plus > 0 && count_minus > 0 );
MPI_Barrier(comm);
mean_plus = sumReduce( Dm[0]->Comm, mean_plus ) / count_plus;
mean_minus = sumReduce( Dm[0]->Comm, mean_minus ) / count_minus;
MPI_Barrier(comm);
comm.barrier();
mean_plus = Dm[0]->Comm.sumReduce( mean_plus ) / count_plus;
mean_minus = Dm[0]->Comm.sumReduce( mean_minus ) / count_minus;
comm.barrier();
if (rank==0) printf(" Region 1 mean (+): %f, Region 2 mean (-): %f \n",mean_plus, mean_minus);
//if (rank==0) printf("Scale the input data (size = %i) \n",LOCVOL[0].length());
@ -284,7 +284,7 @@ int main(int argc, char **argv)
// Fill the source data for the coarse meshes
if (rank==0) printf("Coarsen the mesh for N_levels=%i \n",N_levels);
MPI_Barrier(comm);
comm.barrier();
PROFILE_START("CoarsenMesh");
for (int i=1; i<N_levels; i++) {
Array<float> filter(ratio[0],ratio[1],ratio[2]);
@ -300,7 +300,7 @@ int main(int argc, char **argv)
printf(" filter_x=%i, filter_y=%i, filter_z=%i \n",int(filter.size(0)),int(filter.size(1)),int(filter.size(2)) );
printf(" ratio= %i,%i,%i \n",int(ratio[0]),int(ratio[1]),int(ratio[2]) );
}
MPI_Barrier(comm);
comm.barrier();
}
PROFILE_STOP("CoarsenMesh");
@ -312,7 +312,7 @@ int main(int argc, char **argv)
NonLocalMean.back(), *fillFloat.back(), *Dm.back(), nprocx,
rough_cutoff, lamda, nlm_sigsq, nlm_depth);
PROFILE_STOP("Solve coarse mesh");
MPI_Barrier(comm);
comm.barrier();
// Refine the solution
PROFILE_START("Refine distance");
@ -326,7 +326,7 @@ int main(int argc, char **argv)
rough_cutoff, lamda, nlm_sigsq, nlm_depth);
}
PROFILE_STOP("Refine distance");
MPI_Barrier(comm);
comm.barrier();
// Perform a final filter
PROFILE_START("Filtering final domains");
@ -424,14 +424,14 @@ int main(int argc, char **argv)
meshData[0].vars.push_back(filter_Dist2_var);
fillDouble[0]->copy( filter_Dist2, filter_Dist2_var->data );
#endif
MPI_Barrier(comm);
comm.barrier();
if (rank==0) printf("Writing output \n");
// Write visualization data
IO::writeData( 0, meshData, comm );
if (rank==0) printf("Finished. \n");
// Compute the Minkowski functionals
MPI_Barrier(comm);
comm.barrier();
auto Averages = std::make_shared<Minkowski>(Dm[0]);
Array <char> phase_label(Nx[0]+2,Ny[0]+2,Nz[0]+2);