Merge branch 'master' of github.com:JamesEMcClure/LBPM-WIA
commit 95d4322d82
@@ -71,7 +71,8 @@ static std::vector<IO::MeshDatabase> writeMeshesOrigFormat( const std::vector<IO
             ERROR("Unknown mesh");
         }
         fclose(fid);
-        std::unique(mesh_entry.variables.begin(),mesh_entry.variables.end());
+        std::sort( mesh_entry.variables.begin(), mesh_entry.variables.end() );
+        mesh_entry.variables.erase( std::unique( mesh_entry.variables.begin(), mesh_entry.variables.end() ), mesh_entry.variables.end() );
         meshes_written.push_back(mesh_entry);
     }
     return meshes_written;
@@ -339,9 +339,8 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_
     neighbors.push_back( rank_info.rank[1][2][1] );
     neighbors.push_back( rank_info.rank[1][1][0] );
     neighbors.push_back( rank_info.rank[1][1][2] );
-    std::unique(neighbors.begin(),neighbors.end());
-    if ( std::find(neighbors.begin(),neighbors.end(),rank) != neighbors.end() )
-        neighbors.erase(std::find(neighbors.begin(),neighbors.end(),rank));
+    std::sort( neighbors.begin(), neighbors.end() );
+    neighbors.erase( std::unique( neighbors.begin(), neighbors.end() ), neighbors.end() );
     // Create a map of all local ids to the neighbor ids
     std::map<int64_t,global_id_info_struct> map;
    std::set<int64_t> local;
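
Note: std::unique only collapses adjacent duplicates and does not shrink the container, so on an unsorted vector it silently leaves duplicates behind; the replacement lines above sort first and then erase the tail returned by std::unique. A minimal, self-contained sketch of that idiom (illustrative only, not code from the repository):

    #include <algorithm>
    #include <vector>

    // Remove duplicate entries from a vector: sort groups equal values,
    // std::unique shifts the unique ones to the front, erase trims the tail.
    void remove_duplicates( std::vector<int>& v )
    {
        std::sort( v.begin(), v.end() );
        v.erase( std::unique( v.begin(), v.end() ), v.end() );
    }
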
@@ -1,6 +1,7 @@
 # Copy files for the tests
 ADD_LBPM_EXECUTABLE( lbpm_permeability_simulator )
 ADD_LBPM_EXECUTABLE( lbpm_color_simulator )
+ADD_LBPM_EXECUTABLE( lbpm_color_macro_simulator )
 ADD_LBPM_EXECUTABLE( lbpm_sphere_pp )
 ADD_LBPM_EXECUTABLE( lbpm_random_pp )
 ADD_LBPM_EXECUTABLE( lbpm_morphdrain_pp )
tests/lbpm_color_macro_simulator.cpp (new file, 1003 lines)
File diff suppressed because it is too large
@@ -61,18 +61,19 @@ public:
         std::shared_ptr<const DoubleArray> phase_, const DoubleArray& dist_,
         BlobIDstruct last_id_, BlobIDstruct new_index_, BlobIDstruct new_id_, BlobIDList new_list_ ):
         timestep(timestep_), Nx(Nx_), Ny(Ny_), Nz(Nz_), rank_info(rank_info_),
-        phase(phase_), dist(dist_), last_id(last_id_), new_index(new_index_), new_id(new_id_), new_list(new_list_) { }
+        phase(phase_), dist(dist_), last_id(last_id_), new_index(new_index_), new_id(new_id_), new_list(new_list_)
+    {
+        MPI_Comm_dup(MPI_COMM_WORLD,&newcomm);
+    }
+    ~BlobIdentificationWorkItem1() { MPI_Comm_free(&newcomm); }
     virtual void run() {
         ThreadPool::WorkItem::d_state = 1;    // Change state to in progress
         // Compute the global blob id and compare to the previous version
         PROFILE_START("Identify blobs",1);
-        MPI_Comm newcomm;
-        MPI_Comm_dup(MPI_COMM_WORLD,&newcomm);
         double vF = 0.0;
         double vS = -1.0; // one voxel buffer region around solid
         IntArray& ids = new_index->second;
         new_index->first = ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,rank_info,*phase,dist,vF,vS,ids,newcomm);
-        MPI_Comm_free(&newcomm);
         PROFILE_STOP("Identify blobs",1);
         ThreadPool::WorkItem::d_state = 2;    // Change state to finished
     }
@@ -85,6 +86,7 @@ private:
     const DoubleArray& dist;
     BlobIDstruct last_id, new_index, new_id;
     BlobIDList new_list;
+    MPI_Comm newcomm;
 };
 class BlobIdentificationWorkItem2: public ThreadPool::WorkItem
 {
@@ -93,13 +95,15 @@ public:
         std::shared_ptr<const DoubleArray> phase_, const DoubleArray& dist_,
         BlobIDstruct last_id_, BlobIDstruct new_index_, BlobIDstruct new_id_, BlobIDList new_list_ ):
         timestep(timestep_), Nx(Nx_), Ny(Ny_), Nz(Nz_), rank_info(rank_info_),
-        phase(phase_), dist(dist_), last_id(last_id_), new_index(new_index_), new_id(new_id_), new_list(new_list_) { }
+        phase(phase_), dist(dist_), last_id(last_id_), new_index(new_index_), new_id(new_id_), new_list(new_list_)
+    {
+        MPI_Comm_dup(MPI_COMM_WORLD,&newcomm);
+    }
+    ~BlobIdentificationWorkItem2() { MPI_Comm_free(&newcomm); }
     virtual void run() {
         ThreadPool::WorkItem::d_state = 1;    // Change state to in progress
         // Compute the global blob id and compare to the previous version
         PROFILE_START("Identify blobs maps",1);
-        MPI_Comm newcomm;
-        MPI_Comm_dup(MPI_COMM_WORLD,&newcomm);
         const IntArray& ids = new_index->second;
         static int max_id = -1;
         new_id->first = new_index->first;
@@ -118,7 +122,6 @@ public:
             getNewIDs(map,max_id,*new_list);
             writeIDMap(map,timestep,id_map_filename);
         }
-        MPI_Comm_free(&newcomm);
         PROFILE_STOP("Identify blobs maps",1);
         ThreadPool::WorkItem::d_state = 2;    // Change state to finished
     }
@@ -131,6 +134,7 @@ private:
     const DoubleArray& dist;
     BlobIDstruct last_id, new_index, new_id;
     BlobIDList new_list;
+    MPI_Comm newcomm;
 };

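Note: the changes above move MPI_Comm_dup out of run() and into each work item's constructor, with a matching MPI_Comm_free in the destructor, so the communicator is created on the submitting thread and owned for the item's whole lifetime instead of being duplicated on the worker thread. A minimal RAII-style sketch of the same idea (illustrative only; the class name is hypothetical and not part of the repository):

    #include <mpi.h>

    // Owns a private duplicate of MPI_COMM_WORLD so collective calls issued
    // by this object never share a communicator with the main time loop.
    // MPI_Comm_dup is itself collective, so construct this on the main thread.
    class ScopedComm {
    public:
        ScopedComm()  { MPI_Comm_dup( MPI_COMM_WORLD, &comm_ ); }
        ~ScopedComm() { MPI_Comm_free( &comm_ ); }
        MPI_Comm get() const { return comm_; }
    private:
        MPI_Comm comm_;
    };
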
@@ -140,7 +144,11 @@ class WriteVisWorkItem: public ThreadPool::WorkItem
 public:
     WriteVisWorkItem( int timestep_, std::vector<IO::MeshDataStruct>& visData_,
         TwoPhase& Avgerages_, fillHalo<double>& fillData_ ):
-        timestep(timestep_), visData(visData_), Averages(Avgerages_), fillData(fillData_) {}
+        timestep(timestep_), visData(visData_), Averages(Avgerages_), fillData(fillData_)
+    {
+        MPI_Comm_dup(MPI_COMM_WORLD,&newcomm);
+    }
+    ~WriteVisWorkItem() { MPI_Comm_free(&newcomm); }
     virtual void run() {
         ThreadPool::WorkItem::d_state = 1;    // Change state to in progress
         PROFILE_START("Save Vis",1);
@@ -156,10 +164,7 @@ public:
         fillData.copy(Averages.Press,PressData);
         fillData.copy(Averages.SDs,SignData);
         fillData.copy(Averages.Label_NWP,BlobData);
-        MPI_Comm newcomm;
-        MPI_Comm_dup(MPI_COMM_WORLD,&newcomm);
         IO::writeData( timestep, visData, 2, newcomm );
-        MPI_Comm_free(&newcomm);
         PROFILE_STOP("Save Vis",1);
         ThreadPool::WorkItem::d_state = 2;    // Change state to finished
     };
@@ -169,8 +174,10 @@ private:
     std::vector<IO::MeshDataStruct>& visData;
     TwoPhase& Averages;
     fillHalo<double>& fillData;
+    MPI_Comm newcomm;
 };
+

 // Helper class to run the analysis from within a thread
 // Note: Averages will be modified after the constructor is called
 class AnalysisWorkItem: public ThreadPool::WorkItem
@@ -180,6 +187,7 @@ public:
         BlobIDstruct ids, BlobIDList id_list_, double beta_ ):
         type(type_), timestep(timestep_), Averages(Averages_),
         blob_ids(ids), id_list(id_list_), beta(beta_) { }
+    ~AnalysisWorkItem() { }
     virtual void run() {
         ThreadPool::WorkItem::d_state = 1;    // Change state to in progress
         Averages.NumberComponents_NWP = blob_ids->first;
@@ -243,7 +251,7 @@ void run_analysis( int timestep, int restart_interval,
         // Identify blobs and update global ids in time
         type = static_cast<AnalysisType>( type | IdentifyBlobs );
     }
-#ifdef USE_CUDA
+/* #ifdef USE_CUDA
     if ( tpool.getQueueSize()<=3 && tpool.getNumThreads()>0 && timestep%50==0 ) {
         // Keep a few blob identifications queued up to keep the processors busy,
         // allowing us to track the blobs as fast as possible
@@ -251,7 +259,7 @@ void run_analysis( int timestep, int restart_interval,
         type = static_cast<AnalysisType>( type | IdentifyBlobs );
     }
 #endif
-
+*/
     if ( timestep%ANALYSIS_INTERVAL == 0 ) {
         // Copy the averages to the CPU (and identify blobs)
         type = static_cast<AnalysisType>( type | CopySimState );
@@ -369,7 +377,7 @@ void run_analysis( int timestep, int restart_interval,
     // Retain the timestep associated with the restart files
     if (rank==0){
         FILE *Rst = fopen("Restart.txt","w");
-        fprintf(Rst,"%i\n",timestep);
+        fprintf(Rst,"%i\n",timestep+5);
         fclose(Rst);
     }
     // Write the restart file (using a seperate thread)
@@ -228,8 +228,8 @@ int main(int argc, char **argv)
         printf("Force(x) = %f \n", Fx);
         printf("Force(y) = %f \n", Fy);
         printf("Force(z) = %f \n", Fz);
-        printf("Sub-domain size = %i x %i x %i\n",Nz,Nz,Nz);
-        printf("Parallel domain size = %i x %i x %i\n",nprocx,nprocy,nprocz);
+        printf("Sub-domain size = %i x %i x %i\n",Nx,Ny,Nz);
+        printf("Process grid = %i x %i x %i\n",nprocx,nprocy,nprocz);
         printf("********************************************************\n");
     }

@@ -246,8 +246,7 @@ int main(int argc, char **argv)

     MPI_Barrier(comm);

-    Nz += 2;
-    //Nx = Ny = Nz; // Cubic domain
+    Nx += 2; Ny += 2; Nz += 2;

     int N = Nx*Ny*Nz;
     int dist_mem_size = N*sizeof(double);
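
Note: growing every sub-domain dimension by 2 is consistent with allocating one ghost (halo) layer on each face before the distributions are allocated; stating that as an assumption rather than a fact, here is a small sketch of the index arithmetic it implies (hypothetical helper, not from the repository):

    // Map interior coordinates (i,j,k) of an nx x ny x nz sub-domain to the
    // flattened index in an array padded by one ghost layer per face,
    // i.e. allocated as (nx+2) x (ny+2) x (nz+2).
    inline int padded_index( int i, int j, int k, int nx, int ny )
    {
        const int Nx = nx + 2, Ny = ny + 2;
        return (k+1)*Nx*Ny + (j+1)*Nx + (i+1);
    }
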
@@ -18,18 +18,22 @@ int main(int argc, char **argv)
     // Initialize MPI
     int rank, nprocs;
     MPI_Init(&argc,&argv);
-    MPI_Comm comm = MPI_COMM_WORLD;
+
+    MPI_Comm comm = MPI_COMM_WORLD;
     MPI_Comm_rank(comm,&rank);
     MPI_Comm_size(comm,&nprocs);
-
-
-    int SOLID=atoi(argv[1]);
-    int NWP=atoi(argv[2]);
-    if (rank==0){
-        printf("Solid Label %i \n",SOLID);
-        printf("NWP Label %i \n",NWP);
-    }
+    {
+
+        int SOLID=atoi(argv[1]);
+        int NWP=atoi(argv[2]);
+        //char NWP,SOLID;
+        //SOLID=argv[1][0];
+        //NWP=argv[2][0];
+        if (rank==0){
+            printf("Solid Label: %i \n",SOLID);
+            printf("NWP Label: %i \n",NWP);
+        }

     //.......................................................................
     // Reading the domain information file
     //.......................................................................
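
Note: the opening brace added after the MPI setup appears to scope the working objects of main(); a common motive for that pattern is to make sure destructors run before MPI_Finalize is reached (the matching closing brace shows up in the hunk at old line 265 below). A minimal sketch of that structure, offered as an assumption about the intent rather than a statement of it:

    #include <mpi.h>

    int main( int argc, char **argv )
    {
        MPI_Init( &argc, &argv );
        MPI_Comm comm = MPI_COMM_WORLD;
        int rank, nprocs;
        MPI_Comm_rank( comm, &rank );
        MPI_Comm_size( comm, &nprocs );
        {
            // Objects holding MPI resources or large buffers live in this
            // inner scope, so their destructors run before MPI_Finalize().
        }
        MPI_Barrier( comm );
        MPI_Finalize();
        return 0;
    }
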
@@ -190,22 +194,15 @@ int main(int argc, char **argv)
     nx+=2; ny+=2; nz+=2;
     N=nx*ny*nz;

-    //if (rank==0) printf("WARNING: assumed that INPUT: WP(1),NWP(2) OUTPUT will be NWP(1),WP(2) (lbpm_segmented_decomp) \n");
     if (rank==0) printf("All sub-domains recieved \n");
     // Really need a better way to do this -- this is to flip convention for a particular data set
     for (k=0;k<nz;k++){
         for (j=0;j<ny;j++){
             for (i=0;i<nx;i++){
                 n = k*nx*ny+j*nx+i;
                 if (Dm.id[n]==char(SOLID)) Dm.id[n] = 0;
-                else if (Dm.id[n]==char(NWP)) Dm.id[n] = 1;
-                else Dm.id[n] = 2;
-
-                // if (Dm.id[n] == 1) Dm.id[n]=2;
-                //else if (Dm.id[n] == 2) Dm.id[n]=1;
-                // Initialize the solid phase
-                // if (Dm.id[n] == 0) id[n] = 0;
-                // else id[n] = 1;
+                else if (Dm.id[n]==char(NWP)) Dm.id[n] = 1;
+                else Dm.id[n] = 2;
             }
         }
     }
@@ -231,6 +228,24 @@ int main(int argc, char **argv)
     float porosity = float(totalGlobal-countGlobal)/totalGlobal;
     if (rank==0) printf("Porosity=%f\n",porosity);

+    count = 0;
+    total = 0;
+    countGlobal = 0;
+    totalGlobal = 0;
+    for (k=1;k<nz-1;k++){
+        for (j=1;j<ny-1;j++){
+            for (i=1;i<nx-1;i++){
+                n=k*nx*ny+j*nx+i;
+                if (Dm.id[n] != 0) total++;
+                if (Dm.id[n] == 2) count++;
+            }
+        }
+    }
+    MPI_Allreduce(&count,&countGlobal,1,MPI_INT,MPI_SUM,comm);
+    MPI_Allreduce(&total,&totalGlobal,1,MPI_INT,MPI_SUM,comm);
+    float saturation = float(countGlobal)/totalGlobal;
+    if (rank==0) printf("wetting phase saturation=%f\n",saturation);
+
     char LocalRankFilename[40];
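
Note: the block added above computes the wetting-phase saturation as a ratio of globally summed voxel counts, using MPI_Allreduce with MPI_SUM so every rank ends up with the same totals. A compact, generic sketch of that reduction pattern (illustrative only; names and types are assumptions, not repository code):

    #include <mpi.h>

    // Fraction of non-solid voxels carrying 'label' in the interior of a
    // padded nx x ny x nz sub-domain, summed over all ranks.
    double global_label_fraction( const char* id, int nx, int ny, int nz,
                                  char label, MPI_Comm comm )
    {
        long local[2] = {0, 0};    // {labelled, non-solid}
        for (int k = 1; k < nz-1; k++)
            for (int j = 1; j < ny-1; j++)
                for (int i = 1; i < nx-1; i++) {
                    int n = k*nx*ny + j*nx + i;
                    if (id[n] != 0)     local[1]++;
                    if (id[n] == label) local[0]++;
                }
        long global[2] = {0, 0};
        MPI_Allreduce( local, global, 2, MPI_LONG, MPI_SUM, comm );
        return double(global[0]) / double(global[1]);
    }
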
@@ -265,7 +280,7 @@ int main(int argc, char **argv)
 //      fwrite(SymDist.get(),8,SymDist.length(),SYMDIST);
         fwrite(symid,1,N,SYMID);
         fclose(SYMID);
     }
+    }
     MPI_Barrier(comm);
     MPI_Finalize();
 }
@@ -4,7 +4,7 @@ import csv
 import glob

 name=sys.argv[1]
-numpts=int(sys.argv[2])
+#numpts=int(sys.argv[2])
 process="drain"
 cwd=os.getcwd()

@@ -13,7 +13,7 @@ print("Drainage should initialize imbibition!")
 print("Base name is "+name)

 # Parameters for color LBM
-tau=0.7
+tau=0.8
 alpha=0.005
 beta=0.95
 phisol=-1.0
@@ -25,7 +25,7 @@ Restart=1
 pBC=3
 din=1.001
 dout=0.999
-maxtime=100005
+maxtime=2000005
 interval=50000
 tolerance=1e-5