add wide halo to fom

This commit is contained in:
James McClure 2021-01-15 17:12:38 -05:00
parent 9bc8100a1d
commit c911a4f3d2
3 changed files with 58 additions and 201 deletions

View File

@@ -55,9 +55,8 @@ ADD_LBPM_TEST( TestTopo3D )
ADD_LBPM_TEST( TestFluxBC ) ADD_LBPM_TEST( TestFluxBC )
ADD_LBPM_TEST( TestMap ) ADD_LBPM_TEST( TestMap )
#ADD_LBPM_TEST( TestMRT ) #ADD_LBPM_TEST( TestMRT )
ADD_LBPM_TEST( TestColorGrad ) #ADD_LBPM_TEST( TestColorGrad )
ADD_LBPM_TEST( TestWideHalo ) ADD_LBPM_TEST( TestWideHalo )
#ADD_LBPM_TEST( TestColorGradDFH )
ADD_LBPM_TEST( TestColorGradDFH ) ADD_LBPM_TEST( TestColorGradDFH )
ADD_LBPM_TEST( TestBubbleDFH ../example/Bubble/input.db) ADD_LBPM_TEST( TestBubbleDFH ../example/Bubble/input.db)
#ADD_LBPM_TEST( TestColorMassBounceback ../example/Bubble/input.db) #ADD_LBPM_TEST( TestColorMassBounceback ../example/Bubble/input.db)

View File

@@ -3,7 +3,12 @@
// Lattice Boltzmann Simulator for Single Phase Flow in Porous Media // Lattice Boltzmann Simulator for Single Phase Flow in Porous Media
// James E. McCLure // James E. McCLure
//************************************************************************* //*************************************************************************
#include <stdio.h> #include <stdio.h> // Initialize MPI
Utilities::startup( argc, argv );
Utilities::MPI comm( MPI_COMM_WORLD );
int rank = comm.getRank();
int nprocs = comm.getSize();
int check;
#include <iostream> #include <iostream>
#include <fstream> #include <fstream>
#include "common/ScaLBL.h" #include "common/ScaLBL.h"
@@ -21,8 +26,8 @@ int main(int argc, char **argv)
// Initialize MPI // Initialize MPI
Utilities::startup( argc, argv ); Utilities::startup( argc, argv );
Utilities::MPI comm( MPI_COMM_WORLD ); Utilities::MPI comm( MPI_COMM_WORLD );
int rank = comm.getRank(); int rank = comm.getRank();
int nprocs = comm.getSize(); int nprocs = comm.getSize();
int check; int check;
{ {
// parallel domain size (# of sub-domains) // parallel domain size (# of sub-domains)
@@ -47,105 +52,13 @@ int main(int argc, char **argv)
int Nx,Ny,Nz; int Nx,Ny,Nz;
int i,j,k,n; int i,j,k,n;
int dim = 3; int dim = 3;
Nx = Ny = Nz = 32;
Lx = Ly = Lz = 1.0;
//if (rank == 0) printf("dim=%d\n",dim); //if (rank == 0) printf("dim=%d\n",dim);
int timestep = 0; int timestep = 0;
int timesteps = 100; int timesteps = 100;
int centralNode = 2; int centralNode = 2;
double tauA = 1.0;
double tauB = 1.0;
double rhoA = 1.0;
double rhoB = 1.0;
double alpha = 0.005;
double beta = 0.95;
double tau = 1.0;
double mu=(tau-0.5)/3.0;
double rlx_setA=1.0/tau;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
Fx = Fy = 0.f;
Fz = 0.f;
if (rank==0){
//.......................................................................
// Reading the domain information file
//.......................................................................
ifstream domain("Domain.in");
if (domain.good()){
domain >> nprocx;
domain >> nprocy;
domain >> nprocz;
domain >> Nx;
domain >> Ny;
domain >> Nz;
domain >> nspheres;
domain >> Lx;
domain >> Ly;
domain >> Lz;
}
else if (nprocs==1){
nprocx=nprocy=nprocz=1;
Nx=Ny=Nz=3;
nspheres=0;
Lx=Ly=Lz=1;
}
else if (nprocs==2){
nprocx=2; nprocy=1;
nprocz=1;
Nx=Ny=Nz=dim;
Nx = dim; Ny = dim; Nz = dim;
nspheres=0;
Lx=Ly=Lz=1;
}
else if (nprocs==4){
nprocx=nprocy=2;
nprocz=1;
Nx=Ny=Nz=dim;
nspheres=0;
Lx=Ly=Lz=1;
}
else if (nprocs==8){
nprocx=nprocy=nprocz=2;
Nx=Ny=Nz=dim;
nspheres=0;
Lx=Ly=Lz=1;
}
//.......................................................................
}
// **************************************************************
// Broadcast simulation parameters from rank 0 to all other procs
MPI_Barrier(comm);
//.................................................
MPI_Bcast(&Nx,1,MPI_INT,0,comm);
MPI_Bcast(&Ny,1,MPI_INT,0,comm);
MPI_Bcast(&Nz,1,MPI_INT,0,comm);
MPI_Bcast(&nprocx,1,MPI_INT,0,comm);
MPI_Bcast(&nprocy,1,MPI_INT,0,comm);
MPI_Bcast(&nprocz,1,MPI_INT,0,comm);
MPI_Bcast(&nspheres,1,MPI_INT,0,comm);
MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm);
MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm);
MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm);
//.................................................
MPI_Barrier(comm);
// **************************************************************
// **************************************************************
if (nprocs != nprocx*nprocy*nprocz){
printf("nprocx = %i \n",nprocx);
printf("nprocy = %i \n",nprocy);
printf("nprocz = %i \n",nprocz);
INSIST(nprocs == nprocx*nprocy*nprocz,"Fatal error in processor count!");
}
if (rank==0){
printf("********************************************************\n");
printf("Sub-domain size = %i x %i x %i\n",Nx,Ny,Nz);
printf("********************************************************\n");
}
MPI_Barrier(comm);
double iVol_global = 1.0/Nx/Ny/Nz/nprocx/nprocy/nprocz; double iVol_global = 1.0/Nx/Ny/Nz/nprocx/nprocy/nprocz;
int BoundaryCondition=0; int BoundaryCondition=0;
@@ -190,7 +103,7 @@ int main(int argc, char **argv)
IntArray Map(Nx,Ny,Nz); IntArray Map(Nx,Ny,Nz);
neighborList= new int[18*Np]; neighborList= new int[18*Np];
ScaLBL_Comm.MemoryOptimizedLayoutAA(Map,neighborList,Dm.id,Np); ScaLBL_Comm.MemoryOptimizedLayoutAA(Map,neighborList,Dm.id,Np,1);
MPI_Barrier(comm); MPI_Barrier(comm);
//......................device distributions................................. //......................device distributions.................................

View File

@@ -8,7 +8,7 @@
#include <fstream> #include <fstream>
#include "common/ScaLBL.h" #include "common/ScaLBL.h"
#include "common/WideHalo.h" #include "common/WideHalo.h"
#include "common/MPI_Helpers.h" #include "common/MPI.h"
using namespace std; using namespace std;
@@ -20,105 +20,55 @@ int main(int argc, char **argv)
// ***** MPI STUFF **************** // ***** MPI STUFF ****************
//***************************************** //*****************************************
// Initialize MPI // Initialize MPI
int rank,nprocs; Utilities::startup( argc, argv );
MPI_Init(&argc,&argv); Utilities::MPI comm( MPI_COMM_WORLD );
MPI_Comm comm = MPI_COMM_WORLD; int rank = comm.getRank();
MPI_Comm_rank(comm,&rank); int nprocs = comm.getSize();
MPI_Comm_size(comm,&nprocs); int check=0;
int check;
{ {
// parallel domain size (# of sub-domains)
int nprocx,nprocy,nprocz;
int iproc,jproc,kproc;
if (rank == 0){ if (rank == 0){
printf("********************************************************\n"); printf("********************************************************\n");
printf("Running Color Model: TestColor \n"); printf("Running Color Model: TestColor \n");
printf("********************************************************\n"); printf("********************************************************\n");
} }
// BGK Model parameters
string FILENAME;
unsigned int nBlocks, nthreads;
int timestepMax, interval;
double Fx,Fy,Fz,tol;
// Domain variables // Domain variables
int nprocx, nprocy, nprocz;
double Lx,Ly,Lz; double Lx,Ly,Lz;
int nspheres;
int Nx,Ny,Nz; int Nx,Ny,Nz;
int i,j,k,n; int i,j,k,n;
int dim = 3; int dim = 16;
//if (rank == 0) printf("dim=%d\n",dim); Lx = Ly = Lz = 1.0;
int timestep = 0; int BoundaryCondition=0;
int timesteps = 100;
int centralNode = 2;
double tauA = 1.0; //.......................................................................
double tauB = 1.0; // Reading the domain information file
double rhoA = 1.0; //.......................................................................
double rhoB = 1.0; nprocx=nprocy=nprocz=1;
double alpha = 0.005; if (nprocs==1){
double beta = 0.95; nprocx=nprocy=nprocz=1;
Nx=Ny=Nz=dim;
double tau = 1.0; Lx=Ly=Lz=1;
double mu=(tau-0.5)/3.0;
double rlx_setA=1.0/tau;
double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA);
Fx = Fy = 0.f;
Fz = 0.f;
if (rank==0){
//.......................................................................
// Reading the domain information file
//.......................................................................
if (nprocs==1){
nprocx=nprocy=nprocz=1;
Nx=Ny=Nz=3;
nspheres=0;
Lx=Ly=Lz=1;
}
else if (nprocs==2){
nprocx=2; nprocy=1;
nprocz=1;
Nx=Ny=Nz=dim;
Nx = dim; Ny = dim; Nz = dim;
nspheres=0;
Lx=Ly=Lz=1;
}
else if (nprocs==4){
nprocx=nprocy=2;
nprocz=1;
Nx=Ny=Nz=dim;
nspheres=0;
Lx=Ly=Lz=1;
}
else if (nprocs==8){
nprocx=nprocy=nprocz=2;
Nx=Ny=Nz=dim;
nspheres=0;
Lx=Ly=Lz=1;
}
//.......................................................................
} }
// ************************************************************** else if (nprocs==2){
// Broadcast simulation parameters from rank 0 to all other procs nprocx=2; nprocy=1;
MPI_Barrier(comm); nprocz=1;
//................................................. Nx=Ny=Nz=dim;
MPI_Bcast(&Nx,1,MPI_INT,0,comm); Nx = dim; Ny = dim; Nz = dim;
MPI_Bcast(&Ny,1,MPI_INT,0,comm); Lx=Ly=Lz=1;
MPI_Bcast(&Nz,1,MPI_INT,0,comm); }
MPI_Bcast(&nprocx,1,MPI_INT,0,comm); else if (nprocs==4){
MPI_Bcast(&nprocy,1,MPI_INT,0,comm); nprocx=nprocy=2;
MPI_Bcast(&nprocz,1,MPI_INT,0,comm); nprocz=1;
MPI_Bcast(&nspheres,1,MPI_INT,0,comm); Nx=Ny=Nz=dim;
MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); Lx=Ly=Lz=1;
MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); }
MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); else if (nprocs==8){
//................................................. nprocx=nprocy=nprocz=2;
MPI_Barrier(comm); Nx=Ny=Nz=dim;
// ************************************************************** Lx=Ly=Lz=1;
}
//.......................................................................
// ************************************************************** // **************************************************************
if (nprocs != nprocx*nprocy*nprocz){ if (nprocs != nprocx*nprocy*nprocz){
@@ -134,10 +84,7 @@ int main(int argc, char **argv)
printf("********************************************************\n"); printf("********************************************************\n");
} }
MPI_Barrier(comm); comm.barrier();
double iVol_global = 1.0/Nx/Ny/Nz/nprocx/nprocy/nprocz;
int BoundaryCondition=0;
std::shared_ptr<Domain> Dm = std::shared_ptr<Domain>(new Domain(Nx,Ny,Nz,rank,nprocx,nprocy,nprocz,Lx,Ly,Lz,BoundaryCondition)); std::shared_ptr<Domain> Dm = std::shared_ptr<Domain>(new Domain(Nx,Ny,Nz,rank,nprocx,nprocy,nprocz,Lx,Ly,Lz,BoundaryCondition));
Nx += 2; Nx += 2;
@@ -162,7 +109,7 @@ int main(int argc, char **argv)
} }
} }
Dm->CommInit(); Dm->CommInit();
MPI_Barrier(comm); comm.barrier();
if (rank == 0) cout << "Domain set." << endl; if (rank == 0) cout << "Domain set." << endl;
if (rank==0) printf ("Create ScaLBL_Communicator \n"); if (rank==0) printf ("Create ScaLBL_Communicator \n");
@@ -179,12 +126,8 @@ int main(int argc, char **argv)
IntArray Map(Nx,Ny,Nz); IntArray Map(Nx,Ny,Nz);
neighborList= new int[18*Np]; neighborList= new int[18*Np];
ScaLBL_Comm.MemoryOptimizedLayoutAA(Map,neighborList,Dm->id,Np,2); ScaLBL_Comm.MemoryOptimizedLayoutAA(Map,neighborList,Dm->id.data(),Np,2);
MPI_Barrier(comm); comm.barrier();
//......................device distributions.................................
int dist_mem_size = Np*sizeof(double);
if (rank==0) printf ("Allocating distributions \n");
int *NeighborList; int *NeighborList;
int *dvcMap; int *dvcMap;
@@ -241,8 +184,10 @@ int main(int argc, char **argv)
CY=COLORGRAD[Np+idx]; CY=COLORGRAD[Np+idx];
CZ=COLORGRAD[2*Np+idx]; CZ=COLORGRAD[2*Np+idx];
double error=sqrt((CX-1.0)*(CX-1.0)+(CY-2.0)*(CY-2.0)+ (CZ-3.0)*(CZ-3.0)); double error=sqrt((CX-1.0)*(CX-1.0)+(CY-2.0)*(CY-2.0)+ (CZ-3.0)*(CZ-3.0));
if (error > 1e-8) if (error > 1e-8){
check++;
printf("i,j,k=%i,%i,%i: Color gradient=%f,%f,%f \n",i,j,k,CX,CY,CZ); printf("i,j,k=%i,%i,%i: Color gradient=%f,%f,%f \n",i,j,k,CX,CY,CZ);
}
} }
} }
} }
@@ -250,8 +195,8 @@ int main(int argc, char **argv)
} }
// **************************************************** // ****************************************************
MPI_Barrier(comm); comm.barrier();
MPI_Finalize(); Utilities::shutdown();
// **************************************************** // ****************************************************
return check; return check;