diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 414a13cf..0a8074a3 100755 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -55,9 +55,8 @@ ADD_LBPM_TEST( TestTopo3D ) ADD_LBPM_TEST( TestFluxBC ) ADD_LBPM_TEST( TestMap ) #ADD_LBPM_TEST( TestMRT ) -ADD_LBPM_TEST( TestColorGrad ) +#ADD_LBPM_TEST( TestColorGrad ) ADD_LBPM_TEST( TestWideHalo ) -#ADD_LBPM_TEST( TestColorGradDFH ) ADD_LBPM_TEST( TestColorGradDFH ) ADD_LBPM_TEST( TestBubbleDFH ../example/Bubble/input.db) #ADD_LBPM_TEST( TestColorMassBounceback ../example/Bubble/input.db) diff --git a/tests/TestColorGrad.cpp b/tests/TestColorGrad.cpp index ac59fc38..9ea243ea 100644 --- a/tests/TestColorGrad.cpp +++ b/tests/TestColorGrad.cpp @@ -3,7 +3,12 @@ // Lattice Boltzmann Simulator for Single Phase Flow in Porous Media // James E. McCLure //************************************************************************* -#include +#include // Initialize MPI + Utilities::startup( argc, argv ); + Utilities::MPI comm( MPI_COMM_WORLD ); + int rank = comm.getRank(); + int nprocs = comm.getSize(); + int check; #include #include #include "common/ScaLBL.h" @@ -21,8 +26,8 @@ int main(int argc, char **argv) // Initialize MPI Utilities::startup( argc, argv ); Utilities::MPI comm( MPI_COMM_WORLD ); - int rank = comm.getRank(); - int nprocs = comm.getSize(); + int rank = comm.getRank(); + int nprocs = comm.getSize(); int check; { // parallel domain size (# of sub-domains) @@ -47,105 +52,13 @@ int main(int argc, char **argv) int Nx,Ny,Nz; int i,j,k,n; int dim = 3; + Nx = Ny = Nz = 32; + Lx = Ly = Lz = 1.0; //if (rank == 0) printf("dim=%d\n",dim); int timestep = 0; int timesteps = 100; int centralNode = 2; - double tauA = 1.0; - double tauB = 1.0; - double rhoA = 1.0; - double rhoB = 1.0; - double alpha = 0.005; - double beta = 0.95; - - double tau = 1.0; - double mu=(tau-0.5)/3.0; - double rlx_setA=1.0/tau; - double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA); - - Fx = Fy = 0.f; - Fz = 0.f; - - if (rank==0){ - //....................................................................... - // Reading the domain information file - //....................................................................... - ifstream domain("Domain.in"); - if (domain.good()){ - domain >> nprocx; - domain >> nprocy; - domain >> nprocz; - domain >> Nx; - domain >> Ny; - domain >> Nz; - domain >> nspheres; - domain >> Lx; - domain >> Ly; - domain >> Lz; - } - else if (nprocs==1){ - nprocx=nprocy=nprocz=1; - Nx=Ny=Nz=3; - nspheres=0; - Lx=Ly=Lz=1; - } - else if (nprocs==2){ - nprocx=2; nprocy=1; - nprocz=1; - Nx=Ny=Nz=dim; - Nx = dim; Ny = dim; Nz = dim; - nspheres=0; - Lx=Ly=Lz=1; - } - else if (nprocs==4){ - nprocx=nprocy=2; - nprocz=1; - Nx=Ny=Nz=dim; - nspheres=0; - Lx=Ly=Lz=1; - } - else if (nprocs==8){ - nprocx=nprocy=nprocz=2; - Nx=Ny=Nz=dim; - nspheres=0; - Lx=Ly=Lz=1; - } - //....................................................................... - } - // ************************************************************** - // Broadcast simulation parameters from rank 0 to all other procs - MPI_Barrier(comm); - //................................................. - MPI_Bcast(&Nx,1,MPI_INT,0,comm); - MPI_Bcast(&Ny,1,MPI_INT,0,comm); - MPI_Bcast(&Nz,1,MPI_INT,0,comm); - MPI_Bcast(&nprocx,1,MPI_INT,0,comm); - MPI_Bcast(&nprocy,1,MPI_INT,0,comm); - MPI_Bcast(&nprocz,1,MPI_INT,0,comm); - MPI_Bcast(&nspheres,1,MPI_INT,0,comm); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); - //................................................. - MPI_Barrier(comm); - // ************************************************************** - // ************************************************************** - - if (nprocs != nprocx*nprocy*nprocz){ - printf("nprocx = %i \n",nprocx); - printf("nprocy = %i \n",nprocy); - printf("nprocz = %i \n",nprocz); - INSIST(nprocs == nprocx*nprocy*nprocz,"Fatal error in processor count!"); - } - - if (rank==0){ - printf("********************************************************\n"); - printf("Sub-domain size = %i x %i x %i\n",Nx,Ny,Nz); - printf("********************************************************\n"); - } - - MPI_Barrier(comm); double iVol_global = 1.0/Nx/Ny/Nz/nprocx/nprocy/nprocz; int BoundaryCondition=0; @@ -190,7 +103,7 @@ int main(int argc, char **argv) IntArray Map(Nx,Ny,Nz); neighborList= new int[18*Np]; - ScaLBL_Comm.MemoryOptimizedLayoutAA(Map,neighborList,Dm.id,Np); + ScaLBL_Comm.MemoryOptimizedLayoutAA(Map,neighborList,Dm.id,Np,1); MPI_Barrier(comm); //......................device distributions................................. diff --git a/tests/TestWideHalo.cpp b/tests/TestWideHalo.cpp index cc29a15d..767aeaeb 100644 --- a/tests/TestWideHalo.cpp +++ b/tests/TestWideHalo.cpp @@ -8,7 +8,7 @@ #include #include "common/ScaLBL.h" #include "common/WideHalo.h" -#include "common/MPI_Helpers.h" +#include "common/MPI.h" using namespace std; @@ -20,105 +20,55 @@ int main(int argc, char **argv) // ***** MPI STUFF **************** //***************************************** // Initialize MPI - int rank,nprocs; - MPI_Init(&argc,&argv); - MPI_Comm comm = MPI_COMM_WORLD; - MPI_Comm_rank(comm,&rank); - MPI_Comm_size(comm,&nprocs); - int check; + Utilities::startup( argc, argv ); + Utilities::MPI comm( MPI_COMM_WORLD ); + int rank = comm.getRank(); + int nprocs = comm.getSize(); + int check=0; { - // parallel domain size (# of sub-domains) - int nprocx,nprocy,nprocz; - int iproc,jproc,kproc; - if (rank == 0){ printf("********************************************************\n"); printf("Running Color Model: TestColor \n"); printf("********************************************************\n"); } - - // BGK Model parameters - string FILENAME; - unsigned int nBlocks, nthreads; - int timestepMax, interval; - double Fx,Fy,Fz,tol; // Domain variables + int nprocx, nprocy, nprocz; double Lx,Ly,Lz; - int nspheres; int Nx,Ny,Nz; int i,j,k,n; - int dim = 3; - //if (rank == 0) printf("dim=%d\n",dim); - int timestep = 0; - int timesteps = 100; - int centralNode = 2; + int dim = 16; + Lx = Ly = Lz = 1.0; + int BoundaryCondition=0; - double tauA = 1.0; - double tauB = 1.0; - double rhoA = 1.0; - double rhoB = 1.0; - double alpha = 0.005; - double beta = 0.95; - - double tau = 1.0; - double mu=(tau-0.5)/3.0; - double rlx_setA=1.0/tau; - double rlx_setB = 8.f*(2.f-rlx_setA)/(8.f-rlx_setA); - - Fx = Fy = 0.f; - Fz = 0.f; - - if (rank==0){ - //....................................................................... - // Reading the domain information file - //....................................................................... - if (nprocs==1){ - nprocx=nprocy=nprocz=1; - Nx=Ny=Nz=3; - nspheres=0; - Lx=Ly=Lz=1; - } - else if (nprocs==2){ - nprocx=2; nprocy=1; - nprocz=1; - Nx=Ny=Nz=dim; - Nx = dim; Ny = dim; Nz = dim; - nspheres=0; - Lx=Ly=Lz=1; - } - else if (nprocs==4){ - nprocx=nprocy=2; - nprocz=1; - Nx=Ny=Nz=dim; - nspheres=0; - Lx=Ly=Lz=1; - } - else if (nprocs==8){ - nprocx=nprocy=nprocz=2; - Nx=Ny=Nz=dim; - nspheres=0; - Lx=Ly=Lz=1; - } - //....................................................................... + //....................................................................... + // Reading the domain information file + //....................................................................... + nprocx=nprocy=nprocz=1; + if (nprocs==1){ + nprocx=nprocy=nprocz=1; + Nx=Ny=Nz=dim; + Lx=Ly=Lz=1; } - // ************************************************************** - // Broadcast simulation parameters from rank 0 to all other procs - MPI_Barrier(comm); - //................................................. - MPI_Bcast(&Nx,1,MPI_INT,0,comm); - MPI_Bcast(&Ny,1,MPI_INT,0,comm); - MPI_Bcast(&Nz,1,MPI_INT,0,comm); - MPI_Bcast(&nprocx,1,MPI_INT,0,comm); - MPI_Bcast(&nprocy,1,MPI_INT,0,comm); - MPI_Bcast(&nprocz,1,MPI_INT,0,comm); - MPI_Bcast(&nspheres,1,MPI_INT,0,comm); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); - //................................................. - MPI_Barrier(comm); - // ************************************************************** + else if (nprocs==2){ + nprocx=2; nprocy=1; + nprocz=1; + Nx=Ny=Nz=dim; + Nx = dim; Ny = dim; Nz = dim; + Lx=Ly=Lz=1; + } + else if (nprocs==4){ + nprocx=nprocy=2; + nprocz=1; + Nx=Ny=Nz=dim; + Lx=Ly=Lz=1; + } + else if (nprocs==8){ + nprocx=nprocy=nprocz=2; + Nx=Ny=Nz=dim; + Lx=Ly=Lz=1; + } + //....................................................................... // ************************************************************** if (nprocs != nprocx*nprocy*nprocz){ @@ -134,10 +84,7 @@ int main(int argc, char **argv) printf("********************************************************\n"); } - MPI_Barrier(comm); - - double iVol_global = 1.0/Nx/Ny/Nz/nprocx/nprocy/nprocz; - int BoundaryCondition=0; + comm.barrier(); std::shared_ptr Dm = std::shared_ptr(new Domain(Nx,Ny,Nz,rank,nprocx,nprocy,nprocz,Lx,Ly,Lz,BoundaryCondition)); Nx += 2; @@ -162,7 +109,7 @@ int main(int argc, char **argv) } } Dm->CommInit(); - MPI_Barrier(comm); + comm.barrier(); if (rank == 0) cout << "Domain set." << endl; if (rank==0) printf ("Create ScaLBL_Communicator \n"); @@ -179,12 +126,8 @@ int main(int argc, char **argv) IntArray Map(Nx,Ny,Nz); neighborList= new int[18*Np]; - ScaLBL_Comm.MemoryOptimizedLayoutAA(Map,neighborList,Dm->id,Np,2); - MPI_Barrier(comm); - - //......................device distributions................................. - int dist_mem_size = Np*sizeof(double); - if (rank==0) printf ("Allocating distributions \n"); + ScaLBL_Comm.MemoryOptimizedLayoutAA(Map,neighborList,Dm->id.data(),Np,2); + comm.barrier(); int *NeighborList; int *dvcMap; @@ -241,8 +184,10 @@ int main(int argc, char **argv) CY=COLORGRAD[Np+idx]; CZ=COLORGRAD[2*Np+idx]; double error=sqrt((CX-1.0)*(CX-1.0)+(CY-2.0)*(CY-2.0)+ (CZ-3.0)*(CZ-3.0)); - if (error > 1e-8) + if (error > 1e-8){ + check++; printf("i,j,k=%i,%i,%i: Color gradient=%f,%f,%f \n",i,j,k,CX,CY,CZ); + } } } } @@ -250,8 +195,8 @@ int main(int argc, char **argv) } // **************************************************** - MPI_Barrier(comm); - MPI_Finalize(); + comm.barrier(); + Utilities::shutdown(); // **************************************************** return check;