db refactor

This commit is contained in:
James E McClure
2018-05-15 16:04:29 -04:00
4 changed files with 171 additions and 685 deletions

View File

@@ -32,13 +32,9 @@ static inline void fgetl( char * str, int num, FILE * stream )
********************************************************/
Domain::Domain( int nx, int ny, int nz, int rnk, int npx, int npy, int npz,
double lx, double ly, double lz, int BC):
Nx(0), Ny(0), Nz(0), iproc(0), jproc(0), nprocx(0), nprocy(0), nprocz(0),
Nx(0), Ny(0), Nz(0),
Lx(0), Ly(0), Lz(0), Volume(0), rank(0), BoundaryCondition(0),
Group(MPI_GROUP_NULL), Comm(MPI_COMM_NULL),
rank_x(0), rank_y(0), rank_z(0), rank_X(0), rank_Y(0), rank_Z(0),
rank_xy(0), rank_XY(0), rank_xY(0), rank_Xy(0),
rank_xz(0), rank_XZ(0), rank_xZ(0), rank_Xz(0),
rank_yz(0), rank_YZ(0), rank_yZ(0), rank_Yz(0),
Comm(MPI_COMM_NULL),
sendCount_x(0), sendCount_y(0), sendCount_z(0), sendCount_X(0), sendCount_Y(0), sendCount_Z(0),
sendCount_xy(0), sendCount_yz(0), sendCount_xz(0), sendCount_Xy(0), sendCount_Yz(0), sendCount_xZ(0),
sendCount_xY(0), sendCount_yZ(0), sendCount_Xz(0), sendCount_XY(0), sendCount_YZ(0), sendCount_XZ(0),
@@ -74,13 +70,9 @@ Domain::Domain( int nx, int ny, int nz, int rnk, int npx, int npy, int npz,
initialize( db );
}
Domain::Domain( std::shared_ptr<Database> db ):
Nx(0), Ny(0), Nz(0), iproc(0), jproc(0), nprocx(0), nprocy(0), nprocz(0),
Nx(0), Ny(0), Nz(0), iproc(0), jproc(0),
Lx(0), Ly(0), Lz(0), Volume(0), rank(0), BoundaryCondition(0),
Group(MPI_GROUP_NULL), Comm(MPI_COMM_NULL),
rank_x(0), rank_y(0), rank_z(0), rank_X(0), rank_Y(0), rank_Z(0),
rank_xy(0), rank_XY(0), rank_xY(0), rank_Xy(0),
rank_xz(0), rank_XZ(0), rank_xZ(0), rank_Xz(0),
rank_yz(0), rank_YZ(0), rank_yZ(0), rank_Yz(0),
Comm(MPI_COMM_NULL),
sendCount_x(0), sendCount_y(0), sendCount_z(0), sendCount_X(0), sendCount_Y(0), sendCount_Z(0),
sendCount_xy(0), sendCount_yz(0), sendCount_xz(0), sendCount_Xy(0), sendCount_Yz(0), sendCount_xZ(0),
sendCount_xY(0), sendCount_yZ(0), sendCount_Xz(0), sendCount_XY(0), sendCount_YZ(0), sendCount_XZ(0),
@@ -115,6 +107,7 @@ void Domain::initialize( std::shared_ptr<Database> db )
auto nproc = d_db->getVector<int>("nproc");
auto n = d_db->getVector<int>("n");
auto L = d_db->getVector<double>("L");
//nspheres = d_db->getScalar<int>("nspheres");
ASSERT( n.size() == 3u );
ASSERT( L.size() == 3u );
ASSERT( nproc.size() == 3u );
@@ -127,17 +120,17 @@ void Domain::initialize( std::shared_ptr<Database> db )
Nx = nx+2;
Ny = ny+2;
Nz = nz+2;
nprocx = nproc[0];
nprocy = nproc[1];
nprocz = nproc[2];
// Initialize ranks
int myrank;
MPI_Comm_rank( MPI_COMM_WORLD, &myrank );
rank_info = RankInfoStruct(myrank,nproc[0],nproc[1],nproc[2]);
// Fill remaining variables
N = Nx*Ny*Nz;
Volume = nx*ny*nx*nprocx*nprocy*nprocz*1.0;
id = new char[N];
memset(id,0,N);
BoundaryCondition = d_db->getScalar<int>("BC");
MPI_Comm_rank( MPI_COMM_WORLD, &rank );
rank_info=RankInfoStruct(rank,nprocx,nprocy,nprocz);
int nprocs;
int nprocs;
MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
INSIST(nprocs == nprocx*nprocy*nprocz,"Fatal error in processor count!");
}
@@ -188,91 +181,27 @@ Domain::~Domain()
// Free id
delete [] id;
// Free the communicator
if ( Group!=MPI_GROUP_NULL ) {
MPI_Group_free(&Group);
if ( Comm != MPI_COMM_WORLD && Comm != MPI_COMM_NULL ) {
MPI_Comm_free(&Comm);
}
}
void Domain::InitializeRanks()
/********************************************************
* Initialize communication *
********************************************************/
void Domain::CommInit(MPI_Comm Communicator)
{
int i,j,k;
kproc = rank/(nprocx*nprocy);
jproc = (rank-nprocx*nprocy*kproc)/nprocx;
iproc = rank-nprocx*nprocy*kproc-nprocx*jproc;
// set up the neighbor ranks
i = iproc;
j = jproc;
k = kproc;
rank_X = getRankForBlock(i+1,j,k);
rank_x = getRankForBlock(i-1,j,k);
rank_Y = getRankForBlock(i,j+1,k);
rank_y = getRankForBlock(i,j-1,k);
rank_Z = getRankForBlock(i,j,k+1);
rank_z = getRankForBlock(i,j,k-1);
rank_XY = getRankForBlock(i+1,j+1,k);
rank_xy = getRankForBlock(i-1,j-1,k);
rank_Xy = getRankForBlock(i+1,j-1,k);
rank_xY = getRankForBlock(i-1,j+1,k);
rank_XZ = getRankForBlock(i+1,j,k+1);
rank_xz = getRankForBlock(i-1,j,k-1);
rank_Xz = getRankForBlock(i+1,j,k-1);
rank_xZ = getRankForBlock(i-1,j,k+1);
rank_YZ = getRankForBlock(i,j+1,k+1);
rank_yz = getRankForBlock(i,j-1,k-1);
rank_Yz = getRankForBlock(i,j+1,k-1);
rank_yZ = getRankForBlock(i,j-1,k+1);
}
void Domain::CommInit(MPI_Comm Communicator){
int i,j,k,n;
int sendtag = 21;
int recvtag = 21;
//......................................................................................
//Get the ranks of each process and it's neighbors
// map the rank to the block index
//iproc = rank%nprocx;
//jproc = (rank/nprocx)%nprocy;
//kproc = rank/(nprocx*nprocy);
MPI_Comm_dup(Communicator,&Comm);
// set up the neighbor ranks
rank_info = RankInfoStruct( rank, nprocx, nprocy, nprocz );
MPI_Barrier(Communicator);
kproc = rank/(nprocx*nprocy);
jproc = (rank-nprocx*nprocy*kproc)/nprocx;
iproc = rank-nprocx*nprocy*kproc-nprocx*jproc;
// set up the neighbor ranks
i = iproc;
j = jproc;
k = kproc;
rank_X = getRankForBlock(i+1,j,k);
rank_x = getRankForBlock(i-1,j,k);
rank_Y = getRankForBlock(i,j+1,k);
rank_y = getRankForBlock(i,j-1,k);
rank_Z = getRankForBlock(i,j,k+1);
rank_z = getRankForBlock(i,j,k-1);
rank_XY = getRankForBlock(i+1,j+1,k);
rank_xy = getRankForBlock(i-1,j-1,k);
rank_Xy = getRankForBlock(i+1,j-1,k);
rank_xY = getRankForBlock(i-1,j+1,k);
rank_XZ = getRankForBlock(i+1,j,k+1);
rank_xz = getRankForBlock(i-1,j,k-1);
rank_Xz = getRankForBlock(i+1,j,k-1);
rank_xZ = getRankForBlock(i-1,j,k+1);
rank_YZ = getRankForBlock(i,j+1,k+1);
rank_yz = getRankForBlock(i,j-1,k-1);
rank_Yz = getRankForBlock(i,j+1,k-1);
rank_yZ = getRankForBlock(i,j-1,k+1);
//......................................................................................
//MPI_Comm_group(Communicator,&Group);
//MPI_Comm_create(Communicator,Group,&Comm);
MPI_Comm_dup(MPI_COMM_WORLD,&Comm);
//......................................................................................
MPI_Request req1[18], req2[18];
@@ -572,6 +501,9 @@ void Domain::CommInit(MPI_Comm Communicator){
}
/********************************************************
* AssignComponentLabels *
********************************************************/
void Domain::AssignComponentLabels(double *phase)
{
int NLABELS=0;
@@ -660,543 +592,6 @@ void Domain::AssignComponentLabels(double *phase)
}
// Diagnostic variant of CommInit: performs the same halo-exchange setup
// (neighbor ranks, send/recv counts, lists, and buffers for the 6 faces and
// 12 edges of the local subdomain) while printing intermediate state from
// ranks 0-2 so the communication pattern can be inspected by hand.
// NOTE(review): this routine duplicates CommInit and exists purely for
// debugging; it allocates with raw new[] and never frees on re-entry.
void Domain::TestCommInit(MPI_Comm Communicator){
int i,j,k,n;
int sendtag = 21;
int recvtag = 21;
//......................................................................................
// Get the ranks of each process and its neighbors.
// Map the linear rank to the (iproc,jproc,kproc) block index (row-major in x).
iproc = rank%nprocx;
jproc = (rank/nprocx)%nprocy;
kproc = rank/(nprocx*nprocy);
// set up the neighbor ranks
i = iproc;
j = jproc;
k = kproc;
MPI_Barrier(MPI_COMM_WORLD);
// Recompute the block index by explicit subtraction (should match the above).
kproc = rank/(nprocx*nprocy);
jproc = (rank-nprocx*nprocy*kproc)/nprocx;
// NOTE(review): BUG — the subtraction must use nprocx*jproc (the row stride),
// not nprocz*jproc; this yields a wrong iproc whenever nprocz != nprocx.
// The non-test code path computes iproc = rank - nprocx*nprocy*kproc - nprocx*jproc.
iproc = rank-nprocx*nprocy*kproc-nprocz*jproc;
if (rank == 0) {
printf("* In Domain::CommInit...\n");
printf("* i,j,k proc=%d %d %d \n",i,j,k);
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 1){
printf("* i,j,k proc=%d %d %d \n",i,j,k);
printf("\n\n");
}
if(rank == 0) { printf("* Setting up ranks for each processor...\n");
}
MPI_Barrier(MPI_COMM_WORLD);
// Neighbor ranks for the 6 faces (lowercase = -1 direction, uppercase = +1)
// and the 12 edges; getRankForBlock handles periodic wrap / out-of-range.
rank_X = getRankForBlock(i+1,j,k);
rank_x = getRankForBlock(i-1,j,k);
if (rank ==0 ) printf("rank = %d: rank_X = %d, rank_x = %d \n", rank, rank_X, rank_x);
if (rank ==1 ) printf("rank = %d: rank_X = %d, rank_x = %d \n", rank, rank_X, rank_x);
if (rank ==2 ) printf("rank = %d: rank_X = %d, rank_x = %d \n", rank, rank_X, rank_x);
rank_Y = getRankForBlock(i,j+1,k);
rank_y = getRankForBlock(i,j-1,k);
rank_Z = getRankForBlock(i,j,k+1);
rank_z = getRankForBlock(i,j,k-1);
rank_XY = getRankForBlock(i+1,j+1,k);
rank_xy = getRankForBlock(i-1,j-1,k);
rank_Xy = getRankForBlock(i+1,j-1,k);
rank_xY = getRankForBlock(i-1,j+1,k);
rank_XZ = getRankForBlock(i+1,j,k+1);
rank_xz = getRankForBlock(i-1,j,k-1);
rank_Xz = getRankForBlock(i+1,j,k-1);
rank_xZ = getRankForBlock(i-1,j,k+1);
rank_YZ = getRankForBlock(i,j+1,k+1);
rank_yz = getRankForBlock(i,j-1,k-1);
rank_Yz = getRankForBlock(i,j+1,k-1);
rank_yZ = getRankForBlock(i,j-1,k+1);
//......................................................................................
// NOTE(review): Group and Comm created here are never freed in this routine;
// repeated calls leak MPI objects (the destructor frees Group, but Comm from
// MPI_Comm_create is not obviously released — verify against ~Domain()).
MPI_Comm_group(Communicator,&Group);
MPI_Comm_create(Communicator,Group,&Comm);
//......................................................................................
MPI_Request req1[18], req2[18];
MPI_Status stat1[18],stat2[18];
//......................................................................................
// Pass 1: count how many interior (non-solid, id != 0) sites touch each of
// the 6 faces and 12 edges of the local box (halo width 1, so the interior
// runs 1..N-2 in each direction).
for (k=1; k<Nz-1; k++){
for (j=1; j<Ny-1; j++){
for (i=1; i<Nx-1; i++){
// Check the phase ID
if (id[k*Nx*Ny+j*Nx+i] != 0){
// Counts for the six faces
if (i==1) sendCount_x++;
if (j==1) sendCount_y++;
if (k==1) sendCount_z++;
if (i==Nx-2) sendCount_X++;
if (j==Ny-2) sendCount_Y++;
if (k==Nz-2) sendCount_Z++;
// Counts for the twelve edges
if (i==1 && j==1) sendCount_xy++;
if (i==1 && j==Ny-2) sendCount_xY++;
if (i==Nx-2 && j==1) sendCount_Xy++;
if (i==Nx-2 && j==Ny-2) sendCount_XY++;
if (i==1 && k==1) sendCount_xz++;
if (i==1 && k==Nz-2) sendCount_xZ++;
if (i==Nx-2 && k==1) sendCount_Xz++;
if (i==Nx-2 && k==Nz-2) sendCount_XZ++;
if (j==1 && k==1) sendCount_yz++;
if (j==1 && k==Nz-2) sendCount_yZ++;
if (j==Ny-2 && k==1) sendCount_Yz++;
if (j==Ny-2 && k==Nz-2) sendCount_YZ++;
}
}
}
}
// NOTE(review): assumes the counters were zero-initialized by the constructor;
// calling TestCommInit twice would double-count.
if (rank == 0) {
printf("* All sendCount_# should be the same across multiple processors for block-type domains except for when solid is in the domain... *\n\n");
printf("* sendCount_x: %d \n",sendCount_x);
printf("* sendCount_y: %d \n",sendCount_y);
printf("* sendCount_z: %d \n",sendCount_z);
printf("* sendCount_X: %d \n",sendCount_X);
printf("* sendCount_Y: %d \n",sendCount_Y);
printf("* sendCount_Z: %d \n",sendCount_Z);
printf("* sendCount_xy:%d \n",sendCount_xy);
printf("* sendCount_xY:%d \n",sendCount_xY);
printf("* sendCount_Xy:%d \n",sendCount_Xy);
printf("* sendCount_XY:%d \n",sendCount_XY);
printf("* sendCount_xz:%d \n",sendCount_xz);
printf("* sendCount_xZ:%d \n",sendCount_xZ);
printf("* sendCount_Xz:%d \n",sendCount_Xz);
printf("* sendCount_XZ:%d \n",sendCount_XZ);
printf("* sendCount_yz:%d \n",sendCount_yz);
printf("* sendCount_yZ:%d \n",sendCount_yZ);
printf("* sendCount_Yz:%d \n",sendCount_Yz);
printf("* sendCount_YZ:%d \n",sendCount_YZ);
printf("\n\n");
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 1) {
printf("* sendCount_x: %d \n",sendCount_x);
printf("* sendCount_y: %d \n",sendCount_y);
printf("* sendCount_z: %d \n",sendCount_z);
printf("* sendCount_X: %d \n",sendCount_X);
printf("* sendCount_Y: %d \n",sendCount_Y);
printf("* sendCount_Z: %d \n",sendCount_Z);
printf("* sendCount_xy:%d \n",sendCount_xy);
printf("* sendCount_xY:%d \n",sendCount_xY);
printf("* sendCount_Xy:%d \n",sendCount_Xy);
printf("* sendCount_XY:%d \n",sendCount_XY);
printf("* sendCount_xz:%d \n",sendCount_xz);
printf("* sendCount_xZ:%d \n",sendCount_xZ);
printf("* sendCount_Xz:%d \n",sendCount_Xz);
printf("* sendCount_XZ:%d \n",sendCount_XZ);
printf("* sendCount_yz:%d \n",sendCount_yz);
printf("* sendCount_yZ:%d \n",sendCount_yZ);
printf("* sendCount_Yz:%d \n",sendCount_Yz);
printf("* sendCount_YZ:%d \n",sendCount_YZ);
printf("\n\n");
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0) printf("* sendList_# has been allocated through construction of Dm but sizes not determined. Creating arrays with size sendCount_#... *\n");
MPI_Barrier(MPI_COMM_WORLD);
// allocate send lists (raw new[]; ownership stays with Domain)
sendList_x = new int [sendCount_x];
sendList_y = new int [sendCount_y];
sendList_z = new int [sendCount_z];
sendList_X = new int [sendCount_X];
sendList_Y = new int [sendCount_Y];
sendList_Z = new int [sendCount_Z];
sendList_xy = new int [sendCount_xy];
sendList_yz = new int [sendCount_yz];
sendList_xz = new int [sendCount_xz];
sendList_Xy = new int [sendCount_Xy];
sendList_Yz = new int [sendCount_Yz];
sendList_xZ = new int [sendCount_xZ];
sendList_xY = new int [sendCount_xY];
sendList_yZ = new int [sendCount_yZ];
sendList_Xz = new int [sendCount_Xz];
sendList_XY = new int [sendCount_XY];
sendList_YZ = new int [sendCount_YZ];
sendList_XZ = new int [sendCount_XZ];
// Pass 2: reset the counters and re-walk the interior to record the linear
// index n of every boundary site into the matching send list.
sendCount_x = sendCount_y = sendCount_z = sendCount_X = sendCount_Y = sendCount_Z = 0;
sendCount_xy = sendCount_yz = sendCount_xz = sendCount_Xy = sendCount_Yz = sendCount_xZ = 0;
sendCount_xY = sendCount_yZ = sendCount_Xz = sendCount_XY = sendCount_YZ = sendCount_XZ = 0;
for (k=1; k<Nz-1; k++){
for (j=1; j<Ny-1; j++){
for (i=1; i<Nx-1; i++){
// Local value to send
n = k*Nx*Ny+j*Nx+i;
if (id[n] != 0){
// Counts for the six faces
if (i==1) sendList_x[sendCount_x++]=n;
if (j==1) sendList_y[sendCount_y++]=n;
if (k==1) sendList_z[sendCount_z++]=n;
if (i==Nx-2) sendList_X[sendCount_X++]=n;
if (j==Ny-2) sendList_Y[sendCount_Y++]=n;
if (k==Nz-2) sendList_Z[sendCount_Z++]=n;
// Counts for the twelve edges
if (i==1 && j==1) sendList_xy[sendCount_xy++]=n;
if (i==1 && j==Ny-2) sendList_xY[sendCount_xY++]=n;
if (i==Nx-2 && j==1) sendList_Xy[sendCount_Xy++]=n;
if (i==Nx-2 && j==Ny-2) sendList_XY[sendCount_XY++]=n;
if (i==1 && k==1) sendList_xz[sendCount_xz++]=n;
if (i==1 && k==Nz-2) sendList_xZ[sendCount_xZ++]=n;
if (i==Nx-2 && k==1) sendList_Xz[sendCount_Xz++]=n;
if (i==Nx-2 && k==Nz-2) sendList_XZ[sendCount_XZ++]=n;
if (j==1 && k==1) sendList_yz[sendCount_yz++]=n;
if (j==1 && k==Nz-2) sendList_yZ[sendCount_yZ++]=n;
if (j==Ny-2 && k==1) sendList_Yz[sendCount_Yz++]=n;
if (j==Ny-2 && k==Nz-2) sendList_YZ[sendCount_YZ++]=n;
}
}
}
}
// NOTE(review): the debug prints below read sendList_#[0..3] without checking
// sendCount_# >= 4 — out-of-bounds read on very small subdomains.
if (rank == 0) {
printf("* All blocks should be sending information to the same locations relative to its block - so the data should be the same across processors... *\n");
printf("* Expecting some random memory addresses... \n\n");
printf("* sendList_x: %d %d %d %d \n",sendList_x[0],sendList_x[1],sendList_x[2],sendList_x[3]);
printf("* sendList_y: %d %d %d %d \n",sendList_y[0],sendList_y[1],sendList_y[2],sendList_y[3]);
printf("* sendList_z: %d %d %d %d \n",sendList_z[0],sendList_z[1],sendList_z[2],sendList_z[3]);
printf("* sendList_X: %d %d %d %d \n",sendList_X[0],sendList_X[1],sendList_X[2],sendList_X[3]);
printf("* sendList_Y: %d %d %d %d \n",sendList_Y[0],sendList_Y[1],sendList_Y[2],sendList_Y[3]);
printf("* sendList_Z: %d %d %d %d \n",sendList_Z[0],sendList_Z[1],sendList_Z[2],sendList_Z[3]);
printf("* sendList_xy:%d %d %d %d \n",sendList_xy[0],sendList_xy[1],sendList_xy[2],sendList_xy[3]);
printf("* sendList_xY:%d %d %d %d \n",sendList_xY[0],sendList_xY[1],sendList_xY[2],sendList_xY[3]);
printf("* sendList_Xy:%d %d %d %d \n",sendList_Xy[0],sendList_Xy[1],sendList_Xy[2],sendList_Xy[3]);
printf("* sendList_XY:%d %d %d %d \n",sendList_XY[0],sendList_XY[1],sendList_XY[2],sendList_XY[3]);
printf("* sendList_xz:%d %d %d %d \n",sendList_xz[0],sendList_xz[1],sendList_xz[2],sendList_xz[3]);
printf("* sendList_xZ:%d %d %d %d \n",sendList_xZ[0],sendList_xZ[1],sendList_xZ[2],sendList_xZ[3]);
printf("* sendList_Xz:%d %d %d %d \n",sendList_Xz[0],sendList_Xz[1],sendList_Xz[2],sendList_Xz[3]);
printf("* sendList_XZ:%d %d %d %d \n",sendList_XZ[0],sendList_XZ[1],sendList_XZ[2],sendList_XZ[3]);
printf("* sendList_yz:%d %d %d %d \n",sendList_yz[0],sendList_yz[1],sendList_yz[2],sendList_yz[3]);
printf("* sendList_yZ:%d %d %d %d \n",sendList_yZ[0],sendList_yZ[1],sendList_yZ[2],sendList_yZ[3]);
printf("* sendList_Yz:%d %d %d %d \n",sendList_Yz[0],sendList_Yz[1],sendList_Yz[2],sendList_Yz[3]);
printf("* sendList_YZ:%d %d %d %d \n",sendList_YZ[0],sendList_YZ[1],sendList_YZ[2],sendList_YZ[3]);
printf("\n");
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 1) {
printf("* Expecting some random memory addresses... \n\n");
printf("* sendList_x: %d %d %d %d \n",sendList_x[0],sendList_x[1],sendList_x[2],sendList_x[3]);
printf("* sendList_y: %d %d %d %d \n",sendList_y[0],sendList_y[1],sendList_y[2],sendList_y[3]);
printf("* sendList_z: %d %d %d %d \n",sendList_z[0],sendList_z[1],sendList_z[2],sendList_z[3]);
printf("* sendList_X: %d %d %d %d \n",sendList_X[0],sendList_X[1],sendList_X[2],sendList_X[3]);
printf("* sendList_Y: %d %d %d %d \n",sendList_Y[0],sendList_Y[1],sendList_Y[2],sendList_Y[3]);
printf("* sendList_Z: %d %d %d %d \n",sendList_Z[0],sendList_Z[1],sendList_Z[2],sendList_Z[3]);
printf("* sendList_xy:%d %d %d %d \n",sendList_xy[0],sendList_xy[1],sendList_xy[2],sendList_xy[3]);
printf("* sendList_xY:%d %d %d %d \n",sendList_xY[0],sendList_xY[1],sendList_xY[2],sendList_xY[3]);
printf("* sendList_Xy:%d %d %d %d \n",sendList_Xy[0],sendList_Xy[1],sendList_Xy[2],sendList_Xy[3]);
printf("* sendList_XY:%d %d %d %d \n",sendList_XY[0],sendList_XY[1],sendList_XY[2],sendList_XY[3]);
printf("* sendList_xz:%d %d %d %d \n",sendList_xz[0],sendList_xz[1],sendList_xz[2],sendList_xz[3]);
printf("* sendList_xZ:%d %d %d %d \n",sendList_xZ[0],sendList_xZ[1],sendList_xZ[2],sendList_xZ[3]);
printf("* sendList_Xz:%d %d %d %d \n",sendList_Xz[0],sendList_Xz[1],sendList_Xz[2],sendList_Xz[3]);
printf("* sendList_XZ:%d %d %d %d \n",sendList_XZ[0],sendList_XZ[1],sendList_XZ[2],sendList_XZ[3]);
printf("* sendList_yz:%d %d %d %d \n",sendList_yz[0],sendList_yz[1],sendList_yz[2],sendList_yz[3]);
printf("* sendList_yZ:%d %d %d %d \n",sendList_yZ[0],sendList_yZ[1],sendList_yZ[2],sendList_yZ[3]);
printf("* sendList_Yz:%d %d %d %d \n",sendList_Yz[0],sendList_Yz[1],sendList_Yz[2],sendList_Yz[3]);
printf("* sendList_YZ:%d %d %d %d \n",sendList_YZ[0],sendList_YZ[1],sendList_YZ[2],sendList_YZ[3]);
printf("\n\n");
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 0) printf("* sendBuf_# has been allocated through construction of Dm but sizes not determined. Creating arrays with size sendCount_#... *\n");
MPI_Barrier(MPI_COMM_WORLD);
// allocate send buffers
sendBuf_x = new int [sendCount_x];
sendBuf_y = new int [sendCount_y];
sendBuf_z = new int [sendCount_z];
sendBuf_X = new int [sendCount_X];
sendBuf_Y = new int [sendCount_Y];
sendBuf_Z = new int [sendCount_Z];
sendBuf_xy = new int [sendCount_xy];
sendBuf_yz = new int [sendCount_yz];
sendBuf_xz = new int [sendCount_xz];
sendBuf_Xy = new int [sendCount_Xy];
sendBuf_Yz = new int [sendCount_Yz];
sendBuf_xZ = new int [sendCount_xZ];
sendBuf_xY = new int [sendCount_xY];
sendBuf_yZ = new int [sendCount_yZ];
sendBuf_Xz = new int [sendCount_Xz];
sendBuf_XY = new int [sendCount_XY];
sendBuf_YZ = new int [sendCount_YZ];
sendBuf_XZ = new int [sendCount_XZ];
//......................................................................................
// Exchange the counts with each of the 18 neighbors: a send to the '-'
// neighbor pairs with a receive from the '+' neighbor (and vice versa),
// using a distinct tag offset per direction pair.
if (rank == 0) printf("* >> Isending and Ireceiving count data across processors...\n");
MPI_Barrier(MPI_COMM_WORLD);
MPI_Isend(&sendCount_x, 1,MPI_INT,rank_x,sendtag+0,Communicator,&req1[0]);
MPI_Irecv(&recvCount_X, 1,MPI_INT,rank_X,recvtag+0,Communicator,&req2[0]);
MPI_Isend(&sendCount_X, 1,MPI_INT,rank_X,sendtag+1,Communicator,&req1[1]);
MPI_Irecv(&recvCount_x, 1,MPI_INT,rank_x,recvtag+1,Communicator,&req2[1]);
MPI_Isend(&sendCount_y, 1,MPI_INT,rank_y,sendtag+2,Communicator,&req1[2]);
MPI_Irecv(&recvCount_Y, 1,MPI_INT,rank_Y,recvtag+2,Communicator,&req2[2]);
MPI_Isend(&sendCount_Y, 1,MPI_INT,rank_Y,sendtag+3,Communicator,&req1[3]);
MPI_Irecv(&recvCount_y, 1,MPI_INT,rank_y,recvtag+3,Communicator,&req2[3]);
MPI_Isend(&sendCount_z, 1,MPI_INT,rank_z,sendtag+4,Communicator,&req1[4]);
MPI_Irecv(&recvCount_Z, 1,MPI_INT,rank_Z,recvtag+4,Communicator,&req2[4]);
MPI_Isend(&sendCount_Z, 1,MPI_INT,rank_Z,sendtag+5,Communicator,&req1[5]);
MPI_Irecv(&recvCount_z, 1,MPI_INT,rank_z,recvtag+5,Communicator,&req2[5]);
MPI_Isend(&sendCount_xy, 1,MPI_INT,rank_xy,sendtag+6,Communicator,&req1[6]);
MPI_Irecv(&recvCount_XY, 1,MPI_INT,rank_XY,recvtag+6,Communicator,&req2[6]);
MPI_Isend(&sendCount_XY, 1,MPI_INT,rank_XY,sendtag+7,Communicator,&req1[7]);
MPI_Irecv(&recvCount_xy, 1,MPI_INT,rank_xy,recvtag+7,Communicator,&req2[7]);
MPI_Isend(&sendCount_Xy, 1,MPI_INT,rank_Xy,sendtag+8,Communicator,&req1[8]);
MPI_Irecv(&recvCount_xY, 1,MPI_INT,rank_xY,recvtag+8,Communicator,&req2[8]);
MPI_Isend(&sendCount_xY, 1,MPI_INT,rank_xY,sendtag+9,Communicator,&req1[9]);
MPI_Irecv(&recvCount_Xy, 1,MPI_INT,rank_Xy,recvtag+9,Communicator,&req2[9]);
MPI_Isend(&sendCount_xz, 1,MPI_INT,rank_xz,sendtag+10,Communicator,&req1[10]);
MPI_Irecv(&recvCount_XZ, 1,MPI_INT,rank_XZ,recvtag+10,Communicator,&req2[10]);
MPI_Isend(&sendCount_XZ, 1,MPI_INT,rank_XZ,sendtag+11,Communicator,&req1[11]);
MPI_Irecv(&recvCount_xz, 1,MPI_INT,rank_xz,recvtag+11,Communicator,&req2[11]);
MPI_Isend(&sendCount_Xz, 1,MPI_INT,rank_Xz,sendtag+12,Communicator,&req1[12]);
MPI_Irecv(&recvCount_xZ, 1,MPI_INT,rank_xZ,recvtag+12,Communicator,&req2[12]);
MPI_Isend(&sendCount_xZ, 1,MPI_INT,rank_xZ,sendtag+13,Communicator,&req1[13]);
MPI_Irecv(&recvCount_Xz, 1,MPI_INT,rank_Xz,recvtag+13,Communicator,&req2[13]);
MPI_Isend(&sendCount_yz, 1,MPI_INT,rank_yz,sendtag+14,Communicator,&req1[14]);
MPI_Irecv(&recvCount_YZ, 1,MPI_INT,rank_YZ,recvtag+14,Communicator,&req2[14]);
MPI_Isend(&sendCount_YZ, 1,MPI_INT,rank_YZ,sendtag+15,Communicator,&req1[15]);
MPI_Irecv(&recvCount_yz, 1,MPI_INT,rank_yz,recvtag+15,Communicator,&req2[15]);
MPI_Isend(&sendCount_Yz, 1,MPI_INT,rank_Yz,sendtag+16,Communicator,&req1[16]);
MPI_Irecv(&recvCount_yZ, 1,MPI_INT,rank_yZ,recvtag+16,Communicator,&req2[16]);
MPI_Isend(&sendCount_yZ, 1,MPI_INT,rank_yZ,sendtag+17,Communicator,&req1[17]);
MPI_Irecv(&recvCount_Yz, 1,MPI_INT,rank_Yz,recvtag+17,Communicator,&req2[17]);
MPI_Waitall(18,req1,stat1);
MPI_Waitall(18,req2,stat2);
MPI_Barrier(Communicator);
//......................................................................................
if (rank == 0) printf("* recvList_# has been allocated through construction of Dm but sizes not determined. Creating arrays with size recvCount_#... *\n");
MPI_Barrier(MPI_COMM_WORLD);
// recv buffers (sized by the counts just received)
recvList_x = new int [recvCount_x];
recvList_y = new int [recvCount_y];
recvList_z = new int [recvCount_z];
recvList_X = new int [recvCount_X];
recvList_Y = new int [recvCount_Y];
recvList_Z = new int [recvCount_Z];
recvList_xy = new int [recvCount_xy];
recvList_yz = new int [recvCount_yz];
recvList_xz = new int [recvCount_xz];
recvList_Xy = new int [recvCount_Xy];
recvList_Yz = new int [recvCount_Yz];
recvList_xZ = new int [recvCount_xZ];
recvList_xY = new int [recvCount_xY];
recvList_yZ = new int [recvCount_yZ];
recvList_Xz = new int [recvCount_Xz];
recvList_XY = new int [recvCount_XY];
recvList_YZ = new int [recvCount_YZ];
recvList_XZ = new int [recvCount_XZ];
//......................................................................................
// Exchange the actual site-index lists (same pairing as the count exchange,
// but all messages share a single tag — direction is disambiguated by rank).
if (rank == 0) printf("* >> Isending and Ireceiving list data (of size sendCount_# and recvCount_#) across processors...\n");
MPI_Barrier(MPI_COMM_WORLD);
MPI_Isend(sendList_x, sendCount_x,MPI_INT,rank_x,sendtag,Communicator,&req1[0]);
MPI_Irecv(recvList_X, recvCount_X,MPI_INT,rank_X,recvtag,Communicator,&req2[0]);
MPI_Isend(sendList_X, sendCount_X,MPI_INT,rank_X,sendtag,Communicator,&req1[1]);
MPI_Irecv(recvList_x, recvCount_x,MPI_INT,rank_x,recvtag,Communicator,&req2[1]);
MPI_Isend(sendList_y, sendCount_y,MPI_INT,rank_y,sendtag,Communicator,&req1[2]);
MPI_Irecv(recvList_Y, recvCount_Y,MPI_INT,rank_Y,recvtag,Communicator,&req2[2]);
MPI_Isend(sendList_Y, sendCount_Y,MPI_INT,rank_Y,sendtag,Communicator,&req1[3]);
MPI_Irecv(recvList_y, recvCount_y,MPI_INT,rank_y,recvtag,Communicator,&req2[3]);
MPI_Isend(sendList_z, sendCount_z,MPI_INT,rank_z,sendtag,Communicator,&req1[4]);
MPI_Irecv(recvList_Z, recvCount_Z,MPI_INT,rank_Z,recvtag,Communicator,&req2[4]);
MPI_Isend(sendList_Z, sendCount_Z,MPI_INT,rank_Z,sendtag,Communicator,&req1[5]);
MPI_Irecv(recvList_z, recvCount_z,MPI_INT,rank_z,recvtag,Communicator,&req2[5]);
MPI_Isend(sendList_xy, sendCount_xy,MPI_INT,rank_xy,sendtag,Communicator,&req1[6]);
MPI_Irecv(recvList_XY, recvCount_XY,MPI_INT,rank_XY,recvtag,Communicator,&req2[6]);
MPI_Isend(sendList_XY, sendCount_XY,MPI_INT,rank_XY,sendtag,Communicator,&req1[7]);
MPI_Irecv(recvList_xy, recvCount_xy,MPI_INT,rank_xy,recvtag,Communicator,&req2[7]);
MPI_Isend(sendList_Xy, sendCount_Xy,MPI_INT,rank_Xy,sendtag,Communicator,&req1[8]);
MPI_Irecv(recvList_xY, recvCount_xY,MPI_INT,rank_xY,recvtag,Communicator,&req2[8]);
MPI_Isend(sendList_xY, sendCount_xY,MPI_INT,rank_xY,sendtag,Communicator,&req1[9]);
MPI_Irecv(recvList_Xy, recvCount_Xy,MPI_INT,rank_Xy,recvtag,Communicator,&req2[9]);
MPI_Isend(sendList_xz, sendCount_xz,MPI_INT,rank_xz,sendtag,Communicator,&req1[10]);
MPI_Irecv(recvList_XZ, recvCount_XZ,MPI_INT,rank_XZ,recvtag,Communicator,&req2[10]);
MPI_Isend(sendList_XZ, sendCount_XZ,MPI_INT,rank_XZ,sendtag,Communicator,&req1[11]);
MPI_Irecv(recvList_xz, recvCount_xz,MPI_INT,rank_xz,recvtag,Communicator,&req2[11]);
MPI_Isend(sendList_Xz, sendCount_Xz,MPI_INT,rank_Xz,sendtag,Communicator,&req1[12]);
MPI_Irecv(recvList_xZ, recvCount_xZ,MPI_INT,rank_xZ,recvtag,Communicator,&req2[12]);
MPI_Isend(sendList_xZ, sendCount_xZ,MPI_INT,rank_xZ,sendtag,Communicator,&req1[13]);
MPI_Irecv(recvList_Xz, recvCount_Xz,MPI_INT,rank_Xz,recvtag,Communicator,&req2[13]);
MPI_Isend(sendList_yz, sendCount_yz,MPI_INT,rank_yz,sendtag,Communicator,&req1[14]);
MPI_Irecv(recvList_YZ, recvCount_YZ,MPI_INT,rank_YZ,recvtag,Communicator,&req2[14]);
MPI_Isend(sendList_YZ, sendCount_YZ,MPI_INT,rank_YZ,sendtag,Communicator,&req1[15]);
MPI_Irecv(recvList_yz, recvCount_yz,MPI_INT,rank_yz,recvtag,Communicator,&req2[15]);
MPI_Isend(sendList_Yz, sendCount_Yz,MPI_INT,rank_Yz,sendtag,Communicator,&req1[16]);
MPI_Irecv(recvList_yZ, recvCount_yZ,MPI_INT,rank_yZ,recvtag,Communicator,&req2[16]);
MPI_Isend(sendList_yZ, sendCount_yZ,MPI_INT,rank_yZ,sendtag,Communicator,&req1[17]);
MPI_Irecv(recvList_Yz, recvCount_Yz,MPI_INT,rank_Yz,recvtag,Communicator,&req2[17]);
MPI_Waitall(18,req1,stat1);
MPI_Waitall(18,req2,stat2);
if (rank == 0) {
printf("* recvList_x: %d %d %d %d \n",recvList_x[0],recvList_x[1],recvList_x[2],recvList_x[3]);
printf("* recvList_X: %d %d %d %d \n",recvList_X[0],recvList_X[1],recvList_X[2],recvList_X[3]);
printf("\n");
}
MPI_Barrier(MPI_COMM_WORLD);
if (rank == 1) {
printf("* recvList_x: %d %d %d %d \n",recvList_x[0],recvList_x[1],recvList_x[2],recvList_x[3]);
printf("* recvList_X: %d %d %d %d \n",recvList_X[0],recvList_X[1],recvList_X[2],recvList_X[3]);
printf("\n\n");
}
//......................................................................................
// Shift the received indices (expressed in the sender's interior coordinates)
// into this rank's halo layer by adding/subtracting one subdomain width per
// direction: (Nx-2) in x, (Ny-2)*Nx in y, (Nz-2)*Nx*Ny in z.
for (int idx=0; idx<recvCount_x; idx++) {recvList_x[idx] -= (Nx-2); if (rank ==1) printf("%d ",recvList_x[idx]); }
if (rank == 1) printf("\n");
for (int idx=0; idx<recvCount_X; idx++) recvList_X[idx] += (Nx-2);
for (int idx=0; idx<recvCount_y; idx++) recvList_y[idx] -= (Ny-2)*Nx;
for (int idx=0; idx<recvCount_Y; idx++) recvList_Y[idx] += (Ny-2)*Nx;
for (int idx=0; idx<recvCount_z; idx++) recvList_z[idx] -= (Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_Z; idx++) recvList_Z[idx] += (Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_xy; idx++) recvList_xy[idx] -= (Nx-2)+(Ny-2)*Nx;
for (int idx=0; idx<recvCount_XY; idx++) recvList_XY[idx] += (Nx-2)+(Ny-2)*Nx;
for (int idx=0; idx<recvCount_xY; idx++) recvList_xY[idx] -= (Nx-2)-(Ny-2)*Nx;
for (int idx=0; idx<recvCount_Xy; idx++) recvList_Xy[idx] += (Nx-2)-(Ny-2)*Nx;
for (int idx=0; idx<recvCount_xz; idx++) recvList_xz[idx] -= (Nx-2)+(Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_XZ; idx++) recvList_XZ[idx] += (Nx-2)+(Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_xZ; idx++) recvList_xZ[idx] -= (Nx-2)-(Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_Xz; idx++) recvList_Xz[idx] += (Nx-2)-(Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_yz; idx++) recvList_yz[idx] -= (Ny-2)*Nx + (Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_YZ; idx++) recvList_YZ[idx] += (Ny-2)*Nx + (Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_yZ; idx++) recvList_yZ[idx] -= (Ny-2)*Nx - (Nz-2)*Nx*Ny;
for (int idx=0; idx<recvCount_Yz; idx++) recvList_Yz[idx] += (Ny-2)*Nx - (Nz-2)*Nx*Ny;
//......................................................................................
if (rank == 0) printf("* recvBuf_# has been allocated through construction of Dm but sizes not determined. Creating arrays with size recvCount_#... *\n");
MPI_Barrier(MPI_COMM_WORLD);
// allocate recv buffers
recvBuf_x = new int [recvCount_x];
recvBuf_y = new int [recvCount_y];
recvBuf_z = new int [recvCount_z];
recvBuf_X = new int [recvCount_X];
recvBuf_Y = new int [recvCount_Y];
recvBuf_Z = new int [recvCount_Z];
recvBuf_xy = new int [recvCount_xy];
recvBuf_yz = new int [recvCount_yz];
recvBuf_xz = new int [recvCount_xz];
recvBuf_Xy = new int [recvCount_Xy];
recvBuf_Yz = new int [recvCount_Yz];
recvBuf_xZ = new int [recvCount_xZ];
recvBuf_xY = new int [recvCount_xY];
recvBuf_yZ = new int [recvCount_yZ];
recvBuf_Xz = new int [recvCount_Xz];
recvBuf_XY = new int [recvCount_XY];
recvBuf_YZ = new int [recvCount_YZ];
recvBuf_XZ = new int [recvCount_XZ];
//......................................................................................
if (rank == 0) printf("* sendData_# has been allocated through construction of Dm but sizes not determined. Creating arrays with size sendCount_#... *\n");
MPI_Barrier(MPI_COMM_WORLD);
// send buffers (double-precision payload used by the mesh halo exchange)
sendData_x = new double [sendCount_x];
sendData_y = new double [sendCount_y];
sendData_z = new double [sendCount_z];
sendData_X = new double [sendCount_X];
sendData_Y = new double [sendCount_Y];
sendData_Z = new double [sendCount_Z];
sendData_xy = new double [sendCount_xy];
sendData_yz = new double [sendCount_yz];
sendData_xz = new double [sendCount_xz];
sendData_Xy = new double [sendCount_Xy];
sendData_Yz = new double [sendCount_Yz];
sendData_xZ = new double [sendCount_xZ];
sendData_xY = new double [sendCount_xY];
sendData_yZ = new double [sendCount_yZ];
sendData_Xz = new double [sendCount_Xz];
sendData_XY = new double [sendCount_XY];
sendData_YZ = new double [sendCount_YZ];
sendData_XZ = new double [sendCount_XZ];
//......................................................................................
if (rank == 0) printf("* recvData_# has been allocated through construction of Dm but sizes not determined. Creating arrays with size recvCount_#... *\n");
MPI_Barrier(MPI_COMM_WORLD);
// recv buffers
recvData_x = new double [recvCount_x];
recvData_y = new double [recvCount_y];
recvData_z = new double [recvCount_z];
recvData_X = new double [recvCount_X];
recvData_Y = new double [recvCount_Y];
recvData_Z = new double [recvCount_Z];
recvData_xy = new double [recvCount_xy];
recvData_yz = new double [recvCount_yz];
recvData_xz = new double [recvCount_xz];
recvData_Xy = new double [recvCount_Xy];
recvData_xZ = new double [recvCount_xZ];
recvData_xY = new double [recvCount_xY];
recvData_yZ = new double [recvCount_yZ];
recvData_Yz = new double [recvCount_Yz];
recvData_Xz = new double [recvCount_Xz];
recvData_XY = new double [recvCount_XY];
recvData_YZ = new double [recvCount_YZ];
recvData_XZ = new double [recvCount_XZ];
//......................................................................................
if (rank == 0) printf("* End of TestCommInit...\n\n");
}
void Domain::CommunicateMeshHalo(DoubleArray &Mesh)
{
@@ -1281,8 +676,9 @@ void Domain::CommunicateMeshHalo(DoubleArray &Mesh)
/********************************************************
* Misc *
********************************************************/
double SSO(DoubleArray &Distance, char *ID, Domain &Dm, int timesteps){
/*
* This routine converts the data in the Distance array to a signed distance
@@ -1548,8 +944,8 @@ void AssignLocalSolidID(char *ID, int nspheres, double *List_cx, double *List_cy
}
void SignedDistance(double *Distance, int nspheres, double *List_cx, double *List_cy, double *List_cz, double *List_rad,
double Lx, double Ly, double Lz, int Nx, int Ny, int Nz,
int iproc, int jproc, int kproc, int nprocx, int nprocy, int nprocz)
double Lx, double Ly, double Lz, int Nx, int Ny, int Nz,
int iproc, int jproc, int kproc, int nprocx, int nprocy, int nprocz)
{
// Use sphere lists to determine which nodes are in porespace
// Write out binary file for nodes

View File

@@ -1,7 +1,5 @@
#ifndef Domain_INC
#define Domain_INC
// Created by James McClure
// Copyright 2008-2013
#include <stdio.h>
#include <stdlib.h>
@@ -19,9 +17,47 @@
#include "common/Database.h"
class Domain;
template<class TYPE> class PatchData;
//! Read the domain information file
std::shared_ptr<Database> read_domain( );
//! Class to hold information about a box: an axis-aligned region of the
//! index space, described by its first and last cell index in each of the
//! three dimensions (the names suggest the bounds are inclusive — confirm
//! against the code that fills these in).
class Box {
public:
int ifirst[3];  // first (lowest) index in each dimension (x,y,z)
int ilast[3];   // last (highest) index in each dimension (x,y,z)
};
//! Where patch data is stored: host (CPU) memory or accelerator (DEVICE) memory
enum class DataLocation { CPU, DEVICE };
//! Class to hold information about a patch (a rank-local piece of the domain).
//! Patches are not default-constructible, copyable, or assignable; they are
//! created and owned by the Domain they belong to.
class Patch {
public:
//! Empty constructor (disabled — a Patch must be created with valid state)
Patch() = delete;
//! Copy constructor (disabled — patches are unique, non-copyable objects)
Patch( const Patch& ) = delete;
//! Assignment operator (disabled)
Patch& operator=( const Patch& ) = delete;
//! Return the index-space box covered by this patch
inline const Box& getBox() const { return d_box; }
//! Create patch data of the given element type, allocated at the
//! requested location (CPU or DEVICE)
template<class TYPE>
std::shared_ptr<PatchData<TYPE>> createPatchData( DataLocation location ) const;
private:
Box d_box;        // index-space region covered by this patch
int d_owner;      // presumably the MPI rank owning this patch — TODO confirm
Domain *d_domain; // non-owning back-pointer to the parent Domain
};
//! Class to hold domain info
@@ -49,32 +85,67 @@ public:
//! Get the database
inline std::shared_ptr<const Database> getDatabase() const { return d_db; }
//! Get the domain box
inline const Box& getBox() const { return d_box; }
//! Get local patch
inline const Patch& getLocalPatch() const { return *d_localPatch; }
//! Get all patches
inline const std::vector<Patch>& getAllPatch() const { return d_patches; }
private:
void initialize( std::shared_ptr<Database> db );
std::shared_ptr<Database> d_db;
Box d_box;
Patch *d_localPatch;
std::vector<Patch> d_patches;
public:
// Basic domain information
int Nx,Ny,Nz,N;
int iproc,jproc,kproc;
int nprocx,nprocy,nprocz;
public: // Public variables (need to create accessors instead)
double Lx,Ly,Lz,Volume;
int rank;
int BoundaryCondition;
int Nx,Ny,Nz,N;
RankInfoStruct rank_info;
MPI_Group Group; // Group of processors associated with this domain
MPI_Comm Comm; // MPI Communicator for this domain
int BoundaryCondition;
MPI_Group Group; // Group of processors associated with this domain
//**********************************
// MPI ranks for all 18 neighbors
//**********************************
int rank_x,rank_y,rank_z,rank_X,rank_Y,rank_Z;
int rank_xy,rank_XY,rank_xY,rank_Xy;
int rank_xz,rank_XZ,rank_xZ,rank_Xz;
int rank_yz,rank_YZ,rank_yZ,rank_Yz;
const int& iproc = rank_info.ix;
const int& jproc = rank_info.jy;
const int& kproc = rank_info.kz;
const int& nprocx = rank_info.nx;
const int& nprocy = rank_info.ny;
const int& nprocz = rank_info.nz;
const int& rank = rank_info.rank[1][1][1];
const int& rank_X = rank_info.rank[2][1][1];
const int& rank_x = rank_info.rank[0][1][1];
const int& rank_Y = rank_info.rank[1][2][1];
const int& rank_y = rank_info.rank[1][0][1];
const int& rank_Z = rank_info.rank[1][1][2];
const int& rank_z = rank_info.rank[1][1][0];
const int& rank_XY = rank_info.rank[2][2][1];
const int& rank_xy = rank_info.rank[0][0][1];
const int& rank_Xy = rank_info.rank[2][0][1];
const int& rank_xY = rank_info.rank[0][2][1];
const int& rank_XZ = rank_info.rank[2][1][2];
const int& rank_xz = rank_info.rank[0][1][0];
const int& rank_Xz = rank_info.rank[2][1][0];
const int& rank_xZ = rank_info.rank[0][1][2];
const int& rank_YZ = rank_info.rank[1][2][2];
const int& rank_yz = rank_info.rank[1][0][0];
const int& rank_Yz = rank_info.rank[1][2][0];
const int& rank_yZ = rank_info.rank[1][0][2];
//**********************************
//......................................................................................
// Get the actual D3Q19 communication counts (based on location of solid phase)
@@ -88,10 +159,6 @@ public:
int *sendList_xy, *sendList_yz, *sendList_xz, *sendList_Xy, *sendList_Yz, *sendList_xZ;
int *sendList_xY, *sendList_yZ, *sendList_Xz, *sendList_XY, *sendList_YZ, *sendList_XZ;
//......................................................................................
int *sendBuf_x, *sendBuf_y, *sendBuf_z, *sendBuf_X, *sendBuf_Y, *sendBuf_Z;
int *sendBuf_xy, *sendBuf_yz, *sendBuf_xz, *sendBuf_Xy, *sendBuf_Yz, *sendBuf_xZ;
int *sendBuf_xY, *sendBuf_yZ, *sendBuf_Xz, *sendBuf_XY, *sendBuf_YZ, *sendBuf_XZ;
//......................................................................................
int recvCount_x, recvCount_y, recvCount_z, recvCount_X, recvCount_Y, recvCount_Z;
int recvCount_xy, recvCount_yz, recvCount_xz, recvCount_Xy, recvCount_Yz, recvCount_xZ;
int recvCount_xY, recvCount_yZ, recvCount_Xz, recvCount_XY, recvCount_YZ, recvCount_XZ;
@@ -99,53 +166,78 @@ public:
int *recvList_x, *recvList_y, *recvList_z, *recvList_X, *recvList_Y, *recvList_Z;
int *recvList_xy, *recvList_yz, *recvList_xz, *recvList_Xy, *recvList_Yz, *recvList_xZ;
int *recvList_xY, *recvList_yZ, *recvList_Xz, *recvList_XY, *recvList_YZ, *recvList_XZ;
//......................................................................................
int *recvBuf_x, *recvBuf_y, *recvBuf_z, *recvBuf_X, *recvBuf_Y, *recvBuf_Z;
int *recvBuf_xy, *recvBuf_yz, *recvBuf_xz, *recvBuf_Xy, *recvBuf_Yz, *recvBuf_xZ;
int *recvBuf_xY, *recvBuf_yZ, *recvBuf_Xz, *recvBuf_XY, *recvBuf_YZ, *recvBuf_XZ;
//......................................................................................
double *sendData_x, *sendData_y, *sendData_z, *sendData_X, *sendData_Y, *sendData_Z;
double *sendData_xy, *sendData_yz, *sendData_xz, *sendData_Xy, *sendData_Yz, *sendData_xZ;
double *sendData_xY, *sendData_yZ, *sendData_Xz, *sendData_XY, *sendData_YZ, *sendData_XZ;
double *recvData_x, *recvData_y, *recvData_z, *recvData_X, *recvData_Y, *recvData_Z;
double *recvData_xy, *recvData_yz, *recvData_xz, *recvData_Xy, *recvData_Yz, *recvData_xZ;
double *recvData_xY, *recvData_yZ, *recvData_Xz, *recvData_XY, *recvData_YZ, *recvData_XZ;
// Solid indicator function
char *id;
void InitializeRanks();
void CommInit(MPI_Comm comm);
void CommunicateMeshHalo(DoubleArray &Mesh);
void AssignComponentLabels(double *phase);
void CommInit(MPI_Comm comm);
void TestCommInit(MPI_Comm comm);
//void MemoryOptimizedLayout(IntArray &Map, int *neighborList, int Np);
private:
//! Map a (possibly out-of-range) block coordinate (i,j,k) to the linear MPI
//! rank of the process owning that block. Coordinates are wrapped
//! periodically into [0,nproc*) in each dimension, so neighbor offsets of
//! -1 and nproc* are handled; ranks are laid out x-fastest, then y, then z.
inline int getRankForBlock( int i, int j, int k )
{
const int ii = (i+nprocx)%nprocx;  // periodic wrap in x
const int jj = (j+nprocy)%nprocy;  // periodic wrap in y
const int kk = (k+nprocz)%nprocz;  // periodic wrap in z
return ii + nprocx*( jj + nprocy*kk );
}
int *sendBuf_x, *sendBuf_y, *sendBuf_z, *sendBuf_X, *sendBuf_Y, *sendBuf_Z;
int *sendBuf_xy, *sendBuf_yz, *sendBuf_xz, *sendBuf_Xy, *sendBuf_Yz, *sendBuf_xZ;
int *sendBuf_xY, *sendBuf_yZ, *sendBuf_Xz, *sendBuf_XY, *sendBuf_YZ, *sendBuf_XZ;
//......................................................................................
int *recvBuf_x, *recvBuf_y, *recvBuf_z, *recvBuf_X, *recvBuf_Y, *recvBuf_Z;
int *recvBuf_xy, *recvBuf_yz, *recvBuf_xz, *recvBuf_Xy, *recvBuf_Yz, *recvBuf_xZ;
int *recvBuf_xY, *recvBuf_yZ, *recvBuf_Xz, *recvBuf_XY, *recvBuf_YZ, *recvBuf_XZ;
//......................................................................................
double *sendData_x, *sendData_y, *sendData_z, *sendData_X, *sendData_Y, *sendData_Z;
double *sendData_xy, *sendData_yz, *sendData_xz, *sendData_Xy, *sendData_Yz, *sendData_xZ;
double *sendData_xY, *sendData_yZ, *sendData_Xz, *sendData_XY, *sendData_YZ, *sendData_XZ;
double *recvData_x, *recvData_y, *recvData_z, *recvData_X, *recvData_Y, *recvData_Z;
double *recvData_xy, *recvData_yz, *recvData_xz, *recvData_Xy, *recvData_Yz, *recvData_xZ;
double *recvData_xY, *recvData_yZ, *recvData_Xz, *recvData_XY, *recvData_YZ, *recvData_XZ;
};
double SSO(DoubleArray &Distance, char *ID, Domain &Dm, int timesteps);
//! Class to hold data on a patch. TYPE is the element type of the stored
//! array; the data may live on the host or on a device (see DataLocation).
template<class TYPE>
class PatchData {
public:
//! Get the raw data pointer (mutable)
TYPE* data() { return d_data; }
//! Get the raw data pointer (read-only)
const TYPE* data() const { return d_data; }
//! Get the patch this data is associated with
const Patch& getPatch() const { return *d_patch; }
//! Start communication (begin exchanging ghost values with neighbors)
void beginCommunication();
//! End communication (wait for the exchange started by beginCommunication)
void endCommunication();
//! Access ghost values by (i,j,k) index
TYPE operator()( int, int, int ) const;
//! Copy data from another PatchData
void copy( const PatchData& rhs );
private:
DataLocation d_location; // where d_data is stored (CPU or DEVICE)
const Patch *d_patch;    // non-owning pointer to the associated patch
TYPE *d_data;            // raw data storage
TYPE *d_gcw;             // name suggests ghost-cell data/width — NOTE(review): confirm
};
void ReadSpherePacking(int nspheres, double *List_cx, double *List_cy, double *List_cz, double *List_rad);
void AssignLocalSolidID(char *ID, int nspheres, double *List_cx, double *List_cy, double *List_cz, double *List_rad,
double Lx, double Ly, double Lz, int Nx, int Ny, int Nz,
int iproc, int jproc, int kproc, int nprocx, int nprocy, int nprocz);
void SignedDistance(double *Distance, int nspheres, double *List_cx, double *List_cy, double *List_cz, double *List_rad,
double Lx, double Ly, double Lz, int Nx, int Ny, int Nz,
int iproc, int jproc, int kproc, int nprocx, int nprocy, int nprocz);
double Lx, double Ly, double Lz, int Nx, int Ny, int Nz,
int iproc, int jproc, int kproc, int nprocx, int nprocy, int nprocz);
void WriteLocalSolidID(char *FILENAME, char *ID, int N);
@@ -158,4 +250,6 @@ void ReadCheckpoint(char *FILENAME, double *cDen, double *cfq, int Np);
void ReadBinaryFile(char *FILENAME, double *Data, int N);
#endif

View File

@@ -18,7 +18,7 @@ Color {
Domain {
nproc = 1, 1, 1 // Number of processors (Npx,Npy,Npz)
n = 80, 80, 80 // Size of local domain (Nx,Ny,Nz)
n = 16, 16, 16 // Size of local domain (Nx,Ny,Nz)
n_spheres = 1 // Number of spheres
L = 1, 1, 1 // Length of domain (x,y,z)
BC = 0 // Boundary condition type

View File

@@ -45,10 +45,6 @@ int main (int argc, char *argv[])
TwoPhase Averages(Dm);
int timestep=0;
int Nx = Dm.Nx;
int Ny = Dm.Ny;
int Nz = Dm.Nz;
double Cx,Cy,Cz;
double dist1,dist2;