From a12e0ba7f1e278b2eac00515dc1606bd533ec03f Mon Sep 17 00:00:00 2001 From: Mark Berrill Date: Fri, 4 Sep 2015 13:06:36 -0400 Subject: [PATCH] Replacing MPI_COMM_WORLD with a passed communicator --- IO/Writer.cpp | 8 +- IO/Writer.h | 3 +- analysis/analysis.cpp | 77 ++- analysis/analysis.h | 8 +- common/Communication.h | 2 +- common/Communication.hpp | 4 +- common/Domain.h | 2 +- common/MPI_Helpers.cpp | 24 + common/MPI_Helpers.h | 39 +- common/TwoPhase.cpp | 10 +- cpu/exe/lb2_Color_mpi.cpp | 487 +++++++++---------- cpu/exe/lb2_Color_wia_mpi_bubble.cpp | 647 +++++++++++++------------- gpu/exe/lb1_MRT_mpi.cpp | 309 ++++++------ gpu/exe/lb1_MRT_mpi.cu | 311 +++++++------ gpu/exe/lb2_Color.cu | 50 +- gpu/exe/lb2_Color_mpi.cpp | 489 +++++++++---------- gpu/exe/lb2_Color_pBC_wia_mpi.cpp | 599 ++++++++++++------------ tests/BasicSimulator.cpp | 463 +++++++++--------- tests/BlobAnalyzeParallel.cpp | 45 +- tests/BlobIdentify.cpp | 2 +- tests/BlobIdentifyParallel.cpp | 11 +- tests/ComponentLabel.cpp | 13 +- tests/TestBlobAnalyze.cpp | 45 +- tests/TestBlobIdentify.cpp | 62 +-- tests/TestBubble.cpp | 635 ++++++++++++------------- tests/TestCommD3Q19.cpp | 57 +-- tests/TestInterfaceSpeed.cpp | 9 +- tests/TestMassConservationD3Q7.cpp | 7 +- tests/TestSegDist.cpp | 9 +- tests/TestTwoPhase.cpp | 9 +- tests/TestWriter.cpp | 13 +- tests/hello_world.cpp | 5 +- tests/lb2_CMT_wia.cpp | 32 +- tests/lb2_Color_blob_wia_mpi.cpp | 461 +++++++++--------- tests/lb2_Color_wia_mpi.cpp | 391 ++++++++-------- tests/lbpm_BlobAnalysis.cpp | 51 +- tests/lbpm_captube_pp.cpp | 37 +- tests/lbpm_color_simulator.cpp | 129 ++--- tests/lbpm_color_simulator.h | 10 +- tests/lbpm_disc_pp.cpp | 47 +- tests/lbpm_permeability_simulator.cpp | 69 +-- tests/lbpm_random_pp.cpp | 89 ++-- tests/lbpm_segmented_decomp.cpp | 53 +-- tests/lbpm_segmented_pp.cpp | 37 +- tests/lbpm_sphere_pp.cpp | 51 +- tests/testCommunication.cpp | 45 +- tests/testUtilities.cpp | 3 +- 47 files changed, 3029 insertions(+), 2930 deletions(-) diff --git a/IO/Writer.cpp b/IO/Writer.cpp index 52effdf6..b0767843 100644 --- a/IO/Writer.cpp +++ b/IO/Writer.cpp @@ -156,16 +156,16 @@ static std::vector writeMeshesNewFormat( // Write the mesh data -void IO::writeData( int timestep, const std::vector& meshData, int format ) +void IO::writeData( int timestep, const std::vector& meshData, int format, MPI_Comm comm ) { PROFILE_START("writeData"); - int rank = MPI_WORLD_RANK(); + int rank = comm_rank(comm); // Create the output directory char path[100]; sprintf(path,"vis%03i",timestep); if ( rank == 0 ) mkdir(path,S_IRWXU|S_IRGRP); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Write the mesh files std::vector meshes_written; if ( format == 1 ) { @@ -178,7 +178,7 @@ void IO::writeData( int timestep, const std::vector& meshDat ERROR("Unknown format"); } // Gather a complete list of files on rank 0 - meshes_written = gatherAll(meshes_written,MPI_COMM_WORLD); + meshes_written = gatherAll(meshes_written,comm); // Write the summary files if ( rank == 0 ) { // Write the summary file for the current timestep diff --git a/IO/Writer.h b/IO/Writer.h index 2cefdac3..2dcb5126 100644 --- a/IO/Writer.h +++ b/IO/Writer.h @@ -21,8 +21,9 @@ namespace IO { * 1 - Old mesh format (provided for backward compatibility, cannot write variables) * 2 - New format, 1 file/process, double precision * 3 - New format, 1 file/process, single precision (not finished) + * @param[in] comm The comm to use for writing (usually MPI_COMM_WORLD or a dup thereof) */ -void writeData( int timestep, const 
std::vector& meshData, int format ); +void writeData( int timestep, const std::vector& meshData, int format, MPI_Comm comm ); } // IO namespace diff --git a/analysis/analysis.cpp b/analysis/analysis.cpp index 876d2cb7..85925e0d 100644 --- a/analysis/analysis.cpp +++ b/analysis/analysis.cpp @@ -186,7 +186,7 @@ int ComputeLocalPhaseComponent(const IntArray &PhaseID, int &VALUE, BlobIDArray /****************************************************************** * Reorder the global blob ids * ******************************************************************/ -static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int ngz ) +static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int ngz, MPI_Comm comm ) { if ( N_blobs==0 ) return 0; @@ -210,7 +210,7 @@ static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int } } ASSERT(max_id > map1(N_blobs); int N_blobs2 = 0; for (int i=0; i& neighbors, int N_send, const std::vector& N_recv, int64_t *send_buf, std::vector& recv_buf, - std::map& remote_map ) + std::map& remote_map, + MPI_Comm comm ) { std::vector send_req(neighbors.size()); std::vector recv_req(neighbors.size()); @@ -269,8 +270,8 @@ static void updateRemoteIds( send_buf[2*i+1] = it->second.new_id; } for (size_t i=0; ifirst] = it->second.new_id; @@ -301,19 +302,18 @@ static bool updateLocalIds( const std::map& remote_map, return changed; } static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info, - int nblobs, BlobIDArray& IDs ) + int nblobs, BlobIDArray& IDs, MPI_Comm comm ) { PROFILE_START("LocalToGlobalIDs",1); const int rank = rank_info.rank[1][1][1]; - int nprocs; - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + int nprocs = comm_size(comm); const int ngx = (IDs.size(0)-nx)/2; const int ngy = (IDs.size(1)-ny)/2; const int ngz = (IDs.size(2)-nz)/2; // Get the number of blobs for each rank std::vector N_blobs(nprocs,0); PROFILE_START("LocalToGlobalIDs-Allgather",1); - MPI_Allgather(&nblobs,1,MPI_INT,getPtr(N_blobs),1,MPI_INT,MPI_COMM_WORLD); + MPI_Allgather(&nblobs,1,MPI_INT,getPtr(N_blobs),1,MPI_INT,comm); PROFILE_STOP("LocalToGlobalIDs-Allgather",1); int64_t N_blobs_tot = 0; int offset = 0; @@ -329,7 +329,7 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_ } const BlobIDArray LocalIDs = IDs; // Copy the ids and get the neighbors through the halos - fillHalo fillData(rank_info,nx,ny,nz,1,1,1,0,1,true,true,true); + fillHalo fillData(comm,rank_info,nx,ny,nz,1,1,1,0,1,true,true,true); fillData.fill(IDs); // Create a list of all neighbor ranks (excluding self) std::vector neighbors; @@ -364,8 +364,8 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_ std::vector recv_req(neighbors.size()); std::vector status(neighbors.size()); for (size_t i=0; i fillData2(rank_info,nx,ny,nz,1,1,1,0,1,true,true,true); + fillHalo fillData2(comm,rank_info,nx,ny,nz,1,1,1,0,1,true,true,true); fillData2.fill(IDs); // Reorder based on size (and compress the id space - int N_blobs_global = ReorderBlobIDs2(IDs,N_blobs_tot,ngx,ngy,ngz); + int N_blobs_global = ReorderBlobIDs2(IDs,N_blobs_tot,ngx,ngy,ngz,comm); // Finished delete [] send_buf; for (size_t i=0; i inline MPI_Datatype getMPIType() { return MPI_DOUBLE; } template -void gatherSet( std::set& set ) +void gatherSet( std::set& set, MPI_Comm comm ) { - int nprocs; - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + int nprocs = comm_size(comm); MPI_Datatype type = getMPIType(); std::vector send_data(set.begin(),set.end()); int 
send_count = send_data.size(); std::vector recv_count(nprocs,0), recv_disp(nprocs,0); - MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,MPI_COMM_WORLD); + MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,comm); for (int i=1; i recv_data(recv_disp[nprocs-1]+recv_count[nprocs-1]); MPI_Allgatherv(getPtr(send_data),send_count,type, - getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type, - MPI_COMM_WORLD); + getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type,comm); for (size_t i=0; i -void gatherSrcIDMap( std::map >& src_map ) +void gatherSrcIDMap( std::map >& src_map, MPI_Comm comm ) { - int nprocs; - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + int nprocs = comm_size(comm); MPI_Datatype type = getMPIType(); std::vector send_data; typename std::map >::const_iterator it; @@ -506,13 +503,12 @@ void gatherSrcIDMap( std::map >& src_map ) } int send_count = send_data.size(); std::vector recv_count(nprocs,0), recv_disp(nprocs,0); - MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,MPI_COMM_WORLD); + MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,comm); for (int i=1; i recv_data(recv_disp[nprocs-1]+recv_count[nprocs-1]); MPI_Allgatherv(getPtr(send_data),send_count,type, - getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type, - MPI_COMM_WORLD); + getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type,comm); size_t i=0; while ( i < recv_data.size() ) { int id = recv_data[i]; @@ -533,7 +529,7 @@ void addSrcDstIDs( BlobIDType src_id, std::map > addSrcDstIDs(*it,dst_map,src_map,dst,src); } } -ID_map_struct computeIDMap( const BlobIDArray& ID1, const BlobIDArray& ID2 ) +ID_map_struct computeIDMap( const BlobIDArray& ID1, const BlobIDArray& ID2, MPI_Comm comm ) { ASSERT(ID1.size()==ID2.size()); PROFILE_START("computeIDMap"); @@ -552,9 +548,9 @@ ID_map_struct computeIDMap( const BlobIDArray& ID1, const BlobIDArray& ID2 ) } } // Communicate the src/dst ids and src id map to all processors and reduce - gatherSet( src_set ); - gatherSet( dst_set ); - gatherSrcIDMap( src_map ); + gatherSet( src_set, comm ); + gatherSet( dst_set, comm ); + gatherSrcIDMap( src_map, comm ); // Compute the dst id map std::map > dst_map; // Map of the dst ids for each src id for (std::map >::const_iterator it=src_map.begin(); it!=src_map.end(); ++it) { @@ -703,8 +699,7 @@ void renumberIDs( const std::vector& new_ids, BlobIDArray& IDs ) ******************************************************************/ void writeIDMap( const ID_map_struct& map, long long int timestep, const std::string& filename ) { - int rank; - MPI_Comm_rank(MPI_COMM_WORLD,&rank); + int rank = MPI_WORLD_RANK(); if ( rank!=0 ) return; bool empty = map.created.empty() && map.destroyed.empty() && diff --git a/analysis/analysis.h b/analysis/analysis.h index e477bbde..2bc34128 100644 --- a/analysis/analysis.h +++ b/analysis/analysis.h @@ -58,7 +58,7 @@ int ComputeLocalPhaseComponent( const IntArray &PhaseID, int &VALUE, IntArray &C */ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info, const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS, - BlobIDArray& GlobalBlobID ); + BlobIDArray& GlobalBlobID, MPI_Comm comm ); /*! 
@@ -75,7 +75,7 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_inf * @return Return the number of components in the specified phase */ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info, - const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID ); + const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, MPI_Comm comm ); /*! @@ -87,7 +87,7 @@ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& r * @param[in] nz Number of elements in the z-direction * @param[in/out] ID The ids of the blobs */ -void ReorderBlobIDs( BlobIDArray& ID ); +void ReorderBlobIDs( BlobIDArray& ID, MPI_Comm comm ); typedef std::pair > BlobIDSplitStruct; @@ -118,7 +118,7 @@ struct ID_map_struct { * @param[in] ID1 The blob ids at the first timestep * @param[in] ID2 The blob ids at the second timestep */ -ID_map_struct computeIDMap( const BlobIDArray& ID1, const BlobIDArray& ID2 ); +ID_map_struct computeIDMap( const BlobIDArray& ID1, const BlobIDArray& ID2, MPI_Comm comm ); /*! diff --git a/common/Communication.h b/common/Communication.h index 29df1806..eefd580d 100644 --- a/common/Communication.h +++ b/common/Communication.h @@ -51,7 +51,7 @@ public: * @param[in] tag Initial tag to use for the communication (we will require tag:tag+26) * @param[in] depth Maximum depth to support */ - fillHalo( const RankInfoStruct& info, int nx, int ny, int nz, + fillHalo( MPI_Comm comm, const RankInfoStruct& info, int nx, int ny, int nz, int ngx, int ngy, int ngz, int tag, int depth, bool fill_face=true, bool fill_edge=true, bool fill_corner=true ); diff --git a/common/Communication.hpp b/common/Communication.hpp index 156aeaa4..965dfbc7 100644 --- a/common/Communication.hpp +++ b/common/Communication.hpp @@ -10,12 +10,12 @@ * Structure to store the rank info * ********************************************************/ template -fillHalo::fillHalo( const RankInfoStruct& info0, int nx0, int ny0, int nz0, +fillHalo::fillHalo( MPI_Comm comm0, const RankInfoStruct& info0, int nx0, int ny0, int nz0, int ngx0, int ngy0, int ngz0, int tag0, int depth0, bool fill_face, bool fill_edge, bool fill_corner ): info(info0), nx(nx0), ny(ny0), nz(nz0), ngx(ngx0), ngy(ngy0), ngz(ngz0), depth(depth0) { - comm = MPI_COMM_WORLD; + comm = comm0; datatype = getMPItype(); // Set the fill pattern memset(fill_pattern,0,sizeof(fill_pattern)); diff --git a/common/Domain.h b/common/Domain.h index ed138bee..b4b773f5 100755 --- a/common/Domain.h +++ b/common/Domain.h @@ -171,7 +171,7 @@ inline void SSO(DoubleArray &Distance, char *ID, Domain &Dm, int timesteps){ xdim=Dm.Nx-2; ydim=Dm.Ny-2; zdim=Dm.Nz-2; - fillHalo fillData(Dm.rank_info,xdim,ydim,zdim,1,1,1,0,1); + fillHalo fillData(Dm.Comm, Dm.rank_info,xdim,ydim,zdim,1,1,1,0,1); int count = 0; while (count < timesteps){ diff --git a/common/MPI_Helpers.cpp b/common/MPI_Helpers.cpp index b91ec422..23924f21 100644 --- a/common/MPI_Helpers.cpp +++ b/common/MPI_Helpers.cpp @@ -140,6 +140,11 @@ int MPI_Init(int*,char***) { return 0; } +int MPI_Init_thread(int*,char***, int required, int *provided ) +{ + *provided = required; + return 0; +} int MPI_Finalize() { return 0; @@ -223,6 +228,12 @@ int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, ERROR("Not implimented yet"); return 0; } +int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, + MPI_Op op, int root, MPI_Comm comm) +{ + ERROR("Not implimented yet"); + return 0; +} int MPI_Comm_group(MPI_Comm comm, 
MPI_Group *group) { ERROR("Not implimented yet"); @@ -233,10 +244,23 @@ int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) ERROR("Not implimented yet"); return 0; } +int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm) +{ + *newcomm = comm; + return 0; +} double MPI_Wtime( void ) { return 0.0; } +int MPI_Comm_free(MPI_Comm *group) +{ + return 0; +} +int MPI_Group_free(MPI_Group *group) +{ + return 0; +} #endif diff --git a/common/MPI_Helpers.h b/common/MPI_Helpers.h index cf523abc..02d67732 100644 --- a/common/MPI_Helpers.h +++ b/common/MPI_Helpers.h @@ -17,13 +17,19 @@ typedef int MPI_Status; #define MPI_COMM_WORLD 0 #define MPI_COMM_SELF 0 + #define MPI_COMM_NULL -1 #define MPI_STATUS_IGNORE NULL enum MPI_Datatype { MPI_LOGICAL, MPI_CHAR, MPI_UNSIGNED_CHAR, MPI_INT, MPI_UNSIGNED, MPI_LONG, MPI_UNSIGNED_LONG, MPI_LONG_LONG, MPI_FLOAT, MPI_DOUBLE }; enum MPI_Op { MPI_MIN, MPI_MAX, MPI_SUM }; - enum MPI_Group { }; + typedef int MPI_Group; + #define MPI_THREAD_SINGLE 0 + #define MPI_THREAD_FUNNELED 1 + #define MPI_THREAD_SERIALIZED 2 + #define MPI_THREAD_MULTIPLE 3 // Fake MPI functions int MPI_Init(int*,char***); + int MPI_Init_thread( int *argc, char ***argv, int required, int *provided ); int MPI_Finalize(); int MPI_Comm_size( MPI_Comm, int *size ); int MPI_Comm_rank( MPI_Comm, int *rank ); @@ -52,24 +58,43 @@ void *recvbuf, int recvcount, MPI_Datatype recvtype, int source, int recvtag, MPI_Comm comm, MPI_Status *status); + int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, + MPI_Op op, int root, MPI_Comm comm); double MPI_Wtime( void ); int MPI_Comm_group(MPI_Comm comm, MPI_Group *group); int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm); + int MPI_Comm_free(MPI_Comm *group); + int MPI_Group_free(MPI_Group *group); + int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm); #endif +//! Get the size of the MPI_Comm +// Note: this is a thread and interrupt safe function +inline int comm_size( MPI_Comm comm ) { + int size = 1; + MPI_Comm_size( comm, &size ); + return size; +} + + +//! Get the rank of the MPI_Comm +// Note: this is a thread and interrupt safe function +inline int comm_rank( MPI_Comm comm ) { + int rank = 1; + MPI_Comm_rank( comm, &rank ); + return rank; +} + + //! Get the size of MPI_COMM_WORLD inline int MPI_WORLD_SIZE( ) { - int size = 1; - MPI_Comm_size( MPI_COMM_WORLD, &size ); - return size; + return comm_size( MPI_COMM_WORLD ); } //! Get the size of MPI_COMM_WORLD inline int MPI_WORLD_RANK( ) { - int rank = 0; - MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - return rank; + return comm_rank( MPI_COMM_WORLD ); } //! 
Return the appropriate MPI datatype for a class diff --git a/common/TwoPhase.cpp b/common/TwoPhase.cpp index 09efabf4..17b2cece 100644 --- a/common/TwoPhase.cpp +++ b/common/TwoPhase.cpp @@ -505,7 +505,7 @@ void TwoPhase::AssignComponentLabels() // Fewer non-wetting phase features are present //NumberComponents_NWP = ComputeGlobalPhaseComponent(Dm.Nx-2,Dm.Ny-2,Dm.Nz-2,Dm.rank_info,PhaseID,LabelNWP,Label_NWP); - NumberComponents_NWP = ComputeGlobalBlobIDs(Dm.Nx-2,Dm.Ny-2,Dm.Nz-2,Dm.rank_info,SDs,SDn,solid_isovalue,fluid_isovalue,Label_NWP); + NumberComponents_NWP = ComputeGlobalBlobIDs(Dm.Nx-2,Dm.Ny-2,Dm.Nz-2,Dm.rank_info,SDs,SDn,solid_isovalue,fluid_isovalue,Label_NWP,Dm.Comm); } void TwoPhase::ComponentAverages() @@ -761,8 +761,8 @@ void TwoPhase::ComponentAverages() RecvBuffer.resize(BLOB_AVG_COUNT,NumberComponents_NWP); /* for (int b=0; b> Ly; domain >> Lz; } - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Computational domain - MPI_Bcast(&nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&nx,1,MPI_INT,0,comm); + MPI_Bcast(&ny,1,MPI_INT,0,comm); + MPI_Bcast(&nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Check that the number of processors >= the number of ranks if ( rank==0 ) { @@ -208,7 +209,7 @@ int main(int argc, char **argv) // WriteLocalSolidID(LocalRankFilename, id, N); sprintf(LocalRankFilename,"%s%s","SignDist.",LocalRankString); ReadBinaryFile(LocalRankFilename, Averages.SDs.get(), N); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); if (rank == 0) cout << "Domain set." << endl; //....................................................................... //copies of data needed to perform checkpointing from cpu @@ -220,7 +221,7 @@ int main(int argc, char **argv) if (rank==0) printf("Reading restart file! \n"); // Read in the restart file to CPU buffers ReadCheckpoint(LocalRestartFile, Den, DistEven, DistOdd, N); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //......................................................................... 
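// [Editor's note -- illustrative sketch, not part of the patch]
// common/MPI_Helpers.h now defines inline comm_rank()/comm_size() wrappers, so
// the two-line MPI_Comm_rank/MPI_Comm_size boilerplate still visible in the
// converted tests (e.g. BlobAnalyzeParallel.cpp above) can collapse to a single
// call on whichever communicator is passed in.  The function below is
// hypothetical and only demonstrates that rewrite; it uses nothing beyond what
// this patch defines.
#include <cstdio>
#include "common/MPI_Helpers.h"
static void report_layout( MPI_Comm comm )
{
    // Before: int rank,nprocs; MPI_Comm_rank(comm,&rank); MPI_Comm_size(comm,&nprocs);
    int rank   = comm_rank(comm);   // thread/interrupt safe per the header comment
    int nprocs = comm_size(comm);
    if ( rank == 0 )
        printf("Running on %i MPI ranks\n",nprocs);
}
// [End of editor's sketch]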
// Populate the arrays needed to perform averaging if (rank==0) printf("Populate arrays \n"); @@ -289,9 +290,9 @@ int main(int argc, char **argv) } } } - Dm.CommInit(MPI_COMM_WORLD); // Initialize communications for domains + Dm.CommInit(comm); // Initialize communications for domains - MPI_Allreduce(&sum,&sum_global,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&sum,&sum_global,1,MPI_DOUBLE,MPI_SUM,comm); porosity = sum_global/Dm.Volume; if (rank==0) printf("Porosity = %f \n",porosity); @@ -328,14 +329,14 @@ int main(int argc, char **argv) // BlobContainer Blobs; DoubleArray RecvBuffer(dimx); // MPI_Allreduce(&Averages.BlobAverages.get(),&Blobs.get(),1,MPI_DOUBLE,MPI_SUM,Dm.Comm); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); if (rank==0) printf("Number of components is %i \n",dimy); for (int b=0; b 0.0){ double Vn,pn,awn,ans,Jwn,Kwn,lwns,cwns,trawn,trJwn; @@ -481,7 +482,7 @@ int main(int argc, char **argv) fclose(BLOBS);*/ PROFILE_STOP("main"); PROFILE_SAVE("BlobIdentifyParallel",false); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); return 0; } diff --git a/tests/BlobIdentify.cpp b/tests/BlobIdentify.cpp index 9122a4bb..066ba8f0 100644 --- a/tests/BlobIdentify.cpp +++ b/tests/BlobIdentify.cpp @@ -266,7 +266,7 @@ int main(int argc, char **argv) printf("Execute blob identification algorithm... \n"); IntArray GlobalBlobID; int nblobs = ComputeLocalBlobIDs( Phase, SignDist, vF, vS, GlobalBlobID ); - ReorderBlobIDs(GlobalBlobID); // This will reorder by blob size + ReorderBlobIDs(GlobalBlobID,MPI_COMM_WORLD); // This will reorder by blob size printf("Identified %i blobs. Writing per-process output files. \n",nblobs); int sizeLoc = nx*ny*nz; diff --git a/tests/BlobIdentifyParallel.cpp b/tests/BlobIdentifyParallel.cpp index 7ceae67c..714d2c6c 100644 --- a/tests/BlobIdentifyParallel.cpp +++ b/tests/BlobIdentifyParallel.cpp @@ -49,8 +49,9 @@ int main(int argc, char **argv) // Initialize MPI int rank, nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); #ifdef PROFILE PROFILE_ENABLE(0); PROFILE_DISABLE_TRACE(); @@ -99,7 +100,7 @@ int main(int argc, char **argv) readRankData( rank, nx+2, ny+2, nz+2, Phase, SignDist ); // Communication the halos - fillHalo fillData(rank_info,nx,ny,nz,1,1,1,0,1); + fillHalo fillData(comm,rank_info,nx,ny,nz,1,1,1,0,1); fillData.fill(Phase); fillData.fill(SignDist); @@ -109,7 +110,7 @@ int main(int argc, char **argv) double vS=0.0; IntArray GlobalBlobID; int nblobs = ComputeGlobalBlobIDs(nx,ny,nz,rank_info, - Phase,SignDist,vF,vS,GlobalBlobID); + Phase,SignDist,vF,vS,GlobalBlobID,comm); if ( rank==0 ) { printf("Identified %i blobs\n",nblobs); } // Write the local blob ids @@ -128,7 +129,7 @@ int main(int argc, char **argv) PROFILE_STOP("main"); PROFILE_SAVE("BlobIdentifyParallel",false); #endif - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); return 0; } diff --git a/tests/ComponentLabel.cpp b/tests/ComponentLabel.cpp index c6365072..eef2efba 100644 --- a/tests/ComponentLabel.cpp +++ b/tests/ComponentLabel.cpp @@ -182,8 +182,9 @@ int main(int argc, char **argv) // Initialize MPI int rank,nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); printf("----------------------------------------------------------\n"); 
printf("COMPUTING TCAT ANALYSIS FOR NON-WETTING PHASE FEATURES \n"); @@ -391,7 +392,7 @@ int main(int argc, char **argv) } porosity /= (Nx*Ny*Nz*1.0); printf("Media porosity is %f \n",porosity); - Dm.CommInit(MPI_COMM_WORLD); + Dm.CommInit(comm); /* **************************************************************** IDENTIFY ALL COMPONENTS FOR BOTH PHASES @@ -430,7 +431,7 @@ int main(int argc, char **argv) Averages.PrintComponents(timestep); // Create the MeshDataStruct - fillHalo fillData(Dm.rank_info,Nx-2,Ny-2,Nz-2,1,1,1,0,1); + fillHalo fillData(Dm.Comm,Dm.rank_info,Nx-2,Ny-2,Nz-2,1,1,1,0,1); std::vector meshData(1); meshData[0].meshName = "domain"; meshData[0].mesh = std::shared_ptr( new IO::DomainMesh(Dm.rank_info,Nx-2,Ny-2,Nz-2,Lx,Ly,Lz) ); @@ -475,7 +476,7 @@ int main(int argc, char **argv) fillData.copy(Averages.Label_WP,LabelWPVar->data); fillData.copy(Averages.Label_NWP,LabelNWPVar->data); fillData.copy(Averages.PhaseID,PhaseIDVar->data); - IO::writeData( 0, meshData, 2 ); + IO::writeData( 0, meshData, 2, comm ); /* FILE *NWP_FILE; NWP_FILE = fopen("NWP.dat","wb"); @@ -493,7 +494,7 @@ int main(int argc, char **argv) fclose(DISTANCE); */ // **************************************************** - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); // **************************************************** } diff --git a/tests/TestBlobAnalyze.cpp b/tests/TestBlobAnalyze.cpp index bc554763..1f483d90 100644 --- a/tests/TestBlobAnalyze.cpp +++ b/tests/TestBlobAnalyze.cpp @@ -128,8 +128,9 @@ int main(int argc, char **argv) // Initialize MPI int rank, nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); { // Limit scope so variables that contain communicators will free before MPI_Finialize if ( rank==0 ) { @@ -159,20 +160,20 @@ int main(int argc, char **argv) domain >> Ly; domain >> Lz; } - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Computational domain - MPI_Bcast(&nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&nx,1,MPI_INT,0,comm); + MPI_Bcast(&ny,1,MPI_INT,0,comm); + MPI_Bcast(&nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Check that the number of processors >= the number of ranks if ( rank==0 ) { @@ -204,7 +205,7 @@ int main(int argc, char **argv) } } //....................................................................... - Dm.CommInit(MPI_COMM_WORLD); // Initialize communications for domains + Dm.CommInit(comm); // Initialize communications for domains //....................................................................... 
// Read in sphere pack (initialize the non-wetting phase as inside of spheres) if (rank==1) printf("nspheres =%i \n",nspheres); @@ -216,14 +217,14 @@ int main(int argc, char **argv) //....................................................................... if (rank == 0) printf("Reading the sphere packing \n"); if (rank == 0) ReadSpherePacking(nspheres,cx,cy,cz,rad); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Broadcast the sphere packing to all processes - MPI_Bcast(cx,nspheres,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(cy,nspheres,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(cz,nspheres,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(rad,nspheres,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(cx,nspheres,MPI_DOUBLE,0,comm); + MPI_Bcast(cy,nspheres,MPI_DOUBLE,0,comm); + MPI_Bcast(cz,nspheres,MPI_DOUBLE,0,comm); + MPI_Bcast(rad,nspheres,MPI_DOUBLE,0,comm); //........................................................................... - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //....................................................................... SignedDistance(Averages.Phase.get(),nspheres,cx,cy,cz,rad,Lx,Ly,Lz,Nx,Ny,Nz, Dm.iproc,Dm.jproc,Dm.kproc,Dm.nprocx,Dm.nprocy,Dm.nprocz); @@ -289,7 +290,7 @@ int main(int argc, char **argv) delete [] rad; } // Limit scope so variables that contain communicators will free before MPI_Finialize - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); return 0; } diff --git a/tests/TestBlobIdentify.cpp b/tests/TestBlobIdentify.cpp index 91e75ce2..f79d1c81 100644 --- a/tests/TestBlobIdentify.cpp +++ b/tests/TestBlobIdentify.cpp @@ -23,10 +23,10 @@ inline double rand2() // Test if all ranks agree on a value -bool allAgree( int x ) { +bool allAgree( int x, MPI_Comm comm ) { int min, max; - MPI_Allreduce(&x,&min,1,MPI_INT,MPI_MIN,MPI_COMM_WORLD); - MPI_Allreduce(&x,&max,1,MPI_INT,MPI_MAX,MPI_COMM_WORLD); + MPI_Allreduce(&x,&min,1,MPI_INT,MPI_MIN,comm); + MPI_Allreduce(&x,&max,1,MPI_INT,MPI_MAX,comm); return min==max; } @@ -63,10 +63,9 @@ struct bubble_struct { // Create a random set of bubles -std::vector create_bubbles( int N_bubbles, double Lx, double Ly, double Lz ) +std::vector create_bubbles( int N_bubbles, double Lx, double Ly, double Lz, MPI_Comm comm ) { - int rank; - MPI_Comm_rank(MPI_COMM_WORLD,&rank); + int rank = comm_rank(comm); std::vector bubbles(N_bubbles); if ( rank == 0 ) { double R0 = 0.2*Lx*Ly*Lz/pow((double)N_bubbles,0.333); @@ -81,7 +80,7 @@ std::vector create_bubbles( int N_bubbles, double Lx, double Ly, } } size_t N_bytes = N_bubbles*sizeof(bubble_struct); - MPI_Bcast(&bubbles[0],N_bytes,MPI_CHAR,0,MPI_COMM_WORLD); + MPI_Bcast(&bubbles[0],N_bytes,MPI_CHAR,0,comm); return bubbles; } @@ -114,7 +113,7 @@ void fillBubbleData( const std::vector& bubbles, DoubleArray& Pha // Shift all of the data by the given number of cells -void shift_data( DoubleArray& data, int sx, int sy, int sz, const RankInfoStruct& rank_info ) +void shift_data( DoubleArray& data, int sx, int sy, int sz, const RankInfoStruct& rank_info, MPI_Comm comm ) { int nx = data.size(0)-2; int ny = data.size(1)-2; @@ -123,8 +122,8 @@ void shift_data( DoubleArray& data, int sx, int sy, int sz, const RankInfoStruct int ngy = ny+2*abs(sy); int ngz = nz+2*abs(sz); Array tmp1(nx,ny,nz), tmp2(ngx,ngy,ngz), tmp3(ngx,ngy,ngz); - fillHalo fillData1(rank_info,nx,ny,nz,1,1,1,0,1); - fillHalo fillData2(rank_info,nx,ny,nz,abs(sx),abs(sy),abs(sz),0,1); + fillHalo fillData1(comm,rank_info,nx,ny,nz,1,1,1,0,1); + fillHalo 
fillData2(comm,rank_info,nx,ny,nz,abs(sx),abs(sy),abs(sz),0,1); fillData1.copy(data,tmp1); fillData2.copy(tmp1,tmp2); fillData2.fill(tmp2); @@ -146,8 +145,9 @@ int main(int argc, char **argv) // Initialize MPI int rank, nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); PROFILE_ENABLE(1); PROFILE_DISABLE_TRACE(); PROFILE_SYNCHRONIZE(); @@ -174,11 +174,11 @@ int main(int argc, char **argv) // Create the dummy info DoubleArray Phase(nx+2,ny+2,nz+2); DoubleArray SignDist(nx+2,ny+2,nz+2); - std::vector bubbles = create_bubbles(20,Lx,Ly,Lz); + std::vector bubbles = create_bubbles(20,Lx,Ly,Lz,comm); fillBubbleData( bubbles, Phase, SignDist, Lx, Ly, Lz, rank_info ); // Communication the halos - fillHalo fillData(rank_info,nx,ny,nz,1,1,1,0,1); + fillHalo fillData(comm,rank_info,nx,ny,nz,1,1,1,0,1); fillData.fill(Phase); fillData.fill(SignDist); @@ -188,7 +188,7 @@ int main(int argc, char **argv) double vS=0.0; IntArray GlobalBlobID; int nblobs = ComputeGlobalBlobIDs(nx,ny,nz,rank_info, - Phase,SignDist,vF,vS,GlobalBlobID); + Phase,SignDist,vF,vS,GlobalBlobID,comm); if ( rank==0 ) { printf("Identified %i blobs\n",nblobs); } // Create the MeshDataStruct @@ -218,7 +218,7 @@ int main(int argc, char **argv) fillData.copy(Phase,PhaseVar->data); fillData.copy(SignDist,SignDistVar->data); fillData.copy(GlobalBlobID,BlobIDVar->data); - IO::writeData( 0, meshData, 2 ); + IO::writeData( 0, meshData, 2, comm ); writeIDMap(ID_map_struct(nblobs),0,"lbpm_id_map.txt"); int save_it = 1; @@ -236,18 +236,18 @@ int main(int argc, char **argv) int id_max = nblobs-1; for (int i=0; i<20; i++, save_it++) { // Shift all the data - shift_data( Phase, 3, -2, 1, rank_info ); - shift_data( SignDist, 3, -2, 1, rank_info ); + shift_data( Phase, 3, -2, 1, rank_info, comm ); + shift_data( SignDist, 3, -2, 1, rank_info, comm ); // Find blob domains IntArray GlobalBlobID2; int nblobs2 = ComputeGlobalBlobIDs(nx,ny,nz,rank_info, - Phase,SignDist,vF,vS,GlobalBlobID2); + Phase,SignDist,vF,vS,GlobalBlobID2,comm); if ( nblobs2 != nblobs ) { printf("Error, number of blobs changed under constant velocity (%i,%i)\n",nblobs,nblobs2); N_errors++; } // Identify the blob maps and renumber the ids - ID_map_struct map = computeIDMap(GlobalBlobID,GlobalBlobID2); + ID_map_struct map = computeIDMap(GlobalBlobID,GlobalBlobID2,comm); std::swap(GlobalBlobID,GlobalBlobID2); std::vector new_list; getNewIDs(map,id_max,new_list); @@ -270,7 +270,7 @@ int main(int argc, char **argv) fillData.copy(Phase,PhaseVar->data); fillData.copy(SignDist,SignDistVar->data); fillData.copy(GlobalBlobID,BlobIDVar->data); - IO::writeData( save_it, meshData, 2 ); + IO::writeData( save_it, meshData, 2, comm ); } PROFILE_STOP("constant velocity test"); @@ -286,15 +286,15 @@ int main(int argc, char **argv) velocity[i].z = bubbles[i].radius*(2*rand2()-1); } } - MPI_Bcast(&velocity[0],bubbles.size()*sizeof(Point),MPI_CHAR,0,MPI_COMM_WORLD); + MPI_Bcast(&velocity[0],bubbles.size()*sizeof(Point),MPI_CHAR,0,comm); fillBubbleData( bubbles, Phase, SignDist, Lx, Ly, Lz, rank_info ); fillData.fill(Phase); fillData.fill(SignDist); - ComputeGlobalBlobIDs(nx,ny,nz,rank_info,Phase,SignDist,vF,vS,GlobalBlobID); + ComputeGlobalBlobIDs(nx,ny,nz,rank_info,Phase,SignDist,vF,vS,GlobalBlobID,comm); fillData.copy(Phase,PhaseVar->data); fillData.copy(SignDist,SignDistVar->data); fillData.copy(GlobalBlobID,BlobIDVar->data); - IO::writeData( save_it, 
meshData, 2 ); + IO::writeData( save_it, meshData, 2, comm ); save_it++; id_max = nblobs-1; for (int i=0; i<25; i++, save_it++) { @@ -309,15 +309,15 @@ int main(int argc, char **argv) fillData.fill(SignDist); // Compute the ids IntArray GlobalBlobID2; - int nblobs2 = ComputeGlobalBlobIDs(nx,ny,nz,rank_info,Phase,SignDist,vF,vS,GlobalBlobID2); + int nblobs2 = ComputeGlobalBlobIDs(nx,ny,nz,rank_info,Phase,SignDist,vF,vS,GlobalBlobID2,comm); // Identify the blob maps and renumber the ids - ID_map_struct map = computeIDMap(GlobalBlobID,GlobalBlobID2); + ID_map_struct map = computeIDMap(GlobalBlobID,GlobalBlobID2,comm); std::swap(GlobalBlobID,GlobalBlobID2); std::vector new_list; getNewIDs(map,id_max,new_list); renumberIDs(new_list,GlobalBlobID); writeIDMap(map,save_it,"lbpm_id_map.txt"); - if ( !allAgree(id_max) ) { + if ( !allAgree(id_max,comm) ) { if ( rank==0 ) printf("All ranks do not agree on id_max\n"); N_errors++; @@ -370,8 +370,8 @@ int main(int argc, char **argv) printf("\n"); } } - MPI_Bcast(&N1,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&N2,1,MPI_INT,0,MPI_COMM_WORLD); + MPI_Bcast(&N1,1,MPI_INT,0,comm); + MPI_Bcast(&N2,1,MPI_INT,0,comm); if ( N1!=nblobs || N2!=nblobs2 ) { if ( rank==0 ) printf("Error, blob ids do not map in moving bubble test (%i,%i,%i,%i)\n", @@ -383,7 +383,7 @@ int main(int argc, char **argv) fillData.copy(Phase,PhaseVar->data); fillData.copy(SignDist,SignDistVar->data); fillData.copy(GlobalBlobID,BlobIDVar->data); - IO::writeData( save_it, meshData, 2 ); + IO::writeData( save_it, meshData, 2, comm ); } PROFILE_STOP("moving bubble test"); @@ -391,7 +391,7 @@ int main(int argc, char **argv) // Finished PROFILE_STOP("main"); PROFILE_SAVE("TestBlobIdentify",false); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); return N_errors; } diff --git a/tests/TestBubble.cpp b/tests/TestBubble.cpp index 65d5c0b6..fc0bced3 100644 --- a/tests/TestBubble.cpp +++ b/tests/TestBubble.cpp @@ -98,8 +98,9 @@ int main(int argc, char **argv) // Initialize MPI int rank,nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); Utilities::setAbortBehavior(true,true,false); PROFILE_ENABLE(0); // parallel domain size (# of sub-domains) @@ -204,38 +205,38 @@ int main(int argc, char **argv) } // ************************************************************** // Broadcast simulation parameters from rank 0 to all other procs - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //................................................. 
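// [Editor's note -- illustrative sketch, not part of the patch]
// Every converted driver binds the communicator once, immediately after
// MPI_Init (TestBubble.cpp above does exactly this), and all later calls refer
// to that variable.  Because the patch also adds MPI_Comm_dup/MPI_Comm_free
// (with serial no-op stubs), moving a driver onto a duplicated communicator
// becomes a one-line change.  The main() below is hypothetical.
#include "common/MPI_Helpers.h"
int main( int argc, char **argv )
{
    MPI_Init(&argc,&argv);
    MPI_Comm comm = MPI_COMM_WORLD;     // or: MPI_Comm_dup(MPI_COMM_WORLD,&comm);
    int rank = comm_rank(comm);
    int Nz = 0;
    if ( rank == 0 ) Nz = 64;           // placeholder input read on rank 0
    MPI_Bcast(&Nz,1,MPI_INT,0,comm);    // every collective names comm, never MPI_COMM_WORLD
    MPI_Barrier(comm);
    MPI_Finalize();
    return 0;
}
// [End of editor's sketch]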
- MPI_Bcast(&tau,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&alpha,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&beta,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&das,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&dbs,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&phi_s,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&wp_saturation,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&pBC,1,MPI_LOGICAL,0,MPI_COMM_WORLD); - MPI_Bcast(&Restart,1,MPI_LOGICAL,0,MPI_COMM_WORLD); - MPI_Bcast(&din,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&dout,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Fx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Fy,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Fz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(×tepMax,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&interval,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&tol,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&tau,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&alpha,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&beta,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&das,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&dbs,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&phi_s,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&wp_saturation,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&pBC,1,MPI_LOGICAL,0,comm); + MPI_Bcast(&Restart,1,MPI_LOGICAL,0,comm); + MPI_Bcast(&din,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&dout,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Fx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Fy,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Fz,1,MPI_DOUBLE,0,comm); + MPI_Bcast(×tepMax,1,MPI_INT,0,comm); + MPI_Bcast(&interval,1,MPI_INT,0,comm); + MPI_Bcast(&tol,1,MPI_DOUBLE,0,comm); // Computational domain - MPI_Bcast(&Nz,1,MPI_INT,0,MPI_COMM_WORLD); -// MPI_Bcast(&nBlocks,1,MPI_INT,0,MPI_COMM_WORLD); -// MPI_Bcast(&nthreads,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&Nz,1,MPI_INT,0,comm); +// MPI_Bcast(&nBlocks,1,MPI_INT,0,comm); +// MPI_Bcast(&nthreads,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // ************************************************************** // ************************************************************** double Ps = -(das-dbs)/(das+dbs); @@ -276,7 +277,7 @@ int main(int argc, char **argv) rank_xy, rank_XY, rank_xY, rank_Xy, rank_xz, rank_XZ, rank_xZ, rank_Xz, rank_yz, rank_YZ, rank_yZ, rank_Yz ); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); Nz += 2; Nx = Ny = Nz; // Cubic domain @@ -449,7 +450,7 @@ int main(int argc, char **argv) } } } - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); if (rank==0) printf ("SendLists are ready on host\n"); //...................................................................................... 
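// [Editor's note -- illustrative sketch, not part of the patch]
// The block below posts 18 MPI_Isend/MPI_Irecv pairs (one per face and edge
// neighbour) so each rank learns how many lattice sites it will receive, then
// waits on all requests.  Condensed to a single neighbour pair, the pattern on
// the passed communicator is:
static void exchange_count( int sendCount_x, int &recvCount_X,
                            int rank_x, int rank_X, MPI_Comm comm )
{
    MPI_Request req1[1], req2[1];
    MPI_Status  stat1[1], stat2[1];
    int sendtag = 3, recvtag = 3;                // same tags as the code below
    MPI_Isend(&sendCount_x,1,MPI_INT,rank_x,sendtag,comm,&req1[0]);
    MPI_Irecv(&recvCount_X,1,MPI_INT,rank_X,recvtag,comm,&req2[0]);
    MPI_Waitall(1,req1,stat1);
    MPI_Waitall(1,req2,stat2);
}
// [End of editor's sketch]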
// Use MPI to fill in the recvCounts form the associated processes @@ -460,88 +461,88 @@ int main(int argc, char **argv) //********************************************************************************** // Fill in the recieve counts using MPI sendtag = recvtag = 3; - MPI_Isend(&sendCount_x, 1,MPI_INT,rank_x,sendtag,MPI_COMM_WORLD,&req1[0]); - MPI_Irecv(&recvCount_X, 1,MPI_INT,rank_X,recvtag,MPI_COMM_WORLD,&req2[0]); - MPI_Isend(&sendCount_X, 1,MPI_INT,rank_X,sendtag,MPI_COMM_WORLD,&req1[1]); - MPI_Irecv(&recvCount_x, 1,MPI_INT,rank_x,recvtag,MPI_COMM_WORLD,&req2[1]); - MPI_Isend(&sendCount_y, 1,MPI_INT,rank_y,sendtag,MPI_COMM_WORLD,&req1[2]); - MPI_Irecv(&recvCount_Y, 1,MPI_INT,rank_Y,recvtag,MPI_COMM_WORLD,&req2[2]); - MPI_Isend(&sendCount_Y, 1,MPI_INT,rank_Y,sendtag,MPI_COMM_WORLD,&req1[3]); - MPI_Irecv(&recvCount_y, 1,MPI_INT,rank_y,recvtag,MPI_COMM_WORLD,&req2[3]); - MPI_Isend(&sendCount_z, 1,MPI_INT,rank_z,sendtag,MPI_COMM_WORLD,&req1[4]); - MPI_Irecv(&recvCount_Z, 1,MPI_INT,rank_Z,recvtag,MPI_COMM_WORLD,&req2[4]); - MPI_Isend(&sendCount_Z, 1,MPI_INT,rank_Z,sendtag,MPI_COMM_WORLD,&req1[5]); - MPI_Irecv(&recvCount_z, 1,MPI_INT,rank_z,recvtag,MPI_COMM_WORLD,&req2[5]); + MPI_Isend(&sendCount_x, 1,MPI_INT,rank_x,sendtag,comm,&req1[0]); + MPI_Irecv(&recvCount_X, 1,MPI_INT,rank_X,recvtag,comm,&req2[0]); + MPI_Isend(&sendCount_X, 1,MPI_INT,rank_X,sendtag,comm,&req1[1]); + MPI_Irecv(&recvCount_x, 1,MPI_INT,rank_x,recvtag,comm,&req2[1]); + MPI_Isend(&sendCount_y, 1,MPI_INT,rank_y,sendtag,comm,&req1[2]); + MPI_Irecv(&recvCount_Y, 1,MPI_INT,rank_Y,recvtag,comm,&req2[2]); + MPI_Isend(&sendCount_Y, 1,MPI_INT,rank_Y,sendtag,comm,&req1[3]); + MPI_Irecv(&recvCount_y, 1,MPI_INT,rank_y,recvtag,comm,&req2[3]); + MPI_Isend(&sendCount_z, 1,MPI_INT,rank_z,sendtag,comm,&req1[4]); + MPI_Irecv(&recvCount_Z, 1,MPI_INT,rank_Z,recvtag,comm,&req2[4]); + MPI_Isend(&sendCount_Z, 1,MPI_INT,rank_Z,sendtag,comm,&req1[5]); + MPI_Irecv(&recvCount_z, 1,MPI_INT,rank_z,recvtag,comm,&req2[5]); - MPI_Isend(&sendCount_xy, 1,MPI_INT,rank_xy,sendtag,MPI_COMM_WORLD,&req1[6]); - MPI_Irecv(&recvCount_XY, 1,MPI_INT,rank_XY,recvtag,MPI_COMM_WORLD,&req2[6]); - MPI_Isend(&sendCount_XY, 1,MPI_INT,rank_XY,sendtag,MPI_COMM_WORLD,&req1[7]); - MPI_Irecv(&recvCount_xy, 1,MPI_INT,rank_xy,recvtag,MPI_COMM_WORLD,&req2[7]); - MPI_Isend(&sendCount_Xy, 1,MPI_INT,rank_Xy,sendtag,MPI_COMM_WORLD,&req1[8]); - MPI_Irecv(&recvCount_xY, 1,MPI_INT,rank_xY,recvtag,MPI_COMM_WORLD,&req2[8]); - MPI_Isend(&sendCount_xY, 1,MPI_INT,rank_xY,sendtag,MPI_COMM_WORLD,&req1[9]); - MPI_Irecv(&recvCount_Xy, 1,MPI_INT,rank_Xy,recvtag,MPI_COMM_WORLD,&req2[9]); + MPI_Isend(&sendCount_xy, 1,MPI_INT,rank_xy,sendtag,comm,&req1[6]); + MPI_Irecv(&recvCount_XY, 1,MPI_INT,rank_XY,recvtag,comm,&req2[6]); + MPI_Isend(&sendCount_XY, 1,MPI_INT,rank_XY,sendtag,comm,&req1[7]); + MPI_Irecv(&recvCount_xy, 1,MPI_INT,rank_xy,recvtag,comm,&req2[7]); + MPI_Isend(&sendCount_Xy, 1,MPI_INT,rank_Xy,sendtag,comm,&req1[8]); + MPI_Irecv(&recvCount_xY, 1,MPI_INT,rank_xY,recvtag,comm,&req2[8]); + MPI_Isend(&sendCount_xY, 1,MPI_INT,rank_xY,sendtag,comm,&req1[9]); + MPI_Irecv(&recvCount_Xy, 1,MPI_INT,rank_Xy,recvtag,comm,&req2[9]); - MPI_Isend(&sendCount_xz, 1,MPI_INT,rank_xz,sendtag,MPI_COMM_WORLD,&req1[10]); - MPI_Irecv(&recvCount_XZ, 1,MPI_INT,rank_XZ,recvtag,MPI_COMM_WORLD,&req2[10]); - MPI_Isend(&sendCount_XZ, 1,MPI_INT,rank_XZ,sendtag,MPI_COMM_WORLD,&req1[11]); - MPI_Irecv(&recvCount_xz, 1,MPI_INT,rank_xz,recvtag,MPI_COMM_WORLD,&req2[11]); - MPI_Isend(&sendCount_Xz, 
1,MPI_INT,rank_Xz,sendtag,MPI_COMM_WORLD,&req1[12]); - MPI_Irecv(&recvCount_xZ, 1,MPI_INT,rank_xZ,recvtag,MPI_COMM_WORLD,&req2[12]); - MPI_Isend(&sendCount_xZ, 1,MPI_INT,rank_xZ,sendtag,MPI_COMM_WORLD,&req1[13]); - MPI_Irecv(&recvCount_Xz, 1,MPI_INT,rank_Xz,recvtag,MPI_COMM_WORLD,&req2[13]); + MPI_Isend(&sendCount_xz, 1,MPI_INT,rank_xz,sendtag,comm,&req1[10]); + MPI_Irecv(&recvCount_XZ, 1,MPI_INT,rank_XZ,recvtag,comm,&req2[10]); + MPI_Isend(&sendCount_XZ, 1,MPI_INT,rank_XZ,sendtag,comm,&req1[11]); + MPI_Irecv(&recvCount_xz, 1,MPI_INT,rank_xz,recvtag,comm,&req2[11]); + MPI_Isend(&sendCount_Xz, 1,MPI_INT,rank_Xz,sendtag,comm,&req1[12]); + MPI_Irecv(&recvCount_xZ, 1,MPI_INT,rank_xZ,recvtag,comm,&req2[12]); + MPI_Isend(&sendCount_xZ, 1,MPI_INT,rank_xZ,sendtag,comm,&req1[13]); + MPI_Irecv(&recvCount_Xz, 1,MPI_INT,rank_Xz,recvtag,comm,&req2[13]); - MPI_Isend(&sendCount_yz, 1,MPI_INT,rank_yz,sendtag,MPI_COMM_WORLD,&req1[14]); - MPI_Irecv(&recvCount_YZ, 1,MPI_INT,rank_YZ,recvtag,MPI_COMM_WORLD,&req2[14]); - MPI_Isend(&sendCount_YZ, 1,MPI_INT,rank_YZ,sendtag,MPI_COMM_WORLD,&req1[15]); - MPI_Irecv(&recvCount_yz, 1,MPI_INT,rank_yz,recvtag,MPI_COMM_WORLD,&req2[15]); - MPI_Isend(&sendCount_Yz, 1,MPI_INT,rank_Yz,sendtag,MPI_COMM_WORLD,&req1[16]); - MPI_Irecv(&recvCount_yZ, 1,MPI_INT,rank_yZ,recvtag,MPI_COMM_WORLD,&req2[16]); - MPI_Isend(&sendCount_yZ, 1,MPI_INT,rank_yZ,sendtag,MPI_COMM_WORLD,&req1[17]); - MPI_Irecv(&recvCount_Yz, 1,MPI_INT,rank_Yz,recvtag,MPI_COMM_WORLD,&req2[17]); + MPI_Isend(&sendCount_yz, 1,MPI_INT,rank_yz,sendtag,comm,&req1[14]); + MPI_Irecv(&recvCount_YZ, 1,MPI_INT,rank_YZ,recvtag,comm,&req2[14]); + MPI_Isend(&sendCount_YZ, 1,MPI_INT,rank_YZ,sendtag,comm,&req1[15]); + MPI_Irecv(&recvCount_yz, 1,MPI_INT,rank_yz,recvtag,comm,&req2[15]); + MPI_Isend(&sendCount_Yz, 1,MPI_INT,rank_Yz,sendtag,comm,&req1[16]); + MPI_Irecv(&recvCount_yZ, 1,MPI_INT,rank_yZ,recvtag,comm,&req2[16]); + MPI_Isend(&sendCount_yZ, 1,MPI_INT,rank_yZ,sendtag,comm,&req1[17]); + MPI_Irecv(&recvCount_Yz, 1,MPI_INT,rank_Yz,recvtag,comm,&req2[17]); MPI_Waitall(18,req1,stat1); MPI_Waitall(18,req2,stat2); - MPI_Barrier(MPI_COMM_WORLD); -/* MPI_Send(&sendCount_x,1,MPI_INT,rank_X,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_X,1,MPI_INT,rank_x,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_X,1,MPI_INT,rank_x,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_x,1,MPI_INT,rank_X,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_y,1,MPI_INT,rank_Y,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_Y,1,MPI_INT,rank_y,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_Y,1,MPI_INT,rank_y,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_y,1,MPI_INT,rank_Y,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_z,1,MPI_INT,rank_Z,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_Z,1,MPI_INT,rank_z,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_Z,1,MPI_INT,rank_z,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_z,1,MPI_INT,rank_Z,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + MPI_Barrier(comm); +/* MPI_Send(&sendCount_x,1,MPI_INT,rank_X,sendtag,comm); + MPI_Recv(&recvCount_X,1,MPI_INT,rank_x,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_X,1,MPI_INT,rank_x,sendtag,comm); + MPI_Recv(&recvCount_x,1,MPI_INT,rank_X,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_y,1,MPI_INT,rank_Y,sendtag,comm); + MPI_Recv(&recvCount_Y,1,MPI_INT,rank_y,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_Y,1,MPI_INT,rank_y,sendtag,comm); + 
MPI_Recv(&recvCount_y,1,MPI_INT,rank_Y,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_z,1,MPI_INT,rank_Z,sendtag,comm); + MPI_Recv(&recvCount_Z,1,MPI_INT,rank_z,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_Z,1,MPI_INT,rank_z,sendtag,comm); + MPI_Recv(&recvCount_z,1,MPI_INT,rank_Z,recvtag,comm,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_xy,1,MPI_INT,rank_XY,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_XY,1,MPI_INT,rank_xy,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_XY,1,MPI_INT,rank_xy,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_xy,1,MPI_INT,rank_XY,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_Xy,1,MPI_INT,rank_xY,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_xY,1,MPI_INT,rank_Xy,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_xY,1,MPI_INT,rank_Xy,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_Xy,1,MPI_INT,rank_xY,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_xy,1,MPI_INT,rank_XY,sendtag,comm); + MPI_Recv(&recvCount_XY,1,MPI_INT,rank_xy,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_XY,1,MPI_INT,rank_xy,sendtag,comm); + MPI_Recv(&recvCount_xy,1,MPI_INT,rank_XY,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_Xy,1,MPI_INT,rank_xY,sendtag,comm); + MPI_Recv(&recvCount_xY,1,MPI_INT,rank_Xy,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_xY,1,MPI_INT,rank_Xy,sendtag,comm); + MPI_Recv(&recvCount_Xy,1,MPI_INT,rank_xY,recvtag,comm,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_xz,1,MPI_INT,rank_XZ,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_XZ,1,MPI_INT,rank_xz,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_XZ,1,MPI_INT,rank_xz,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_xz,1,MPI_INT,rank_XZ,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_Xz,1,MPI_INT,rank_xZ,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_xZ,1,MPI_INT,rank_Xz,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_xZ,1,MPI_INT,rank_Xz,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_Xz,1,MPI_INT,rank_xZ,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_xz,1,MPI_INT,rank_XZ,sendtag,comm); + MPI_Recv(&recvCount_XZ,1,MPI_INT,rank_xz,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_XZ,1,MPI_INT,rank_xz,sendtag,comm); + MPI_Recv(&recvCount_xz,1,MPI_INT,rank_XZ,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_Xz,1,MPI_INT,rank_xZ,sendtag,comm); + MPI_Recv(&recvCount_xZ,1,MPI_INT,rank_Xz,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_xZ,1,MPI_INT,rank_Xz,sendtag,comm); + MPI_Recv(&recvCount_Xz,1,MPI_INT,rank_xZ,recvtag,comm,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_yz,1,MPI_INT,rank_YZ,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_YZ,1,MPI_INT,rank_yz,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_YZ,1,MPI_INT,rank_yz,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_yz,1,MPI_INT,rank_YZ,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_Yz,1,MPI_INT,rank_yZ,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_yZ,1,MPI_INT,rank_Yz,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Send(&sendCount_yZ,1,MPI_INT,rank_Yz,sendtag,MPI_COMM_WORLD); - MPI_Recv(&recvCount_Yz,1,MPI_INT,rank_yZ,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Send(&sendCount_yz,1,MPI_INT,rank_YZ,sendtag,comm); + MPI_Recv(&recvCount_YZ,1,MPI_INT,rank_yz,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_YZ,1,MPI_INT,rank_yz,sendtag,comm); + MPI_Recv(&recvCount_yz,1,MPI_INT,rank_YZ,recvtag,comm,MPI_STATUS_IGNORE); + 
MPI_Send(&sendCount_Yz,1,MPI_INT,rank_yZ,sendtag,comm); + MPI_Recv(&recvCount_yZ,1,MPI_INT,rank_Yz,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Send(&sendCount_yZ,1,MPI_INT,rank_Yz,sendtag,comm); + MPI_Recv(&recvCount_Yz,1,MPI_INT,rank_yZ,recvtag,comm,MPI_STATUS_IGNORE); + MPI_Barrier(comm); */ //********************************************************************************** //...................................................................................... int *recvList_x, *recvList_y, *recvList_z, *recvList_X, *recvList_Y, *recvList_Z; @@ -572,48 +573,48 @@ int main(int argc, char **argv) // Use MPI to fill in the appropriate values for recvList // Fill in the recieve lists using MPI sendtag = recvtag = 4; - MPI_Isend(sendList_x, sendCount_x,MPI_INT,rank_x,sendtag,MPI_COMM_WORLD,&req1[0]); - MPI_Irecv(recvList_X, recvCount_X,MPI_INT,rank_X,recvtag,MPI_COMM_WORLD,&req2[0]); - MPI_Isend(sendList_X, sendCount_X,MPI_INT,rank_X,sendtag,MPI_COMM_WORLD,&req1[1]); - MPI_Irecv(recvList_x, recvCount_x,MPI_INT,rank_x,recvtag,MPI_COMM_WORLD,&req2[1]); - MPI_Isend(sendList_y, sendCount_y,MPI_INT,rank_y,sendtag,MPI_COMM_WORLD,&req1[2]); - MPI_Irecv(recvList_Y, recvCount_Y,MPI_INT,rank_Y,recvtag,MPI_COMM_WORLD,&req2[2]); - MPI_Isend(sendList_Y, sendCount_Y,MPI_INT,rank_Y,sendtag,MPI_COMM_WORLD,&req1[3]); - MPI_Irecv(recvList_y, recvCount_y,MPI_INT,rank_y,recvtag,MPI_COMM_WORLD,&req2[3]); - MPI_Isend(sendList_z, sendCount_z,MPI_INT,rank_z,sendtag,MPI_COMM_WORLD,&req1[4]); - MPI_Irecv(recvList_Z, recvCount_Z,MPI_INT,rank_Z,recvtag,MPI_COMM_WORLD,&req2[4]); - MPI_Isend(sendList_Z, sendCount_Z,MPI_INT,rank_Z,sendtag,MPI_COMM_WORLD,&req1[5]); - MPI_Irecv(recvList_z, recvCount_z,MPI_INT,rank_z,recvtag,MPI_COMM_WORLD,&req2[5]); + MPI_Isend(sendList_x, sendCount_x,MPI_INT,rank_x,sendtag,comm,&req1[0]); + MPI_Irecv(recvList_X, recvCount_X,MPI_INT,rank_X,recvtag,comm,&req2[0]); + MPI_Isend(sendList_X, sendCount_X,MPI_INT,rank_X,sendtag,comm,&req1[1]); + MPI_Irecv(recvList_x, recvCount_x,MPI_INT,rank_x,recvtag,comm,&req2[1]); + MPI_Isend(sendList_y, sendCount_y,MPI_INT,rank_y,sendtag,comm,&req1[2]); + MPI_Irecv(recvList_Y, recvCount_Y,MPI_INT,rank_Y,recvtag,comm,&req2[2]); + MPI_Isend(sendList_Y, sendCount_Y,MPI_INT,rank_Y,sendtag,comm,&req1[3]); + MPI_Irecv(recvList_y, recvCount_y,MPI_INT,rank_y,recvtag,comm,&req2[3]); + MPI_Isend(sendList_z, sendCount_z,MPI_INT,rank_z,sendtag,comm,&req1[4]); + MPI_Irecv(recvList_Z, recvCount_Z,MPI_INT,rank_Z,recvtag,comm,&req2[4]); + MPI_Isend(sendList_Z, sendCount_Z,MPI_INT,rank_Z,sendtag,comm,&req1[5]); + MPI_Irecv(recvList_z, recvCount_z,MPI_INT,rank_z,recvtag,comm,&req2[5]); - MPI_Isend(sendList_xy, sendCount_xy,MPI_INT,rank_xy,sendtag,MPI_COMM_WORLD,&req1[6]); - MPI_Irecv(recvList_XY, recvCount_XY,MPI_INT,rank_XY,recvtag,MPI_COMM_WORLD,&req2[6]); - MPI_Isend(sendList_XY, sendCount_XY,MPI_INT,rank_XY,sendtag,MPI_COMM_WORLD,&req1[7]); - MPI_Irecv(recvList_xy, recvCount_xy,MPI_INT,rank_xy,recvtag,MPI_COMM_WORLD,&req2[7]); - MPI_Isend(sendList_Xy, sendCount_Xy,MPI_INT,rank_Xy,sendtag,MPI_COMM_WORLD,&req1[8]); - MPI_Irecv(recvList_xY, recvCount_xY,MPI_INT,rank_xY,recvtag,MPI_COMM_WORLD,&req2[8]); - MPI_Isend(sendList_xY, sendCount_xY,MPI_INT,rank_xY,sendtag,MPI_COMM_WORLD,&req1[9]); - MPI_Irecv(recvList_Xy, recvCount_Xy,MPI_INT,rank_Xy,recvtag,MPI_COMM_WORLD,&req2[9]); + MPI_Isend(sendList_xy, sendCount_xy,MPI_INT,rank_xy,sendtag,comm,&req1[6]); + MPI_Irecv(recvList_XY, recvCount_XY,MPI_INT,rank_XY,recvtag,comm,&req2[6]); + MPI_Isend(sendList_XY, 
sendCount_XY,MPI_INT,rank_XY,sendtag,comm,&req1[7]); + MPI_Irecv(recvList_xy, recvCount_xy,MPI_INT,rank_xy,recvtag,comm,&req2[7]); + MPI_Isend(sendList_Xy, sendCount_Xy,MPI_INT,rank_Xy,sendtag,comm,&req1[8]); + MPI_Irecv(recvList_xY, recvCount_xY,MPI_INT,rank_xY,recvtag,comm,&req2[8]); + MPI_Isend(sendList_xY, sendCount_xY,MPI_INT,rank_xY,sendtag,comm,&req1[9]); + MPI_Irecv(recvList_Xy, recvCount_Xy,MPI_INT,rank_Xy,recvtag,comm,&req2[9]); - MPI_Isend(sendList_xz, sendCount_xz,MPI_INT,rank_xz,sendtag,MPI_COMM_WORLD,&req1[10]); - MPI_Irecv(recvList_XZ, recvCount_XZ,MPI_INT,rank_XZ,recvtag,MPI_COMM_WORLD,&req2[10]); - MPI_Isend(sendList_XZ, sendCount_XZ,MPI_INT,rank_XZ,sendtag,MPI_COMM_WORLD,&req1[11]); - MPI_Irecv(recvList_xz, recvCount_xz,MPI_INT,rank_xz,recvtag,MPI_COMM_WORLD,&req2[11]); - MPI_Isend(sendList_Xz, sendCount_Xz,MPI_INT,rank_Xz,sendtag,MPI_COMM_WORLD,&req1[12]); - MPI_Irecv(recvList_xZ, recvCount_xZ,MPI_INT,rank_xZ,recvtag,MPI_COMM_WORLD,&req2[12]); - MPI_Isend(sendList_xZ, sendCount_xZ,MPI_INT,rank_xZ,sendtag,MPI_COMM_WORLD,&req1[13]); - MPI_Irecv(recvList_Xz, recvCount_Xz,MPI_INT,rank_Xz,recvtag,MPI_COMM_WORLD,&req2[13]); + MPI_Isend(sendList_xz, sendCount_xz,MPI_INT,rank_xz,sendtag,comm,&req1[10]); + MPI_Irecv(recvList_XZ, recvCount_XZ,MPI_INT,rank_XZ,recvtag,comm,&req2[10]); + MPI_Isend(sendList_XZ, sendCount_XZ,MPI_INT,rank_XZ,sendtag,comm,&req1[11]); + MPI_Irecv(recvList_xz, recvCount_xz,MPI_INT,rank_xz,recvtag,comm,&req2[11]); + MPI_Isend(sendList_Xz, sendCount_Xz,MPI_INT,rank_Xz,sendtag,comm,&req1[12]); + MPI_Irecv(recvList_xZ, recvCount_xZ,MPI_INT,rank_xZ,recvtag,comm,&req2[12]); + MPI_Isend(sendList_xZ, sendCount_xZ,MPI_INT,rank_xZ,sendtag,comm,&req1[13]); + MPI_Irecv(recvList_Xz, recvCount_Xz,MPI_INT,rank_Xz,recvtag,comm,&req2[13]); - MPI_Isend(sendList_yz, sendCount_yz,MPI_INT,rank_yz,sendtag,MPI_COMM_WORLD,&req1[14]); - MPI_Irecv(recvList_YZ, recvCount_YZ,MPI_INT,rank_YZ,recvtag,MPI_COMM_WORLD,&req2[14]); - MPI_Isend(sendList_YZ, sendCount_YZ,MPI_INT,rank_YZ,sendtag,MPI_COMM_WORLD,&req1[15]); - MPI_Irecv(recvList_yz, recvCount_yz,MPI_INT,rank_yz,recvtag,MPI_COMM_WORLD,&req2[15]); - MPI_Isend(sendList_Yz, sendCount_Yz,MPI_INT,rank_Yz,sendtag,MPI_COMM_WORLD,&req1[16]); - MPI_Irecv(recvList_yZ, recvCount_yZ,MPI_INT,rank_yZ,recvtag,MPI_COMM_WORLD,&req2[16]); - MPI_Isend(sendList_yZ, sendCount_yZ,MPI_INT,rank_yZ,sendtag,MPI_COMM_WORLD,&req1[17]); - MPI_Irecv(recvList_Yz, recvCount_Yz,MPI_INT,rank_Yz,recvtag,MPI_COMM_WORLD,&req2[17]); + MPI_Isend(sendList_yz, sendCount_yz,MPI_INT,rank_yz,sendtag,comm,&req1[14]); + MPI_Irecv(recvList_YZ, recvCount_YZ,MPI_INT,rank_YZ,recvtag,comm,&req2[14]); + MPI_Isend(sendList_YZ, sendCount_YZ,MPI_INT,rank_YZ,sendtag,comm,&req1[15]); + MPI_Irecv(recvList_yz, recvCount_yz,MPI_INT,rank_yz,recvtag,comm,&req2[15]); + MPI_Isend(sendList_Yz, sendCount_Yz,MPI_INT,rank_Yz,sendtag,comm,&req1[16]); + MPI_Irecv(recvList_yZ, recvCount_yZ,MPI_INT,rank_yZ,recvtag,comm,&req2[16]); + MPI_Isend(sendList_yZ, sendCount_yZ,MPI_INT,rank_yZ,sendtag,comm,&req1[17]); + MPI_Irecv(recvList_Yz, recvCount_Yz,MPI_INT,rank_Yz,recvtag,comm,&req2[17]); MPI_Waitall(18,req1,stat1); MPI_Waitall(18,req2,stat2); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //...................................................................................... 
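// [Editor's note -- consolidation sketch, not part of the patch]
// The 18 list exchanges above differ only in buffer, count, neighbour rank and
// request slot.  A small helper (hypothetical; no such function exists in the
// code base) could express one direction of the pattern on the passed
// communicator, with the Waitall calls left exactly as in the patch:
static void post_list_exchange( int *sendList, int sendCount, int rank_send,
                                int *recvList, int recvCount, int rank_recv,
                                int tag, MPI_Comm comm,
                                MPI_Request &send_req, MPI_Request &recv_req )
{
    MPI_Isend(sendList,sendCount,MPI_INT,rank_send,tag,comm,&send_req);
    MPI_Irecv(recvList,recvCount,MPI_INT,rank_recv,tag,comm,&recv_req);
}
// e.g.  post_list_exchange(sendList_x,sendCount_x,rank_x,
//                          recvList_X,recvCount_X,rank_X,4,comm,req1[0],req2[0]);
//       ... repeat for the other 17 directions ...
//       MPI_Waitall(18,req1,stat1);  MPI_Waitall(18,req2,stat2);
// [End of editor's sketch]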
for (int idx=0; idx format(2,0); format[0] = 2; format[1] = 1; - IO::writeData( 0, meshData, format[0] ); - IO::writeData( 3, meshData, format[1] ); - MPI_Barrier(MPI_COMM_WORLD); + IO::writeData( 0, meshData, format[0], comm ); + IO::writeData( 3, meshData, format[1], comm ); + MPI_Barrier(comm); // Get a list of the timesteps std::vector timesteps = IO::readTimesteps("summary.LBM"); @@ -269,7 +270,7 @@ int main(int argc, char **argv) // Finished ut.report(); int N_errors = ut.NumFailGlobal(); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); return N_errors; } diff --git a/tests/hello_world.cpp b/tests/hello_world.cpp index b67e7e6c..d236bf0e 100644 --- a/tests/hello_world.cpp +++ b/tests/hello_world.cpp @@ -5,10 +5,9 @@ int main (int argc, char **argv) { - int rank,nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + int rank = MPI_WORLD_RANK(); + int nprocs = MPI_WORLD_SIZE(); for (int i=0; i 0.0){ double Vn,pn,awn,ans,Jwn,Kwn,lwns,cwns,trawn,trJwn; Vn = Averages.ComponentAverages_NWP(1,b); @@ -298,10 +299,10 @@ int main(int argc, char **argv) double Length=1.0; if (rank==0) WriteBlobStates(Averages,Length,porosity); - //MPI_Barrier(MPI_COMM_WORLD); + //MPI_Barrier(comm); //printf("Exit, rank=%i \n",rank); // **************************************************** - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); // **************************************************** } diff --git a/tests/lbpm_captube_pp.cpp b/tests/lbpm_captube_pp.cpp index 7d32030f..950ef9a6 100644 --- a/tests/lbpm_captube_pp.cpp +++ b/tests/lbpm_captube_pp.cpp @@ -19,8 +19,9 @@ int main(int argc, char **argv) // Initialize MPI int rank,nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); // parallel domain size (# of sub-domains) int nprocx,nprocy,nprocz; int iproc,jproc,kproc; @@ -75,20 +76,20 @@ int main(int argc, char **argv) } // ************************************************************** // Broadcast simulation parameters from rank 0 to all other procs - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Computational domain - MPI_Bcast(&Nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&Nx,1,MPI_INT,0,comm); + MPI_Bcast(&Ny,1,MPI_INT,0,comm); + MPI_Bcast(&Nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. 
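// [Editor's note -- illustrative sketch, not part of the patch]
// IO::writeData() (see the TestWriter.cpp calls above) now takes the
// communicator used for the parallel write; per IO/Writer.h it is "usually
// MPI_COMM_WORLD or a dup thereof".  A minimal caller is sketched below.  The
// MeshDataStruct field names follow ComponentLabel.cpp in this patch, but the
// exact smart-pointer type is assumed (template arguments were lost in the
// surrounding hunks), and other project headers are elided.
#include <memory>
#include <vector>
#include "IO/Writer.h"
static void write_example( const RankInfoStruct& rank_info, int Nx, int Ny, int Nz,
                           double Lx, double Ly, double Lz, MPI_Comm comm )
{
    std::vector<IO::MeshDataStruct> meshData(1);
    meshData[0].meshName = "domain";
    meshData[0].mesh = std::shared_ptr<IO::Mesh>(
        new IO::DomainMesh(rank_info,Nx-2,Ny-2,Nz-2,Lx,Ly,Lz) );
    int timestep = 0, format = 2;                // 2 = new format, double precision
    IO::writeData( timestep, meshData, format, comm );
}
// [End of editor's sketch]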
- MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // ************************************************************** if (nprocs != nprocx*nprocy*nprocz){ @@ -107,7 +108,7 @@ int main(int argc, char **argv) // Initialized domain and averaging framework for Two-Phase Flow Domain Dm(Nx,Ny,Nz,rank,nprocx,nprocy,nprocz,Lx,Ly,Lz,BC); - Dm.CommInit(MPI_COMM_WORLD); + Dm.CommInit(comm); TwoPhase Averages(Dm); InitializeRanks( rank, nprocx, nprocy, nprocz, iproc, jproc, kproc, @@ -115,7 +116,7 @@ int main(int argc, char **argv) rank_xy, rank_XY, rank_xY, rank_Xy, rank_xz, rank_XZ, rank_xZ, rank_Xz, rank_yz, rank_YZ, rank_yZ, rank_Yz ); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); Nz += 2; Nx = Ny = Nz; // Cubic domain @@ -189,7 +190,7 @@ int main(int argc, char **argv) } } } - MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,comm); //......................................................... // don't perform computations at the eight corners @@ -208,7 +209,7 @@ int main(int argc, char **argv) fclose(ID); // **************************************************** - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); // **************************************************** } diff --git a/tests/lbpm_color_simulator.cpp b/tests/lbpm_color_simulator.cpp index ffe7a884..dff124d2 100644 --- a/tests/lbpm_color_simulator.cpp +++ b/tests/lbpm_color_simulator.cpp @@ -101,10 +101,15 @@ inline void ZeroHalo(double *Data, int Nx, int Ny, int Nz) int main(int argc, char **argv) { // Initialize MPI - int rank,nprocs; - MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + int provided_thread_support=-1; + //MPI_Init_thread(&argc,&argv,MPI_THREAD_MULTIPLE,&provided_thread_support); + MPI_Init_thread(&argc,&argv,MPI_THREAD_SINGLE,&provided_thread_support); + if ( provided_thread_supportSDs.get(), N); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); if (rank == 0) cout << "Domain set." << endl; //....................................................................... @@ -423,8 +428,8 @@ int main(int argc, char **argv) } } } - MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); -// MPI_Allreduce(&sum_local,&porosity,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,comm); +// MPI_Allreduce(&sum_local,&porosity,1,MPI_DOUBLE,MPI_SUM,comm); porosity = pore_vol*iVol_global; if (rank==0) printf("Media porosity = %f \n",porosity); //......................................................... @@ -459,7 +464,7 @@ int main(int argc, char **argv) // Initialize communication structures in averaging domain for (i=0; iSDs(i) -= (1.0); // @@ -587,7 +592,7 @@ int main(int argc, char **argv) ScaLBL_Comm.SendHalo(Phi); ScaLBL_Comm.RecvHalo(Phi); DeviceBarrier(); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //************************************************************************* if (rank==0 && BoundaryCondition==1){ @@ -660,26 +665,33 @@ int main(int argc, char **argv) //.......create and start timer............ double starttime,stoptime,cputime; DeviceBarrier(); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); starttime = MPI_Wtime(); //......................................... err = 1.0; double sat_w_previous = 1.01; // slightly impossible value! 
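// Reduced sketch of the initialization pattern lbpm_color_simulator.cpp now
// uses: request a thread level with MPI_Init_thread, record what the library
// actually provided, and run the simulation on a communicator other than
// MPI_COMM_WORLD.  The creation of `comm` is assumed here to be MPI_Comm_dup,
// which is consistent with the MPI_Comm_free that appears before MPI_Finalize
// later in this diff; the simulation body is elided.
#include <mpi.h>
#include <cstdio>
int main( int argc, char **argv )
{
    int provided_thread_support = -1;
    MPI_Init_thread( &argc, &argv, MPI_THREAD_SINGLE, &provided_thread_support );
    MPI_Comm comm;
    MPI_Comm_dup( MPI_COMM_WORLD, &comm );
    {
        int rank, nprocs;
        MPI_Comm_rank( comm, &rank );
        MPI_Comm_size( comm, &nprocs );
        if ( rank == 0 )
            std::printf( "MPI thread support provided: %d\n", provided_thread_support );
        // ... all barriers, collectives and point-to-point calls use comm ...
    }   // objects holding the communicator are destroyed before it is freed
    MPI_Comm_free( &comm );
    MPI_Finalize();
    return 0;
}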
if (rank==0) printf("Begin timesteps: error tolerance is %f \n", tol); + // Create the thread pool + int N_threads = 0; + if ( provided_thread_support 0 ) { + // Set the affinity + int N_procs = ThreadPool::getNumberOfProcessors(); + std::vector procs(N_procs); + for (int i=0; i procs(N_procs); - for (int i=0; i tol ) { PROFILE_START("Update"); @@ -704,7 +716,7 @@ int main(int argc, char **argv) //************************************************************************* DeviceBarrier(); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //************************************************************************* // Swap the distributions for momentum transport //************************************************************************* @@ -712,7 +724,7 @@ int main(int argc, char **argv) //************************************************************************* DeviceBarrier(); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //************************************************************************* // Wait for communications to complete and unpack the distributions ScaLBL_Comm.RecvD3Q19(f_even, f_odd); @@ -729,7 +741,7 @@ int main(int argc, char **argv) SwapD3Q7(ID, B_even, B_odd, Nx, Ny, Nz); DeviceBarrier(); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //************************************************************************* // Wait for communication and unpack the D3Q7 distributions @@ -744,7 +756,7 @@ int main(int argc, char **argv) // Compute the phase indicator field //************************************************************************* DeviceBarrier(); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); ComputePhi(ID, Phi, Den, N); //************************************************************************* @@ -778,7 +790,7 @@ int main(int argc, char **argv) } //................................................................................... - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); PROFILE_STOP("Update"); // Timestep completed! 
@@ -794,7 +806,7 @@ int main(int argc, char **argv) PROFILE_STOP("Loop"); //************************************************************************ DeviceBarrier(); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); stoptime = MPI_Wtime(); if (rank==0) printf("-------------------------------------------------------------------\n"); // Compute the walltime per timestep @@ -823,7 +835,7 @@ int main(int argc, char **argv) CopyToHost(Averages->Phase.get(),Phi,N*sizeof(double)); */ // Create the MeshDataStruct - fillHalo fillData(Dm.rank_info,Nx-2,Ny-2,Nz-2,1,1,1,0,1); + fillHalo fillData(Dm.Comm,Dm.rank_info,Nx-2,Ny-2,Nz-2,1,1,1,0,1); std::vector meshData(1); meshData[0].meshName = "domain"; meshData[0].mesh = std::shared_ptr( new IO::DomainMesh(Dm.rank_info,Nx-2,Ny-2,Nz-2,Lx,Ly,Lz) ); @@ -855,7 +867,7 @@ int main(int argc, char **argv) fillData.copy(Averages->SDn,PhaseVar->data); fillData.copy(Averages->SDs,SignDistVar->data); fillData.copy(Averages->Label_NWP,BlobIDVar->data); - IO::writeData( 0, meshData, 2 ); + IO::writeData( 0, meshData, 2, comm ); /* Averages->WriteSurfaces(0); @@ -884,8 +896,9 @@ int main(int argc, char **argv) PROFILE_STOP("Main"); PROFILE_SAVE("lbpm_color_simulator",1); // **************************************************** - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); } // Limit scope so variables that contain communicators will free before MPI_Finialize + MPI_Comm_free(&comm); MPI_Finalize(); } diff --git a/tests/lbpm_color_simulator.h b/tests/lbpm_color_simulator.h index 1c31a4f3..b748511b 100644 --- a/tests/lbpm_color_simulator.h +++ b/tests/lbpm_color_simulator.h @@ -53,17 +53,19 @@ public: ThreadPool::WorkItem::d_state = 1; // Change state to in progress // Compute the global blob id and compare to the previous version PROFILE_START("Identify blobs and maps",1); + MPI_Comm newcomm; + MPI_Comm_dup(MPI_COMM_WORLD,&newcomm); double vF = 0.0; double vS = 0.0; IntArray& ids = new_index->second; - new_index->first = ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,rank_info,*phase,dist,vF,vS,ids); + new_index->first = ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,rank_info,*phase,dist,vF,vS,ids,newcomm); static int max_id = -1; new_id->first = new_index->first; new_id->second = new_index->second; if ( last_id!=NULL ) { // Compute the timestep-timestep map const IntArray& old_ids = last_id->second; - ID_map_struct map = computeIDMap(old_ids,ids); + ID_map_struct map = computeIDMap(old_ids,ids,newcomm); // Renumber the current timestep's ids getNewIDs(map,max_id,*new_list); renumberIDs(*new_list,new_id->second); @@ -74,6 +76,7 @@ public: getNewIDs(map,max_id,*new_list); writeIDMap(map,timestep,id_map_filename); } + MPI_Comm_free(&newcomm); PROFILE_STOP("Identify blobs and maps",1); ThreadPool::WorkItem::d_state = 2; // Change state to finished } @@ -240,8 +243,7 @@ void run_analysis( int timestep, int restart_interval, // Spawn a thread to write the restart file if ( (type&CreateRestart) != 0 ) { - int rank; - MPI_Comm_rank(MPI_COMM_WORLD,&rank); + int rank = MPI_WORLD_RANK(); if (pBC) { //err = fabs(sat_w - sat_w_previous); //sat_w_previous = sat_w; diff --git a/tests/lbpm_disc_pp.cpp b/tests/lbpm_disc_pp.cpp index 2edd33bb..e01c1547 100644 --- a/tests/lbpm_disc_pp.cpp +++ b/tests/lbpm_disc_pp.cpp @@ -136,8 +136,9 @@ int main(int argc, char **argv) // Initialize MPI int rank,nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); // 
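// The analysis work item in lbpm_color_simulator.h now duplicates
// MPI_COMM_WORLD before the blob-identification collectives and frees the
// duplicate when it is done, so analysis traffic never shares a communication
// context with the time-stepping loop.  Reduced sketch (the function name is
// a placeholder, not the work-item class in the header):
#include <mpi.h>
static void runDetachedAnalysis()
{
    MPI_Comm newcomm;
    MPI_Comm_dup( MPI_COMM_WORLD, &newcomm );   // private context for the analysis
    // ... the ComputeGlobalBlobIDs / computeIDMap collectives run on newcomm ...
    MPI_Comm_free( &newcomm );                  // release the context when finished
}
// Note that MPI_Comm_dup is collective over MPI_COMM_WORLD, so every rank must
// reach the analysis work item the same number of times and in the same order.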
parallel domain size (# of sub-domains) int nprocx,nprocy,nprocz; int iproc,jproc,kproc; @@ -189,21 +190,21 @@ int main(int argc, char **argv) } // ************************************************************** // Broadcast simulation parameters from rank 0 to all other procs - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //................................................. // Computational domain - MPI_Bcast(&Nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&ndiscs,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&Nx,1,MPI_INT,0,comm); + MPI_Bcast(&Ny,1,MPI_INT,0,comm); + MPI_Bcast(&Nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&ndiscs,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // ************************************************************** if (argc > 1) depth=atoi(argv[1]); @@ -221,7 +222,7 @@ int main(int argc, char **argv) rank_xy, rank_XY, rank_xY, rank_Xy, rank_xz, rank_XZ, rank_xZ, rank_Xz, rank_yz, rank_YZ, rank_yZ, rank_Yz ); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); Nz += 2; Nx = Ny = Nz; // Cubic domain @@ -274,13 +275,13 @@ int main(int argc, char **argv) //....................................................................... if (rank == 0) printf("Reading the disc packing \n"); if (rank == 0) ReadDiscPacking(ndiscs,cx,cy,rad); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Broadcast the sphere packing to all processes - MPI_Bcast(cx,ndiscs,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(cy,ndiscs,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(rad,ndiscs,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(cx,ndiscs,MPI_DOUBLE,0,comm); + MPI_Bcast(cy,ndiscs,MPI_DOUBLE,0,comm); + MPI_Bcast(rad,ndiscs,MPI_DOUBLE,0,comm); //........................................................................... - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); if (rank == 0){ cout << "Domain set." << endl; printf("************ \n"); @@ -347,7 +348,7 @@ int main(int argc, char **argv) } } sum_local = 1.0*sum; - MPI_Allreduce(&sum_local,&porosity,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&sum_local,&porosity,1,MPI_DOUBLE,MPI_SUM,comm); porosity = porosity*iVol_global; if (rank==0) printf("Media porosity = %f \n",porosity); @@ -363,7 +364,7 @@ int main(int argc, char **argv) } } } - MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,comm); //......................................................... // don't perform computations at the eight corners @@ -378,7 +379,7 @@ int main(int argc, char **argv) //...................................................................... 
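// Illustrative helper for the porosity reductions that appear in each of the
// pre-processors: count pore sites locally and reduce the sum over the passed
// communicator.  The names id, N and iVol_global mirror the variables used in
// the tests, but this helper itself is not part of the patch.
#include <mpi.h>
static double computePorosity( const char *id, int N, double iVol_global, MPI_Comm comm )
{
    double sum_local = 0.0;
    for ( int n = 0; n < N; n++ ) {
        if ( id[n] != 0 )
            sum_local += 1.0;                   // non-zero phase label = pore site
    }
    double pore_vol = 0.0;
    MPI_Allreduce( &sum_local, &pore_vol, 1, MPI_DOUBLE, MPI_SUM, comm );
    return pore_vol * iVol_global;              // iVol_global = 1/(global volume)
}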
// **************************************************** - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); // **************************************************** } diff --git a/tests/lbpm_permeability_simulator.cpp b/tests/lbpm_permeability_simulator.cpp index 4d4ed6aa..ae2d3f40 100644 --- a/tests/lbpm_permeability_simulator.cpp +++ b/tests/lbpm_permeability_simulator.cpp @@ -101,8 +101,9 @@ int main(int argc, char **argv) // Initialize MPI int rank,nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); // parallel domain size (# of sub-domains) int nprocx,nprocy,nprocz; int iproc,jproc,kproc; @@ -185,32 +186,32 @@ int main(int argc, char **argv) } // ************************************************************** // Broadcast simulation parameters from rank 0 to all other procs - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //................................................. - MPI_Bcast(&tau,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&pBC,1,MPI_LOGICAL,0,MPI_COMM_WORLD); - MPI_Bcast(&Restart,1,MPI_LOGICAL,0,MPI_COMM_WORLD); - MPI_Bcast(&din,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&dout,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Fx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Fy,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Fz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(×tepMax,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&interval,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&tol,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&tau,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&pBC,1,MPI_LOGICAL,0,comm); + MPI_Bcast(&Restart,1,MPI_LOGICAL,0,comm); + MPI_Bcast(&din,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&dout,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Fx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Fy,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Fz,1,MPI_DOUBLE,0,comm); + MPI_Bcast(×tepMax,1,MPI_INT,0,comm); + MPI_Bcast(&interval,1,MPI_INT,0,comm); + MPI_Bcast(&tol,1,MPI_DOUBLE,0,comm); // Computational domain - MPI_Bcast(&Nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&Nx,1,MPI_INT,0,comm); + MPI_Bcast(&Ny,1,MPI_INT,0,comm); + MPI_Bcast(&Nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. 
- MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); RESTART_INTERVAL=interval; // ************************************************************** @@ -247,7 +248,7 @@ int main(int argc, char **argv) rank_xy, rank_XY, rank_xY, rank_Xy, rank_xz, rank_XZ, rank_xZ, rank_Xz, rank_yz, rank_YZ, rank_yZ, rank_Yz ); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); Nz += 2; Nx = Ny = Nz; // Cubic domain @@ -288,7 +289,7 @@ int main(int argc, char **argv) // WriteLocalSolidID(LocalRankFilename, id, N); sprintf(LocalRankFilename,"%s%s","SignDist.",LocalRankString); ReadBinaryFile(LocalRankFilename, Averages.SDs.get(), N); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); if (rank == 0) cout << "Domain set." << endl; //....................................................................... @@ -338,8 +339,8 @@ int main(int argc, char **argv) } } } - MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); -// MPI_Allreduce(&sum_local,&porosity,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,comm); +// MPI_Allreduce(&sum_local,&porosity,1,MPI_DOUBLE,MPI_SUM,comm); porosity = pore_vol*iVol_global; if (rank==0) printf("Media porosity = %f \n",porosity); //......................................................... @@ -373,7 +374,7 @@ int main(int argc, char **argv) //......................................................... // Initialize communication structures in averaging domain for (i=0; i> Lz; } - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Computational domain - MPI_Bcast(&nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&nx,1,MPI_INT,0,comm); + MPI_Bcast(&ny,1,MPI_INT,0,comm); + MPI_Bcast(&nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. 
- MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Check that the number of processors >= the number of ranks if ( rank==0 ) { @@ -140,7 +141,7 @@ int main(int argc, char **argv) } } } - Dm.CommInit(MPI_COMM_WORLD); + Dm.CommInit(comm); DoubleArray SignDist(nx,ny,nz); // Read the signed distance from file @@ -164,9 +165,9 @@ int main(int argc, char **argv) } } // total Global is the number of nodes in the pore-space - MPI_Allreduce(&count,&totalGlobal,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&count,&totalGlobal,1,MPI_INT,MPI_SUM,comm); - Dm.CommInit(MPI_COMM_WORLD); + Dm.CommInit(comm); int iproc = Dm.iproc; int jproc = Dm.jproc; int kproc = Dm.kproc; @@ -212,12 +213,12 @@ int main(int argc, char **argv) sizeY = SizeY[bin]; sizeZ = SizeZ[bin]; } - MPI_Bcast(&x,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&y,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&z,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&sizeX,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&sizeY,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&sizeZ,1,MPI_INT,0,MPI_COMM_WORLD); + MPI_Bcast(&x,1,MPI_INT,0,comm); + MPI_Bcast(&y,1,MPI_INT,0,comm); + MPI_Bcast(&z,1,MPI_INT,0,comm); + MPI_Bcast(&sizeX,1,MPI_INT,0,comm); + MPI_Bcast(&sizeY,1,MPI_INT,0,comm); + MPI_Bcast(&sizeZ,1,MPI_INT,0,comm); //if (rank==0) printf("Broadcast block at %i,%i,%i \n",x,y,z); @@ -265,7 +266,7 @@ int main(int argc, char **argv) } } } - MPI_Allreduce(&count,&countGlobal,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&count,&countGlobal,1,MPI_INT,MPI_SUM,comm); sat = float(countGlobal)/totalGlobal; //if (rank==0) printf("New count=%i\n",countGlobal); //if (rank==0) printf("New saturation=%f\n",sat); @@ -342,41 +343,41 @@ int main(int argc, char **argv) PackID(Dm.sendList_YZ, Dm.sendCount_YZ ,sendID_YZ, id); //...................................................................................... 
MPI_Sendrecv(sendID_x,Dm.sendCount_x,MPI_CHAR,Dm.rank_x,sendtag, - recvID_X,Dm.recvCount_X,MPI_CHAR,Dm.rank_X,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_X,Dm.recvCount_X,MPI_CHAR,Dm.rank_X,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_X,Dm.sendCount_X,MPI_CHAR,Dm.rank_X,sendtag, - recvID_x,Dm.recvCount_x,MPI_CHAR,Dm.rank_x,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_x,Dm.recvCount_x,MPI_CHAR,Dm.rank_x,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_y,Dm.sendCount_y,MPI_CHAR,Dm.rank_y,sendtag, - recvID_Y,Dm.recvCount_Y,MPI_CHAR,Dm.rank_Y,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_Y,Dm.recvCount_Y,MPI_CHAR,Dm.rank_Y,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_Y,Dm.sendCount_Y,MPI_CHAR,Dm.rank_Y,sendtag, - recvID_y,Dm.recvCount_y,MPI_CHAR,Dm.rank_y,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_y,Dm.recvCount_y,MPI_CHAR,Dm.rank_y,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_z,Dm.sendCount_z,MPI_CHAR,Dm.rank_z,sendtag, - recvID_Z,Dm.recvCount_Z,MPI_CHAR,Dm.rank_Z,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_Z,Dm.recvCount_Z,MPI_CHAR,Dm.rank_Z,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_Z,Dm.sendCount_Z,MPI_CHAR,Dm.rank_Z,sendtag, - recvID_z,Dm.recvCount_z,MPI_CHAR,Dm.rank_z,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_z,Dm.recvCount_z,MPI_CHAR,Dm.rank_z,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_xy,Dm.sendCount_xy,MPI_CHAR,Dm.rank_xy,sendtag, - recvID_XY,Dm.recvCount_XY,MPI_CHAR,Dm.rank_XY,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_XY,Dm.recvCount_XY,MPI_CHAR,Dm.rank_XY,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_XY,Dm.sendCount_XY,MPI_CHAR,Dm.rank_XY,sendtag, - recvID_xy,Dm.recvCount_xy,MPI_CHAR,Dm.rank_xy,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_xy,Dm.recvCount_xy,MPI_CHAR,Dm.rank_xy,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_Xy,Dm.sendCount_Xy,MPI_CHAR,Dm.rank_Xy,sendtag, - recvID_xY,Dm.recvCount_xY,MPI_CHAR,Dm.rank_xY,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_xY,Dm.recvCount_xY,MPI_CHAR,Dm.rank_xY,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_xY,Dm.sendCount_xY,MPI_CHAR,Dm.rank_xY,sendtag, - recvID_Xy,Dm.recvCount_Xy,MPI_CHAR,Dm.rank_Xy,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_Xy,Dm.recvCount_Xy,MPI_CHAR,Dm.rank_Xy,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_xz,Dm.sendCount_xz,MPI_CHAR,Dm.rank_xz,sendtag, - recvID_XZ,Dm.recvCount_XZ,MPI_CHAR,Dm.rank_XZ,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_XZ,Dm.recvCount_XZ,MPI_CHAR,Dm.rank_XZ,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_XZ,Dm.sendCount_XZ,MPI_CHAR,Dm.rank_XZ,sendtag, - recvID_xz,Dm.recvCount_xz,MPI_CHAR,Dm.rank_xz,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_xz,Dm.recvCount_xz,MPI_CHAR,Dm.rank_xz,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_Xz,Dm.sendCount_Xz,MPI_CHAR,Dm.rank_Xz,sendtag, - recvID_xZ,Dm.recvCount_xZ,MPI_CHAR,Dm.rank_xZ,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_xZ,Dm.recvCount_xZ,MPI_CHAR,Dm.rank_xZ,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_xZ,Dm.sendCount_xZ,MPI_CHAR,Dm.rank_xZ,sendtag, - recvID_Xz,Dm.recvCount_Xz,MPI_CHAR,Dm.rank_Xz,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_Xz,Dm.recvCount_Xz,MPI_CHAR,Dm.rank_Xz,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_yz,Dm.sendCount_yz,MPI_CHAR,Dm.rank_yz,sendtag, - recvID_YZ,Dm.recvCount_YZ,MPI_CHAR,Dm.rank_YZ,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_YZ,Dm.recvCount_YZ,MPI_CHAR,Dm.rank_YZ,recvtag,comm,MPI_STATUS_IGNORE); 
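// The remaining directional pairs below follow the same pattern.  A small
// wrapper (hypothetical, not introduced by this patch) would collapse the
// repeated MPI_Sendrecv calls while still taking the domain communicator as
// an argument:
#include <mpi.h>
static inline void swapID( MPI_Comm comm, int sendtag, int recvtag,
                           char *sendbuf, int sendCount, int dst,
                           char *recvbuf, int recvCount, int src )
{
    MPI_Sendrecv( sendbuf, sendCount, MPI_CHAR, dst, sendtag,
                  recvbuf, recvCount, MPI_CHAR, src, recvtag,
                  comm, MPI_STATUS_IGNORE );
}
// Example usage, mirroring one pair above:
//   swapID( comm, sendtag, recvtag, sendID_x, Dm.sendCount_x, Dm.rank_x,
//                                   recvID_X, Dm.recvCount_X, Dm.rank_X );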
MPI_Sendrecv(sendID_YZ,Dm.sendCount_YZ,MPI_CHAR,Dm.rank_YZ,sendtag, - recvID_yz,Dm.recvCount_yz,MPI_CHAR,Dm.rank_yz,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_yz,Dm.recvCount_yz,MPI_CHAR,Dm.rank_yz,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_Yz,Dm.sendCount_Yz,MPI_CHAR,Dm.rank_Yz,sendtag, - recvID_yZ,Dm.recvCount_yZ,MPI_CHAR,Dm.rank_yZ,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_yZ,Dm.recvCount_yZ,MPI_CHAR,Dm.rank_yZ,recvtag,comm,MPI_STATUS_IGNORE); MPI_Sendrecv(sendID_yZ,Dm.sendCount_yZ,MPI_CHAR,Dm.rank_yZ,sendtag, - recvID_Yz,Dm.recvCount_Yz,MPI_CHAR,Dm.rank_Yz,recvtag,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + recvID_Yz,Dm.recvCount_Yz,MPI_CHAR,Dm.rank_Yz,recvtag,comm,MPI_STATUS_IGNORE); //...................................................................................... UnpackID(Dm.recvList_x, Dm.recvCount_x ,recvID_x, id); UnpackID(Dm.recvList_X, Dm.recvCount_X ,recvID_X, id); @@ -409,7 +410,7 @@ int main(int argc, char **argv) } } } - MPI_Allreduce(&count,&countGlobal,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&count,&countGlobal,1,MPI_INT,MPI_SUM,comm); sat = float(countGlobal)/totalGlobal; if (rank==0) printf("Final saturation=%f\n",sat); @@ -418,7 +419,7 @@ int main(int argc, char **argv) fwrite(id,1,N,ID); fclose(ID); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); return 0; } diff --git a/tests/lbpm_segmented_decomp.cpp b/tests/lbpm_segmented_decomp.cpp index c1f2ae1a..5fd12d39 100644 --- a/tests/lbpm_segmented_decomp.cpp +++ b/tests/lbpm_segmented_decomp.cpp @@ -18,8 +18,9 @@ int main(int argc, char **argv) // Initialize MPI int rank, nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); //....................................................................... // Reading the domain information file @@ -60,28 +61,28 @@ int main(int argc, char **argv) image >> zStart; } - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Computational domain //................................................. - MPI_Bcast(&nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&nx,1,MPI_INT,0,comm); + MPI_Bcast(&ny,1,MPI_INT,0,comm); + MPI_Bcast(&nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. 
- MPI_Bcast(&Ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&xStart,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&yStart,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&zStart,1,MPI_INT,0,MPI_COMM_WORLD); + MPI_Bcast(&Ny,1,MPI_INT,0,comm); + MPI_Bcast(&Ny,1,MPI_INT,0,comm); + MPI_Bcast(&Nz,1,MPI_INT,0,comm); + MPI_Bcast(&xStart,1,MPI_INT,0,comm); + MPI_Bcast(&yStart,1,MPI_INT,0,comm); + MPI_Bcast(&zStart,1,MPI_INT,0,comm); //................................................. - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Check that the number of processors >= the number of ranks if ( rank==0 ) { @@ -103,7 +104,7 @@ int main(int argc, char **argv) fclose(SEGDAT); printf("Read segmented data from %s \n",Filename); } - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Get the rank info int N = (nx+2)*(ny+2)*(nz+2); @@ -116,7 +117,7 @@ int main(int argc, char **argv) } } } - Dm.CommInit(MPI_COMM_WORLD); + Dm.CommInit(comm); // number of sites to use for periodic boundary condition transition zone int z_transition_size = (nprocz*nz - (Nz - zStart))/2; @@ -163,7 +164,7 @@ int main(int argc, char **argv) } else{ printf("Sending data to process %i \n", rnk); - MPI_Send(tmp,N,MPI_CHAR,rnk,15,MPI_COMM_WORLD); + MPI_Send(tmp,N,MPI_CHAR,rnk,15,comm); } } } @@ -172,9 +173,9 @@ int main(int argc, char **argv) else{ // Recieve the subdomain from rank = 0 printf("Ready to recieve data %i at process %i \n", N,rank); - MPI_Recv(Dm.id,N,MPI_CHAR,0,15,MPI_COMM_WORLD,MPI_STATUS_IGNORE); + MPI_Recv(Dm.id,N,MPI_CHAR,0,15,comm,MPI_STATUS_IGNORE); } - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); nx+=2; ny+=2; nz+=2; int count = 0; @@ -226,7 +227,7 @@ int main(int argc, char **argv) fwrite(symid,1,N,SYMID); fclose(SYMID); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); return 0; diff --git a/tests/lbpm_segmented_pp.cpp b/tests/lbpm_segmented_pp.cpp index 8f499157..6818a35e 100644 --- a/tests/lbpm_segmented_pp.cpp +++ b/tests/lbpm_segmented_pp.cpp @@ -31,8 +31,9 @@ int main(int argc, char **argv) // Initialize MPI int rank, nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); //....................................................................... // Reading the domain information file @@ -73,20 +74,20 @@ int main(int argc, char **argv) image >> zStart; } - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Computational domain - MPI_Bcast(&nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&nx,1,MPI_INT,0,comm); + MPI_Bcast(&ny,1,MPI_INT,0,comm); + MPI_Bcast(&nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. 
- MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Check that the number of processors >= the number of ranks if ( rank==0 ) { @@ -108,7 +109,7 @@ int main(int argc, char **argv) fread(Dm.id,1,N,ID); fclose(ID); // Initialize the domain and communication - Dm.CommInit(MPI_COMM_WORLD); + Dm.CommInit(comm); nx+=2; ny+=2; nz+=2; int count = 0; @@ -209,7 +210,7 @@ int main(int argc, char **argv) } // Create the MeshDataStruct - fillHalo fillData(Dm.rank_info,Nx-2,Ny-2,Nz-2,1,1,1,0,1); + fillHalo fillData(Dm.Comm,Dm.rank_info,Nx-2,Ny-2,Nz-2,1,1,1,0,1); std::vector meshData(1); meshData[0].meshName = "domain"; meshData[0].mesh = std::shared_ptr( new IO::DomainMesh(Dm.rank_info,Nx-2,Ny-2,Nz-2,Lx,Ly,Lz) ); @@ -235,7 +236,7 @@ int main(int argc, char **argv) fillData.copy(Averages.SDn,PhaseVar->data); fillData.copy(Averages.SDs,SignDistVar->data); fillData.copy(Averages.Label_NWP,BlobIDVar->data); - IO::writeData( 0, meshData, 2 ); + IO::writeData( 0, meshData, 2, comm ); // sprintf(LocalRankFilename,"Phase.%05i",rank); // FILE *PHASE = fopen(LocalRankFilename,"wb"); @@ -261,7 +262,7 @@ int main(int argc, char **argv) Averages.SortBlobs(); Averages.PrintComponents(timestep); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); return 0; diff --git a/tests/lbpm_sphere_pp.cpp b/tests/lbpm_sphere_pp.cpp index bfef9b7f..b83abb5b 100644 --- a/tests/lbpm_sphere_pp.cpp +++ b/tests/lbpm_sphere_pp.cpp @@ -28,8 +28,9 @@ int main(int argc, char **argv) // Initialize MPI int rank,nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); // parallel domain size (# of sub-domains) int nprocx,nprocy,nprocz; int iproc,jproc,kproc; @@ -80,21 +81,21 @@ int main(int argc, char **argv) } // ************************************************************** // Broadcast simulation parameters from rank 0 to all other procs - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); //................................................. // Computational domain - MPI_Bcast(&Nx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Ny,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Nz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocx,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocy,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nprocz,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&nspheres,1,MPI_INT,0,MPI_COMM_WORLD); - MPI_Bcast(&Lx,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Ly,1,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(&Lz,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&Nx,1,MPI_INT,0,comm); + MPI_Bcast(&Ny,1,MPI_INT,0,comm); + MPI_Bcast(&Nz,1,MPI_INT,0,comm); + MPI_Bcast(&nprocx,1,MPI_INT,0,comm); + MPI_Bcast(&nprocy,1,MPI_INT,0,comm); + MPI_Bcast(&nprocz,1,MPI_INT,0,comm); + MPI_Bcast(&nspheres,1,MPI_INT,0,comm); + MPI_Bcast(&Lx,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Ly,1,MPI_DOUBLE,0,comm); + MPI_Bcast(&Lz,1,MPI_DOUBLE,0,comm); //................................................. - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // ************************************************************** @@ -110,7 +111,7 @@ int main(int argc, char **argv) rank_xy, rank_XY, rank_xY, rank_Xy, rank_xz, rank_XZ, rank_xZ, rank_Xz, rank_yz, rank_YZ, rank_yZ, rank_Yz ); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); Nz += 2; Nx = Ny = Nz; // Cubic domain @@ -160,14 +161,14 @@ int main(int argc, char **argv) //....................................................................... 
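// IO::writeData now takes the communicator explicitly, so the writer's
// internal barrier and gather no longer touch MPI_COMM_WORLD.  A minimal
// usage sketch: the include path and the IO::MeshDataStruct element type are
// assumed from context, and meshData is expected to be populated as in the
// tests above.
#include <mpi.h>
#include <vector>
#include "IO/Writer.h"
static void writeDump( int timestep, const std::vector<IO::MeshDataStruct> &meshData )
{
    MPI_Comm comm;
    MPI_Comm_dup( MPI_COMM_WORLD, &comm );   // the writer works on a private duplicate
    IO::writeData( timestep, meshData, 2 /* format code used by the tests above */, comm );
    MPI_Comm_free( &comm );
}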
if (rank == 0) printf("Reading the sphere packing \n"); if (rank == 0) ReadSpherePacking(nspheres,cx,cy,cz,rad); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); // Broadcast the sphere packing to all processes - MPI_Bcast(cx,nspheres,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(cy,nspheres,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(cz,nspheres,MPI_DOUBLE,0,MPI_COMM_WORLD); - MPI_Bcast(rad,nspheres,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(cx,nspheres,MPI_DOUBLE,0,comm); + MPI_Bcast(cy,nspheres,MPI_DOUBLE,0,comm); + MPI_Bcast(cz,nspheres,MPI_DOUBLE,0,comm); + MPI_Bcast(rad,nspheres,MPI_DOUBLE,0,comm); //........................................................................... - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); if (rank == 0) cout << "Domain set." << endl; if (rank == 0){ // Compute the Sauter mean diameter @@ -181,7 +182,7 @@ int main(int argc, char **argv) D = 6.0*(Nx-2)*nprocx*totVol / totArea / Lx; printf("Sauter Mean Diameter (computed from sphere packing) = %f \n",D); } - MPI_Bcast(&D,1,MPI_DOUBLE,0,MPI_COMM_WORLD); + MPI_Bcast(&D,1,MPI_DOUBLE,0,comm); //....................................................................... SignedDistance(SignDist.get(),nspheres,cx,cy,cz,rad,Lx,Ly,Lz,Nx,Ny,Nz, @@ -214,7 +215,7 @@ int main(int argc, char **argv) } } sum_local = 1.0*sum; - MPI_Allreduce(&sum_local,&porosity,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&sum_local,&porosity,1,MPI_DOUBLE,MPI_SUM,comm); porosity = porosity*iVol_global; if (rank==0) printf("Media porosity = %f \n",porosity); @@ -230,7 +231,7 @@ int main(int argc, char **argv) } } } - MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD); + MPI_Allreduce(&sum_local,&pore_vol,1,MPI_DOUBLE,MPI_SUM,comm); //......................................................... // don't perform computations at the eight corners @@ -245,7 +246,7 @@ int main(int argc, char **argv) //...................................................................... 
// **************************************************** - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); MPI_Finalize(); // **************************************************** } diff --git a/tests/testCommunication.cpp b/tests/testCommunication.cpp index 56257159..04b83719 100644 --- a/tests/testCommunication.cpp +++ b/tests/testCommunication.cpp @@ -221,7 +221,7 @@ int testHalo( MPI_Comm comm, int nprocx, int nprocy, int nprocz, int depth ) } // Communicate the halo - fillHalo fillData(rank_info,nx,ny,nz,1,1,1,0,depth); + fillHalo fillData(comm,rank_info,nx,ny,nz,1,1,1,0,depth); fillData.fill(array); // Check the results @@ -257,41 +257,42 @@ int main(int argc, char **argv) // Initialize MPI int rank,nprocs; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); - MPI_Comm_size(MPI_COMM_WORLD,&nprocs); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); + MPI_Comm_size(comm,&nprocs); // Run the test with different domains int N_errors = 0; - N_errors += test_communication( MPI_COMM_WORLD, nprocs, 1, 1 ); - N_errors += test_communication( MPI_COMM_WORLD, 1, nprocs, 1 ); - N_errors += test_communication( MPI_COMM_WORLD, 1, 1, nprocs ); + N_errors += test_communication( comm, nprocs, 1, 1 ); + N_errors += test_communication( comm, 1, nprocs, 1 ); + N_errors += test_communication( comm, 1, 1, nprocs ); if ( nprocs==4 ) { - N_errors += test_communication( MPI_COMM_WORLD, 2, 2, 1 ); - N_errors += test_communication( MPI_COMM_WORLD, 2, 1, 2 ); - N_errors += test_communication( MPI_COMM_WORLD, 1, 2, 2 ); + N_errors += test_communication( comm, 2, 2, 1 ); + N_errors += test_communication( comm, 2, 1, 2 ); + N_errors += test_communication( comm, 1, 2, 2 ); } // Run the halo tests with different domains - N_errors += testHalo( MPI_COMM_WORLD, nprocs, 1, 1, 1 ); - N_errors += testHalo( MPI_COMM_WORLD, 1, nprocs, 1, 1 ); - N_errors += testHalo( MPI_COMM_WORLD, 1, 1, nprocs, 1 ); - N_errors += testHalo( MPI_COMM_WORLD, nprocs, 1, 1, 3 ); - N_errors += testHalo( MPI_COMM_WORLD, 1, nprocs, 1, 3 ); - N_errors += testHalo( MPI_COMM_WORLD, 1, 1, nprocs, 3 ); + N_errors += testHalo( comm, nprocs, 1, 1, 1 ); + N_errors += testHalo( comm, 1, nprocs, 1, 1 ); + N_errors += testHalo( comm, 1, 1, nprocs, 1 ); + N_errors += testHalo( comm, nprocs, 1, 1, 3 ); + N_errors += testHalo( comm, 1, nprocs, 1, 3 ); + N_errors += testHalo( comm, 1, 1, nprocs, 3 ); if ( nprocs==4 ) { - N_errors += testHalo( MPI_COMM_WORLD, 2, 2, 1, 1 ); - N_errors += testHalo( MPI_COMM_WORLD, 2, 1, 2, 1 ); - N_errors += testHalo( MPI_COMM_WORLD, 1, 2, 2, 1 ); + N_errors += testHalo( comm, 2, 2, 1, 1 ); + N_errors += testHalo( comm, 2, 1, 2, 1 ); + N_errors += testHalo( comm, 1, 2, 2, 1 ); } if ( nprocs==8 ) { - N_errors += testHalo( MPI_COMM_WORLD, 2, 2, 2, 1 ); + N_errors += testHalo( comm, 2, 2, 2, 1 ); } // Finished - MPI_Barrier(MPI_COMM_WORLD); + MPI_Barrier(comm); int N_errors_global=0; - MPI_Allreduce( &N_errors, &N_errors_global, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); - MPI_Barrier(MPI_COMM_WORLD); + MPI_Allreduce( &N_errors, &N_errors_global, 1, MPI_INT, MPI_SUM, comm ); + MPI_Barrier(comm); MPI_Finalize(); if ( rank==0 ) { if ( N_errors_global==0 ) diff --git a/tests/testUtilities.cpp b/tests/testUtilities.cpp index 895e66bf..72c637d1 100644 --- a/tests/testUtilities.cpp +++ b/tests/testUtilities.cpp @@ -45,7 +45,8 @@ int main(int argc, char *argv[]) { int rank = 0; MPI_Init(&argc,&argv); - MPI_Comm_rank(MPI_COMM_WORLD,&rank); + MPI_Comm comm = MPI_COMM_WORLD; + MPI_Comm_rank(comm,&rank); UnitTest ut; 
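// One payoff of threading the communicator through every routine: the same
// tests and analysis can run on a subcommunicator.  A reduced sketch (not part
// of the test suite) that splits MPI_COMM_WORLD into two groups, each of which
// could be handed to communicator-aware routines such as test_communication()
// or testHalo() above:
#include <mpi.h>
int main( int argc, char **argv )
{
    MPI_Init( &argc, &argv );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm split;
    MPI_Comm_split( MPI_COMM_WORLD, rank % 2, rank, &split );   // two independent groups
    int group_size = 0;
    MPI_Comm_size( split, &group_size );
    // Each group can now exercise the routines independently, e.g.
    //   test_communication( split, group_size, 1, 1 );
    MPI_Comm_free( &split );
    MPI_Finalize();
    return 0;
}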
Utilities::setAbortBehavior( true, true, true );