diff --git a/analysis/Minkowski.cpp b/analysis/Minkowski.cpp index 668875b9..9dfff477 100644 --- a/analysis/Minkowski.cpp +++ b/analysis/Minkowski.cpp @@ -4,7 +4,7 @@ #include "common/Domain.h" #include "common/Communication.h" #include "common/Utilities.h" -#include "common/MPI_Helpers.h" +#include "common/MPI.h" #include "IO/MeshDatabase.h" #include "IO/Reader.h" #include "IO/Writer.h" @@ -123,13 +123,13 @@ void Minkowski::ComputeScalar(const DoubleArray& Field, const double isovalue) // convert X for 2D manifold to 3D object Xi *= 0.5; - MPI_Barrier(Dm->Comm); + Dm->Comm.barrier(); // Phase averages - MPI_Allreduce(&Vi,&Vi_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Xi,&Xi_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Ai,&Ai_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Ji,&Ji_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Barrier(Dm->Comm); + Vi_global = Dm->Comm.sumReduce( Vi ); + Xi_global = Dm->Comm.sumReduce( Xi ); + Ai_global = Dm->Comm.sumReduce( Ai ); + Ji_global = Dm->Comm.sumReduce( Ji ); + Dm->Comm.barrier(); PROFILE_STOP("ComputeScalar"); } @@ -220,7 +220,7 @@ int Minkowski::MeasureConnectedPathway(){ double vF=0.0; n_connected_components = ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,Dm->rank_info,distance,distance,vF,vF,label,Dm->Comm); // int n_connected_components = ComputeGlobalPhaseComponent(Nx-2,Ny-2,Nz-2,Dm->rank_info,const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, Dm->Comm ) - MPI_Barrier(Dm->Comm); + Dm->Comm.barrier(); for (int k=0; krank_info,distance,distance,vF,vF,label,Dm->Comm); // int n_connected_components = ComputeGlobalPhaseComponent(Nx-2,Ny-2,Nz-2,Dm->rank_info,const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, Dm->Comm ) - MPI_Barrier(Dm->Comm); + Dm->Comm.barrier(); + for (int k=0; kComm); + Dm->Comm.barrier(); Dm->AggregateLabels( filename ); diff --git a/analysis/TwoPhase.cpp b/analysis/TwoPhase.cpp index 9b2e5fd8..d878a663 100644 --- a/analysis/TwoPhase.cpp +++ b/analysis/TwoPhase.cpp @@ -5,7 +5,7 @@ #include "common/Domain.h" #include "common/Communication.h" #include "common/Utilities.h" -#include "common/MPI_Helpers.h" +#include "common/MPI.h" #include "IO/MeshDatabase.h" #include "IO/Reader.h" #include "IO/Writer.h" @@ -882,7 +882,7 @@ void TwoPhase::ComponentAverages() } } - MPI_Barrier(Dm->Comm); + Dm->Comm.barrier(); if (Dm->rank()==0){ printf("Component averages computed locally -- reducing result... \n"); } @@ -890,14 +890,14 @@ void TwoPhase::ComponentAverages() RecvBuffer.resize(BLOB_AVG_COUNT,NumberComponents_NWP); /* for (int b=0; bComm); - MPI_Allreduce(&ComponentAverages_NWP(0,b),&RecvBuffer(0),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,Dm->Comm); + Dm->Comm.barrier(); + Dm->Comm.sumReduce(&ComponentAverages_NWP(0,b),&RecvBuffer(0),BLOB_AVG_COUNT); for (int idx=0; idxComm); - MPI_Allreduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT*NumberComponents_NWP, MPI_DOUBLE,MPI_SUM,Dm->Comm); - // MPI_Reduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,0,Dm->Comm); + Dm->Comm.barrier(); + Dm->Comm.sumReduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT*NumberComponents_NWP); + // Dm->Comm.sumReduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT); if (Dm->rank()==0){ printf("rescaling... 
\n"); @@ -993,9 +993,8 @@ void TwoPhase::ComponentAverages() // reduce the wetting phase averages for (int b=0; bComm); -// MPI_Allreduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Reduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,0,Dm->Comm); + Dm->Comm.barrier(); + Dm->Comm.sumReduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT); for (int idx=0; idxComm); - MPI_Allreduce(&nwp_volume,&nwp_volume_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&wp_volume,&wp_volume_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&awn,&awn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&ans,&ans_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&aws,&aws_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&lwns,&lwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&As,&As_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Jwn,&Jwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Kwn,&Kwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&KGwns,&KGwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&KNwns,&KNwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&efawns,&efawns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&wwndnw,&wwndnw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&wwnsdnwn,&wwnsdnwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Jwnwwndnw,&Jwnwwndnw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); + Dm->Comm.barrier(); + nwp_volume_global = Dm->Comm.sumReduce( nwp_volume ); + wp_volume_global = Dm->Comm.sumReduce( wp_volume ); + awn_global = Dm->Comm.sumReduce( awn ); + ans_global = Dm->Comm.sumReduce( ans ); + aws_global = Dm->Comm.sumReduce( aws ); + lwns_global = Dm->Comm.sumReduce( lwns ); + As_global = Dm->Comm.sumReduce( As ); + Jwn_global = Dm->Comm.sumReduce( Jwn ); + Kwn_global = Dm->Comm.sumReduce( Kwn ); + KGwns_global = Dm->Comm.sumReduce( KGwns ); + KNwns_global = Dm->Comm.sumReduce( KNwns ); + efawns_global = Dm->Comm.sumReduce( efawns ); + wwndnw_global = Dm->Comm.sumReduce( wwndnw ); + wwnsdnwn_global = Dm->Comm.sumReduce( wwnsdnwn ); + Jwnwwndnw_global = Dm->Comm.sumReduce( Jwnwwndnw ); // Phase averages - MPI_Allreduce(&vol_w,&vol_w_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&vol_n,&vol_n_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&paw,&paw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&pan,&pan_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&vaw(0),&vaw_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&van(0),&van_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&vawn(0),&vawn_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&vawns(0),&vawns_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Gwn(0),&Gwn_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Gns(0),&Gns_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Gws(0),&Gws_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&trawn,&trawn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&trJwn,&trJwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&trRwn,&trRwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&euler,&euler_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&An,&An_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Jn,&Jn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - MPI_Allreduce(&Kn,&Kn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm); - - MPI_Barrier(Dm->Comm); + vol_w_global = Dm->Comm.sumReduce( vol_w ); 
+ vol_n_global = Dm->Comm.sumReduce( vol_n ); + paw_global = Dm->Comm.sumReduce( paw ); + pan_global = Dm->Comm.sumReduce( pan ); + for (int idx=0; idx<3; idx++) + vaw_global(idx) = Dm->Comm.sumReduce( vaw(idx) ); + for (int idx=0; idx<3; idx++) + van_global(idx) = Dm->Comm.sumReduce( van(idx)); + for (int idx=0; idx<3; idx++) + vawn_global(idx) = Dm->Comm.sumReduce( vawn(idx) ); + for (int idx=0; idx<3; idx++) + vawns_global(idx) = Dm->Comm.sumReduce( vawns(idx) ); + for (int idx=0; idx<6; idx++){ + Gwn_global(idx) = Dm->Comm.sumReduce( Gwn(idx) ); + Gns_global(idx) = Dm->Comm.sumReduce( Gns(idx) ); + Gws_global(idx) = Dm->Comm.sumReduce( Gws(idx) ); + } + trawn_global = Dm->Comm.sumReduce( trawn ); + trJwn_global = Dm->Comm.sumReduce( trJwn ); + trRwn_global = Dm->Comm.sumReduce( trRwn ); + euler_global = Dm->Comm.sumReduce( euler ); + An_global = Dm->Comm.sumReduce( An ); + Jn_global = Dm->Comm.sumReduce( Jn ); + Kn_global = Dm->Comm.sumReduce( Kn ); + Dm->Comm.barrier(); // Normalize the phase averages // (density of both components = 1.0) diff --git a/analysis/TwoPhase.h b/analysis/TwoPhase.h index fddd04e8..4d500a89 100644 --- a/analysis/TwoPhase.h +++ b/analysis/TwoPhase.h @@ -12,7 +12,7 @@ #include "common/Domain.h" #include "common/Communication.h" #include "common/Utilities.h" -#include "common/MPI_Helpers.h" +#include "common/MPI.h" #include "IO/MeshDatabase.h" #include "IO/Reader.h" #include "IO/Writer.h" diff --git a/analysis/analysis.cpp b/analysis/analysis.cpp index 7587f3c5..4298750e 100644 --- a/analysis/analysis.cpp +++ b/analysis/analysis.cpp @@ -188,7 +188,7 @@ int ComputeLocalPhaseComponent(const IntArray &PhaseID, int &VALUE, BlobIDArray /****************************************************************** * Reorder the global blob ids * ******************************************************************/ -static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int ngz, MPI_Comm comm ) +static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int ngz, const Utilities::MPI& comm ) { if ( N_blobs==0 ) return 0; @@ -212,7 +212,7 @@ static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int } } ASSERT(max_id > map1(N_blobs); int N_blobs2 = 0; for (int i=0; i& N_recv, int64_t *send_buf, std::vector& recv_buf, std::map& remote_map, - MPI_Comm comm ) + const Utilities::MPI& comm ) { std::vector send_req(neighbors.size()); std::vector recv_req(neighbors.size()); - std::vector status(neighbors.size()); - std::map::const_iterator it = map.begin(); + auto it = map.begin(); ASSERT(N_send==(int)map.size()); for (size_t i=0; ifirst; send_buf[2*i+1] = it->second.new_id; } for (size_t i=0; ifirst] = it->second.new_id; } for (size_t i=0; i& remote_map, @@ -304,18 +303,18 @@ static bool updateLocalIds( const std::map& remote_map, return changed; } static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info, - int nblobs, BlobIDArray& IDs, MPI_Comm comm ) + int nblobs, BlobIDArray& IDs, const Utilities::MPI& comm ) { PROFILE_START("LocalToGlobalIDs",1); const int rank = rank_info.rank[1][1][1]; - int nprocs = comm_size(comm); + int nprocs = comm.getSize(); const int ngx = (IDs.size(0)-nx)/2; const int ngy = (IDs.size(1)-ny)/2; const int ngz = (IDs.size(2)-nz)/2; // Get the number of blobs for each rank std::vector N_blobs(nprocs,0); PROFILE_START("LocalToGlobalIDs-Allgather",1); - MPI_Allgather(&nblobs,1,MPI_INT,getPtr(N_blobs),1,MPI_INT,comm); + comm.allGather(nblobs,getPtr(N_blobs)); 
PROFILE_STOP("LocalToGlobalIDs-Allgather",1); int64_t N_blobs_tot = 0; int offset = 0; @@ -363,13 +362,12 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_ std::vector N_recv(neighbors.size(),0); std::vector send_req(neighbors.size()); std::vector recv_req(neighbors.size()); - std::vector status(neighbors.size()); for (size_t i=0; i recv_buf(neighbors.size()); @@ -398,8 +396,7 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_ bool changed = updateLocalIds( remote_map, map ); // Check if we are finished int test = changed ? 1:0; - int result = 0; - MPI_Allreduce(&test,&result,1,MPI_INT,MPI_SUM,comm); + int result = comm.sumReduce( test ); if ( result==0 ) break; } @@ -435,7 +432,7 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_ } int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info, const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS, - BlobIDArray& GlobalBlobID, MPI_Comm comm ) + BlobIDArray& GlobalBlobID, const Utilities::MPI& comm ) { PROFILE_START("ComputeGlobalBlobIDs"); // First compute the local ids @@ -446,7 +443,7 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_inf return nglobal; } int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info, - const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, MPI_Comm comm ) + const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, const Utilities::MPI& comm ) { PROFILE_START("ComputeGlobalPhaseComponent"); // First compute the local ids @@ -462,37 +459,27 @@ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& r * Compute the mapping of blob ids between timesteps * ******************************************************************/ typedef std::map > map_type; -template inline MPI_Datatype getMPIType(); -template<> inline MPI_Datatype getMPIType() { return MPI_INT; } -template<> inline MPI_Datatype getMPIType() { - if ( sizeof(int64_t)==sizeof(long int) ) - return MPI_LONG; - else if ( sizeof(int64_t)==sizeof(double) ) - return MPI_DOUBLE; -} template -void gatherSet( std::set& set, MPI_Comm comm ) +void gatherSet( std::set& set, const Utilities::MPI& comm ) { - int nprocs = comm_size(comm); - MPI_Datatype type = getMPIType(); + int nprocs = comm.getSize(); std::vector send_data(set.begin(),set.end()); int send_count = send_data.size(); std::vector recv_count(nprocs,0), recv_disp(nprocs,0); - MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,comm); + comm.allGather( send_count, getPtr(recv_count) ); for (int i=1; i recv_data(recv_disp[nprocs-1]+recv_count[nprocs-1]); - MPI_Allgatherv(getPtr(send_data),send_count,type, - getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type,comm); + comm.allGather( getPtr(send_data), send_count, getPtr(recv_data), + getPtr(recv_count), getPtr(recv_disp), true ); for (size_t i=0; i(); + int nprocs = comm.getSize(); std::vector send_data; - for (map_type::const_iterator it=src_map.begin(); it!=src_map.end(); ++it) { + for (auto it=src_map.begin(); it!=src_map.end(); ++it) { int id = it->first; const std::map& src_ids = it->second; send_data.push_back(id); @@ -505,21 +492,21 @@ void gatherSrcIDMap( map_type& src_map, MPI_Comm comm ) } int send_count = send_data.size(); std::vector recv_count(nprocs,0), recv_disp(nprocs,0); - MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,comm); + 
comm.allGather(send_count,getPtr(recv_count)); for (int i=1; i recv_data(recv_disp[nprocs-1]+recv_count[nprocs-1]); - MPI_Allgatherv(getPtr(send_data),send_count,type, - getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type,comm); + comm.allGather(getPtr(send_data),send_count, + getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),true); size_t i=0; src_map.clear(); while ( i < recv_data.size() ) { BlobIDType id = recv_data[i]; size_t count = recv_data[i+1]; i += 2; - std::map& src_ids = src_map[id]; + auto& src_ids = src_map[id]; for (size_t j=0; j::iterator it = src_ids.find(recv_data[i]); + auto it = src_ids.find(recv_data[i]); if ( it == src_ids.end() ) src_ids.insert(std::pair(recv_data[i],recv_data[i+1])); else @@ -538,7 +525,7 @@ void addSrcDstIDs( BlobIDType src_id, map_type& src_map, map_type& dst_map, } } ID_map_struct computeIDMap( int nx, int ny, int nz, - const BlobIDArray& ID1, const BlobIDArray& ID2, MPI_Comm comm ) + const BlobIDArray& ID1, const BlobIDArray& ID2, const Utilities::MPI& comm ) { ASSERT(ID1.size()==ID2.size()); PROFILE_START("computeIDMap"); @@ -780,7 +767,7 @@ void renumberIDs( const std::vector& new_ids, BlobIDArray& IDs ) ******************************************************************/ void writeIDMap( const ID_map_struct& map, long long int timestep, const std::string& filename ) { - int rank = MPI_WORLD_RANK(); + int rank = Utilities::MPI( MPI_COMM_WORLD ).getRank(); if ( rank!=0 ) return; bool empty = map.created.empty() && map.destroyed.empty() && diff --git a/analysis/analysis.h b/analysis/analysis.h index 2ce531b1..ec377995 100644 --- a/analysis/analysis.h +++ b/analysis/analysis.h @@ -58,7 +58,7 @@ int ComputeLocalPhaseComponent( const IntArray &PhaseID, int &VALUE, IntArray &C */ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info, const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS, - BlobIDArray& GlobalBlobID, MPI_Comm comm ); + BlobIDArray& GlobalBlobID, const Utilities::MPI& comm ); /*! @@ -75,7 +75,7 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_inf * @return Return the number of components in the specified phase */ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info, - const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, MPI_Comm comm ); + const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, const Utilities::MPI& comm ); /*! @@ -87,7 +87,7 @@ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& r * @param[in] nz Number of elements in the z-direction * @param[in/out] ID The ids of the blobs */ -void ReorderBlobIDs( BlobIDArray& ID, MPI_Comm comm ); +void ReorderBlobIDs( BlobIDArray& ID, const Utilities::MPI& comm ); typedef std::pair > BlobIDSplitStruct; @@ -120,7 +120,7 @@ struct ID_map_struct { * @param[in] ID1 The blob ids at the first timestep * @param[in] ID2 The blob ids at the second timestep */ -ID_map_struct computeIDMap( int nx, int ny, int nz, const BlobIDArray& ID1, const BlobIDArray& ID2, MPI_Comm comm ); +ID_map_struct computeIDMap( int nx, int ny, int nz, const BlobIDArray& ID1, const BlobIDArray& ID2, const Utilities::MPI& comm ); /*! 
diff --git a/analysis/distance.cpp b/analysis/distance.cpp index fd48f7c7..9c605e1e 100644 --- a/analysis/distance.cpp +++ b/analysis/distance.cpp @@ -176,154 +176,12 @@ void CalcVecDist( Array &d, const Array &ID0, const Domain &Dm, // Update distance double err = calcVecUpdateInterior( d, dx[0], dx[1], dx[2] ); // Check if we are finished - err = maxReduce( Dm.Comm, err ); + err = Dm.Comm.maxReduce( err ); if ( err < tol ) break; } } -double Eikonal(DoubleArray &Distance, const Array &ID, Domain &Dm, int timesteps, const std::array& periodic){ - - /* - * This routine converts the data in the Distance array to a signed distance - * by solving the equation df/dt = sign(1-|grad f|), where Distance provides - * the values of f on the mesh associated with domain Dm - * It has been tested with segmented data initialized to values [-1,1] - * and will converge toward the signed distance to the surface bounding the associated phases - * - * Reference: - * Min C (2010) On reinitializing level set functions, Journal of Computational Physics229 - */ - - int i,j,k; - double dt=0.1; - double Dx,Dy,Dz; - double Dxp,Dxm,Dyp,Dym,Dzp,Dzm; - double Dxxp,Dxxm,Dyyp,Dyym,Dzzp,Dzzm; - double sign,norm; - double LocalVar,GlobalVar,LocalMax,GlobalMax; - - int xdim,ydim,zdim; - xdim=Dm.Nx-2; - ydim=Dm.Ny-2; - zdim=Dm.Nz-2; - //fillHalo fillData(Dm.Comm, Dm.rank_info,xdim,ydim,zdim,1,1,1,0,1); - fillHalo fillData( Dm.Comm, Dm.rank_info, {xdim, ydim, zdim}, {1,1,1}, 50, 1, {true,true,true}, periodic ); - - // Arrays to store the second derivatives - DoubleArray Dxx(Dm.Nx,Dm.Ny,Dm.Nz); - DoubleArray Dyy(Dm.Nx,Dm.Ny,Dm.Nz); - DoubleArray Dzz(Dm.Nx,Dm.Ny,Dm.Nz); - - int count = 0; - while (count < timesteps){ - - // Communicate the halo of values - fillData.fill(Distance); - - // Compute second order derivatives - for (k=1;k 0.f) Dx = Dxp*Dxp; - else Dx = Dxm*Dxm; - - if (Dyp + Dym > 0.f) Dy = Dyp*Dyp; - else Dy = Dym*Dym; - - if (Dzp + Dzm > 0.f) Dz = Dzp*Dzp; - else Dz = Dzm*Dzm; - } - else{ - - if (Dxp + Dxm < 0.f) Dx = Dxp*Dxp; - else Dx = Dxm*Dxm; - - if (Dyp + Dym < 0.f) Dy = Dyp*Dyp; - else Dy = Dym*Dym; - - if (Dzp + Dzm < 0.f) Dz = Dzp*Dzp; - else Dz = Dzm*Dzm; - } - - //Dx = max(Dxp*Dxp,Dxm*Dxm); - //Dy = max(Dyp*Dyp,Dym*Dym); - //Dz = max(Dzp*Dzp,Dzm*Dzm); - - norm=sqrt(Dx + Dy + Dz); - if (norm > 1.0) norm=1.0; - - Distance(i,j,k) += dt*sign*(1.0 - norm); - LocalVar += dt*sign*(1.0 - norm); - - if (fabs(dt*sign*(1.0 - norm)) > LocalMax) - LocalMax = fabs(dt*sign*(1.0 - norm)); - } - } - } - - MPI_Allreduce(&LocalVar,&GlobalVar,1,MPI_DOUBLE,MPI_SUM,Dm.Comm); - MPI_Allreduce(&LocalMax,&GlobalMax,1,MPI_DOUBLE,MPI_MAX,Dm.Comm); - GlobalVar /= Dm.Volume; - count++; - - if (count%50 == 0 && Dm.rank()==0 ) - printf("Time=%i, Max variation=%f, Global variation=%f \n",count,GlobalMax,GlobalVar); - - if (fabs(GlobalMax) < 1e-5){ - if (Dm.rank()==0) printf("Exiting with max tolerance of 1e-5 \n"); - count=timesteps; - } - } - return GlobalVar; -} // Explicit instantiations template void CalcDist( Array&, const Array&, const Domain&, const std::array&, const std::array& ); diff --git a/analysis/distance.h b/analysis/distance.h index d6c2740c..b3fc870e 100644 --- a/analysis/distance.h +++ b/analysis/distance.h @@ -16,16 +16,6 @@ struct Vec { }; inline bool operator<(const Vec& l, const Vec& r){ return l.x*l.x+l.y*l.y+l.z*l.z < r.x*r.x+r.y*r.y+r.z*r.z; } -inline double minmod(double &a, double &b){ - - double value; - - value = a; - if ( a*b < 0.0) value=0.0; - else if (fabs(a) > fabs(b)) value = b; - - return value; -} 
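[Editor's note] Every hunk in this patch converts a raw MPI call (MPI_Barrier, MPI_Allreduce, MPI_Allgather, ...) into a member call on Dm->Comm, which is assumed to be a Utilities::MPI communicator wrapper declared in the new common/MPI.h. That header is not part of this diff, so the sketch below is only a hypothetical, stripped-down illustration inferred from the call sites above (barrier, getRank, getSize, sumReduce, maxReduce); the class name "sketch::Comm" and the double-only overloads are assumptions for illustration and are not the project's actual API.

    // Hypothetical illustration only: a minimal wrapper in the spirit of
    // Utilities::MPI, showing how the removed raw calls map onto typed
    // member functions. The real common/MPI.h is not reproduced here.
    #include <mpi.h>

    namespace sketch {

    class Comm {
    public:
        explicit Comm( MPI_Comm c ) : d_comm(c) {}
        int getRank() const { int r = 0; MPI_Comm_rank(d_comm, &r); return r; }
        int getSize() const { int s = 1; MPI_Comm_size(d_comm, &s); return s; }
        void barrier() const { MPI_Barrier(d_comm); }
        // Scalar allreduce helpers (double shown; the real wrapper is templated)
        double sumReduce( double x ) const {
            double y = 0;
            MPI_Allreduce(&x, &y, 1, MPI_DOUBLE, MPI_SUM, d_comm);
            return y;
        }
        double maxReduce( double x ) const {
            double y = 0;
            MPI_Allreduce(&x, &y, 1, MPI_DOUBLE, MPI_MAX, d_comm);
            return y;
        }
    private:
        MPI_Comm d_comm;
    };

    } // namespace sketch

    // Usage mirroring the converted call sites, e.g. in Minkowski::ComputeScalar:
    //   sketch::Comm comm( MPI_COMM_WORLD );
    //   comm.barrier();
    //   double Vi_global = comm.sumReduce( Vi );

This is essentially the same logic as the free functions sumReduce()/maxReduce() deleted from common/MPI_Helpers.h further down in this patch, reorganized as communicator member functions so call sites no longer pass MPI_Comm, MPI_Datatype, and MPI_Op explicitly.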
/*! * @brief Calculate the distance using a simple method @@ -50,16 +40,4 @@ void CalcDist( Array &Distance, const Array &ID, const Domain &Dm, void CalcVecDist( Array &Distance, const Array &ID, const Domain &Dm, const std::array& periodic = {true,true,true}, const std::array& dx = {1,1,1} ); - -/*! - * @brief Calculate the distance based on solution of Eikonal equation - * @details This routine calculates the signed distance to the nearest domain surface. - * @param[out] Distance Distance function - * @param[in] ID Domain id - * @param[in] Dm Domain information - * @param[in] timesteps number of timesteps to run for Eikonal solver - * @param[in] periodic Directions that are periodic - */ -double Eikonal(DoubleArray &Distance, const Array &ID, Domain &Dm, int timesteps, const std::array& periodic); - #endif diff --git a/common/MPI_Helpers.cpp b/common/MPI_Helpers.cpp deleted file mode 100644 index 736a2f02..00000000 --- a/common/MPI_Helpers.cpp +++ /dev/null @@ -1,266 +0,0 @@ -#include "common/MPI_Helpers.h" -#include "common/Utilities.h" - - -/******************************************************** -* Return the MPI data type * -********************************************************/ -template<> MPI_Datatype getMPItype() { - return MPI_CHAR; -} -template<> MPI_Datatype getMPItype() { - return MPI_UNSIGNED_CHAR; -} -template<> MPI_Datatype getMPItype() { - return MPI_INT; -} -template<> MPI_Datatype getMPItype() { - return MPI_LONG; -} -template<> MPI_Datatype getMPItype() { - return MPI_UNSIGNED_LONG; -} -template<> MPI_Datatype getMPItype() { - return MPI_LONG_LONG; -} -template<> MPI_Datatype getMPItype() { - return MPI_FLOAT; -} -template<> MPI_Datatype getMPItype() { - return MPI_DOUBLE; -} - - -/******************************************************** -* Concrete implimentations for packing/unpacking * -********************************************************/ -// unsigned char -template<> -size_t packsize( const unsigned char& ) -{ - return sizeof(unsigned char); -} -template<> -void pack( const unsigned char& rhs, char *buffer ) -{ - memcpy(buffer,&rhs,sizeof(unsigned char)); -} -template<> -void unpack( unsigned char& data, const char *buffer ) -{ - memcpy(&data,buffer,sizeof(unsigned char)); -} -// char -template<> -size_t packsize( const char& ) -{ - return sizeof(char); -} -template<> -void pack( const char& rhs, char *buffer ) -{ - memcpy(buffer,&rhs,sizeof(char)); -} -template<> -void unpack( char& data, const char *buffer ) -{ - memcpy(&data,buffer,sizeof(char)); -} -// int -template<> -size_t packsize( const int& ) -{ - return sizeof(int); -} -template<> -void pack( const int& rhs, char *buffer ) -{ - memcpy(buffer,&rhs,sizeof(int)); -} -template<> -void unpack( int& data, const char *buffer ) -{ - memcpy(&data,buffer,sizeof(int)); -} -// unsigned int -template<> -size_t packsize( const unsigned int& ) -{ - return sizeof(unsigned int); -} -template<> -void pack( const unsigned int& rhs, char *buffer ) -{ - memcpy(buffer,&rhs,sizeof(int)); -} -template<> -void unpack( unsigned int& data, const char *buffer ) -{ - memcpy(&data,buffer,sizeof(int)); -} -// size_t -template<> -size_t packsize( const size_t& ) -{ - return sizeof(size_t); -} -template<> -void pack( const size_t& rhs, char *buffer ) -{ - memcpy(buffer,&rhs,sizeof(size_t)); -} -template<> -void unpack( size_t& data, const char *buffer ) -{ - memcpy(&data,buffer,sizeof(size_t)); -} -// std::string -template<> -size_t packsize( const std::string& rhs ) -{ - return rhs.size()+1; -} -template<> -void pack( const 
std::string& rhs, char *buffer ) -{ - memcpy(buffer,rhs.c_str(),rhs.size()+1); -} -template<> -void unpack( std::string& data, const char *buffer ) -{ - data = std::string(buffer); -} - - -/******************************************************** -* Fake MPI routines * -********************************************************/ -#ifndef USE_MPI -int MPI_Init(int*,char***) -{ - return 0; -} -int MPI_Init_thread(int*,char***, int required, int *provided ) -{ - *provided = required; - return 0; -} -int MPI_Finalize() -{ - return 0; -} -int MPI_Comm_size( MPI_Comm, int *size ) -{ - *size = 1; - return 0; -} -int MPI_Comm_rank( MPI_Comm, int *rank ) -{ - *rank = 0; - return 0; -} -int MPI_Barrier( MPI_Comm ) -{ - return 0; -} -int MPI_Waitall( int, MPI_Request[], MPI_Status[] ) -{ - return 0; -} -int MPI_Wait( MPI_Request*, MPI_Status* ) -{ - return 0; -} -int MPI_Bcast( void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm ) -{ - return 0; -} -int MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, - MPI_Comm comm) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, - MPI_Comm comm, MPI_Status *status) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, - MPI_Comm comm, MPI_Request *request) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source, - int tag, MPI_Comm comm, MPI_Request *request) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count, - MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype, - MPI_Comm comm) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, const int *recvcounts, const int *displs, - MPI_Datatype recvtype, MPI_Comm comm) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - int dest, int sendtag, - void *recvbuf, int recvcount, MPI_Datatype recvtype, - int source, int recvtag, - MPI_Comm comm, MPI_Status *status) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, - MPI_Op op, int root, MPI_Comm comm) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Comm_group(MPI_Comm comm, MPI_Group *group) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm) -{ - ERROR("Not implimented yet"); - return 0; -} -int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm) -{ - *newcomm = comm; - return 0; -} -double MPI_Wtime( void ) -{ - return 0.0; -} -int MPI_Comm_free(MPI_Comm *group) -{ - return 0; -} -int MPI_Group_free(MPI_Group *group) -{ - return 0; -} -#endif - - diff --git a/common/MPI_Helpers.h b/common/MPI_Helpers.h deleted file mode 100644 index 1d20318e..00000000 --- a/common/MPI_Helpers.h +++ /dev/null @@ -1,239 +0,0 @@ -// This file contains wrappers for MPI routines and functions to pack/unpack data structures -#ifndef MPI_WRAPPERS_INC -#define MPI_WRAPPERS_INC - -#include -#include -#include -#include - -#ifdef USE_MPI - // Inlcude MPI - #include 
"mpi.h" -#else - // Create fake MPI types - typedef int MPI_Comm; - typedef int MPI_Request; - typedef int MPI_Status; - #define MPI_COMM_WORLD 0 - #define MPI_COMM_SELF 0 - #define MPI_COMM_NULL -1 - #define MPI_GROUP_NULL -2 - #define MPI_STATUS_IGNORE NULL - enum MPI_Datatype { MPI_LOGICAL, MPI_CHAR, MPI_UNSIGNED_CHAR, MPI_INT, - MPI_UNSIGNED, MPI_LONG, MPI_UNSIGNED_LONG, MPI_LONG_LONG, MPI_FLOAT, MPI_DOUBLE }; - enum MPI_Op { MPI_MIN, MPI_MAX, MPI_SUM }; - typedef int MPI_Group; - #define MPI_THREAD_SINGLE 0 - #define MPI_THREAD_FUNNELED 1 - #define MPI_THREAD_SERIALIZED 2 - #define MPI_THREAD_MULTIPLE 3 - // Fake MPI functions - int MPI_Init(int*,char***); - int MPI_Init_thread( int *argc, char ***argv, int required, int *provided ); - int MPI_Finalize(); - int MPI_Comm_size( MPI_Comm, int *size ); - int MPI_Comm_rank( MPI_Comm, int *rank ); - int MPI_Barrier(MPI_Comm); - int MPI_Wait(MPI_Request*,MPI_Status*); - int MPI_Waitall(int,MPI_Request[],MPI_Status[]); - int MPI_Bcast(void*,int,MPI_Datatype,int,MPI_Comm); - int MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, - MPI_Comm comm); - int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, - MPI_Comm comm, MPI_Status *status); - int MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, - MPI_Comm comm, MPI_Request *request); - int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source, - int tag, MPI_Comm comm, MPI_Request *request); - int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count, - MPI_Datatype datatype, MPI_Op op, MPI_Comm comm); - int MPI_Allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, int recvcount, MPI_Datatype recvtype, - MPI_Comm comm); - int MPI_Allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - void *recvbuf, const int *recvcounts, const int *displs, - MPI_Datatype recvtype, MPI_Comm comm); - int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, - int dest, int sendtag, - void *recvbuf, int recvcount, MPI_Datatype recvtype, - int source, int recvtag, - MPI_Comm comm, MPI_Status *status); - int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, - MPI_Op op, int root, MPI_Comm comm); - double MPI_Wtime( void ); - int MPI_Comm_group(MPI_Comm comm, MPI_Group *group); - int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm); - int MPI_Comm_free(MPI_Comm *group); - int MPI_Group_free(MPI_Group *group); - int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm); -#endif - - -//! Get the size of the MPI_Comm -// Note: this is a thread and interrupt safe function -inline int comm_size( MPI_Comm comm ) { - int size = 1; - MPI_Comm_size( comm, &size ); - return size; -} - - -//! Get the rank of the MPI_Comm -// Note: this is a thread and interrupt safe function -inline int comm_rank( MPI_Comm comm ) { - int rank = 1; - MPI_Comm_rank( comm, &rank ); - return rank; -} - - -//! Get the size of MPI_COMM_WORLD -inline int MPI_WORLD_SIZE( ) { - return comm_size( MPI_COMM_WORLD ); -} - -//! Get the size of MPI_COMM_WORLD -inline int MPI_WORLD_RANK( ) { - return comm_rank( MPI_COMM_WORLD ); -} - -//! Return the appropriate MPI datatype for a class -template -MPI_Datatype getMPItype(); - - -//! Template function to return the buffer size required to pack a class -template -size_t packsize( const TYPE& rhs ); - -//! 
Template function to pack a class to a buffer -template -void pack( const TYPE& rhs, char *buffer ); - -//! Template function to unpack a class from a buffer -template -void unpack( TYPE& data, const char *buffer ); - - -//! Template function to return the buffer size required to pack a std::vector -template -size_t packsize( const std::vector& rhs ); - -//! Template function to pack a class to a buffer -template -void pack( const std::vector& rhs, char *buffer ); - -//! Template function to pack a class to a buffer -template -void unpack( std::vector& data, const char *buffer ); - - -//! Template function to return the buffer size required to pack a std::pair -template -size_t packsize( const std::pair& rhs ); - -//! Template function to pack a class to a buffer -template -void pack( const std::pair& rhs, char *buffer ); - -//! Template function to pack a class to a buffer -template -void unpack( std::pair& data, const char *buffer ); - - -//! Template function to return the buffer size required to pack a std::map -template -size_t packsize( const std::map& rhs ); - -//! Template function to pack a class to a buffer -template -void pack( const std::map& rhs, char *buffer ); - -//! Template function to pack a class to a buffer -template -void unpack( std::map& data, const char *buffer ); - - -//! Template function to return the buffer size required to pack a std::set -template -size_t packsize( const std::set& rhs ); - -//! Template function to pack a class to a buffer -template -void pack( const std::set& rhs, char *buffer ); - -//! Template function to pack a class to a buffer -template -void unpack( std::set& data, const char *buffer ); - - - -// Helper functions -inline double sumReduce( MPI_Comm comm, double x ) -{ - double y = 0; - MPI_Allreduce(&x,&y,1,MPI_DOUBLE,MPI_SUM,comm); - return y; -} -inline float sumReduce( MPI_Comm comm, float x ) -{ - float y = 0; - MPI_Allreduce(&x,&y,1,MPI_FLOAT,MPI_SUM,comm); - return y; -} -inline int sumReduce( MPI_Comm comm, int x ) -{ - int y = 0; - MPI_Allreduce(&x,&y,1,MPI_INT,MPI_SUM,comm); - return y; -} -inline long long sumReduce( MPI_Comm comm, long long x ) -{ - long long y = 0; - MPI_Allreduce(&x,&y,1,MPI_LONG_LONG,MPI_SUM,comm); - return y; -} -inline bool sumReduce( MPI_Comm comm, bool x ) -{ - int y = sumReduce( comm, x?1:0 ); - return y>0; -} -inline std::vector sumReduce( MPI_Comm comm, const std::vector& x ) -{ - auto y = x; - MPI_Allreduce(x.data(),y.data(),x.size(),MPI_FLOAT,MPI_SUM,comm); - return y; -} -inline std::vector sumReduce( MPI_Comm comm, const std::vector& x ) -{ - auto y = x; - MPI_Allreduce(x.data(),y.data(),x.size(),MPI_INT,MPI_SUM,comm); - return y; -} -inline double maxReduce( MPI_Comm comm, double x ) -{ - double y = 0; - MPI_Allreduce(&x,&y,1,MPI_DOUBLE,MPI_MAX,comm); - return y; -} -inline float maxReduce( MPI_Comm comm, float x ) -{ - float y = 0; - MPI_Allreduce(&x,&y,1,MPI_FLOAT,MPI_MAX,comm); - return y; -} -inline int maxReduce( MPI_Comm comm, int x ) -{ - int y = 0; - MPI_Allreduce(&x,&y,1,MPI_INT,MPI_MAX,comm); - return y; -} - - -#endif - - -#include "common/MPI_Helpers.hpp" - - diff --git a/common/MPI_Helpers.hpp b/common/MPI_Helpers.hpp deleted file mode 100644 index 85261cf1..00000000 --- a/common/MPI_Helpers.hpp +++ /dev/null @@ -1,154 +0,0 @@ -// This file contains wrappers for MPI routines and functions to pack/unpack data structures -#ifndef MPI_WRAPPERS_HPP -#define MPI_WRAPPERS_HPP - -#include "common/MPI_Helpers.h" -#include -#include -#include -#include - - - 
-/******************************************************** -* Default instantiations for std::vector * -********************************************************/ -template -size_t packsize( const std::vector& rhs ) -{ - size_t bytes = sizeof(size_t); - for (size_t i=0; i -void pack( const std::vector& rhs, char *buffer ) -{ - size_t size = rhs.size(); - memcpy(buffer,&size,sizeof(size_t)); - size_t pos = sizeof(size_t); - for (size_t i=0; i -void unpack( std::vector& data, const char *buffer ) -{ - size_t size; - memcpy(&size,buffer,sizeof(size_t)); - data.clear(); - data.resize(size); - size_t pos = sizeof(size_t); - for (size_t i=0; i -size_t packsize( const std::pair& rhs ) -{ - return packsize(rhs.first)+packsize(rhs.second); -} -template -void pack( const std::pair& rhs, char *buffer ) -{ - pack(rhs.first,buffer); - pack(rhs.second,&buffer[packsize(rhs.first)]); -} -template -void unpack( std::pair& data, const char *buffer ) -{ - unpack(data.first,buffer); - unpack(data.second,&buffer[packsize(data.first)]); -} - - -/******************************************************** -* Default instantiations for std::map * -********************************************************/ -template -size_t packsize( const std::map& rhs ) -{ - size_t bytes = sizeof(size_t); - typename std::map::const_iterator it; - for (it=rhs.begin(); it!=rhs.end(); ++it) { - bytes += packsize(it->first); - bytes += packsize(it->second); - } - return bytes; -} -template -void pack( const std::map& rhs, char *buffer ) -{ - size_t N = rhs.size(); - pack(N,buffer); - size_t pos = sizeof(size_t); - typename std::map::const_iterator it; - for (it=rhs.begin(); it!=rhs.end(); ++it) { - pack(it->first,&buffer[pos]); pos+=packsize(it->first); - pack(it->second,&buffer[pos]); pos+=packsize(it->second); - } -} -template -void unpack( std::map& data, const char *buffer ) -{ - size_t N = 0; - unpack(N,buffer); - size_t pos = sizeof(size_t); - data.clear(); - for (size_t i=0; i tmp; - unpack(tmp.first,&buffer[pos]); pos+=packsize(tmp.first); - unpack(tmp.second,&buffer[pos]); pos+=packsize(tmp.second); - data.insert(tmp); - } -} - - -/******************************************************** -* Default instantiations for std::set * -********************************************************/ -template -size_t packsize( const std::set& rhs ) -{ - size_t bytes = sizeof(size_t); - typename std::set::const_iterator it; - for (it=rhs.begin(); it!=rhs.end(); ++it) { - bytes += packsize(*it); - } - return bytes; -} -template -void pack( const std::set& rhs, char *buffer ) -{ - size_t N = rhs.size(); - pack(N,buffer); - size_t pos = sizeof(size_t); - typename std::set::const_iterator it; - for (it=rhs.begin(); it!=rhs.end(); ++it) { - pack(*it); pos+=packsize(*it); - } -} -template -void unpack( std::set& data, const char *buffer ) -{ - size_t N = 0; - unpack(N,buffer); - size_t pos = sizeof(size_t); - data.clear(); - for (size_t i=0; i