merging with FOM
This commit is contained in:
parent
81953c2fce
commit
2f5fc9ead1
@@ -4,7 +4,7 @@
 #include "common/Domain.h"
 #include "common/Communication.h"
 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -123,13 +123,13 @@ void Minkowski::ComputeScalar(const DoubleArray& Field, const double isovalue)
 // convert X for 2D manifold to 3D object
 Xi *= 0.5;

-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();
 // Phase averages
-MPI_Allreduce(&Vi,&Vi_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Xi,&Xi_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Ai,&Ai_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Ji,&Ji_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Barrier(Dm->Comm);
+Vi_global = Dm->Comm.sumReduce( Vi );
+Xi_global = Dm->Comm.sumReduce( Xi );
+Ai_global = Dm->Comm.sumReduce( Ai );
+Ji_global = Dm->Comm.sumReduce( Ji );
+Dm->Comm.barrier();
 PROFILE_STOP("ComputeScalar");
 }
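The hunk above shows the pattern repeated throughout this commit: raw MPI_Barrier/MPI_Allreduce calls on Dm->Comm are replaced by member functions of a communicator wrapper class, with sumReduce returning the reduced value instead of writing through an output pointer. A minimal self-contained sketch of that wrapper idea follows; the class and method bodies here are an illustrative stand-in, not the project's actual common/MPI.h interface.

    // Sketch only: a thin C++ wrapper that owns an MPI_Comm so call sites shrink
    // from MPI_Allreduce(&x,&x_global,1,MPI_DOUBLE,MPI_SUM,comm) to x_global = comm.sumReduce(x).
    #include <mpi.h>

    class CommWrapper {
    public:
        explicit CommWrapper(MPI_Comm comm) : comm_(comm) {}
        void barrier() const { MPI_Barrier(comm_); }
        double sumReduce(double x) const {
            double y = 0.0;
            MPI_Allreduce(&x, &y, 1, MPI_DOUBLE, MPI_SUM, comm_);
            return y;
        }
    private:
        MPI_Comm comm_;
    };

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        CommWrapper comm(MPI_COMM_WORLD);
        double Vi = 1.0;                        // local contribution on this rank
        double Vi_global = comm.sumReduce(Vi);  // global sum across all ranks
        comm.barrier();
        (void)Vi_global;
        MPI_Finalize();
        return 0;
    }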
@@ -220,7 +220,7 @@ int Minkowski::MeasureConnectedPathway(){
 double vF=0.0;
 n_connected_components = ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,Dm->rank_info,distance,distance,vF,vF,label,Dm->Comm);
 // int n_connected_components = ComputeGlobalPhaseComponent(Nx-2,Ny-2,Nz-2,Dm->rank_info,const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, Dm->Comm )
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();

 for (int k=0; k<Nz; k++){
 for (int j=0; j<Ny; j++){
@@ -261,10 +261,11 @@ int Minkowski::MeasureConnectedPathway(double factor, const DoubleArray &Phi){
 }

 // Extract only the connected part of NWP
 double vF=0.0;
 n_connected_components = ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,Dm->rank_info,distance,distance,vF,vF,label,Dm->Comm);
 // int n_connected_components = ComputeGlobalPhaseComponent(Nx-2,Ny-2,Nz-2,Dm->rank_info,const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, Dm->Comm )
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();
+

 for (int k=0; k<Nz; k++){
 for (int j=0; j<Ny; j++){
@@ -13,7 +13,7 @@
 #include "analysis/filters.h"

 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -720,7 +720,7 @@ void SubPhase::AggregateLabels( const std::string& filename )
 }
 }
 }
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();

 Dm->AggregateLabels( filename );

@@ -5,7 +5,7 @@
 #include "common/Domain.h"
 #include "common/Communication.h"
 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -882,7 +882,7 @@ void TwoPhase::ComponentAverages()
 }
 }

-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();
 if (Dm->rank()==0){
 printf("Component averages computed locally -- reducing result... \n");
 }
@@ -890,14 +890,14 @@ void TwoPhase::ComponentAverages()
 RecvBuffer.resize(BLOB_AVG_COUNT,NumberComponents_NWP);

 /* for (int b=0; b<NumberComponents_NWP; b++){
-MPI_Barrier(Dm->Comm);
-MPI_Allreduce(&ComponentAverages_NWP(0,b),&RecvBuffer(0),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,Dm->Comm);
+Dm->Comm.barrier();
+Dm->Comm.sumReduce(&ComponentAverages_NWP(0,b),&RecvBuffer(0),BLOB_AVG_COUNT);
 for (int idx=0; idx<BLOB_AVG_COUNT; idx++) ComponentAverages_NWP(idx,b)=RecvBuffer(idx);
 }
 */
-MPI_Barrier(Dm->Comm);
-MPI_Allreduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT*NumberComponents_NWP, MPI_DOUBLE,MPI_SUM,Dm->Comm);
-// MPI_Reduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,0,Dm->Comm);
+Dm->Comm.barrier();
+Dm->Comm.sumReduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT*NumberComponents_NWP);
+// Dm->Comm.sumReduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT);

 if (Dm->rank()==0){
 printf("rescaling... \n");
@@ -993,9 +993,8 @@ void TwoPhase::ComponentAverages()

 // reduce the wetting phase averages
 for (int b=0; b<NumberComponents_WP; b++){
-MPI_Barrier(Dm->Comm);
-// MPI_Allreduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Reduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,0,Dm->Comm);
+Dm->Comm.barrier();
+Dm->Comm.sumReduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT);
 for (int idx=0; idx<BLOB_AVG_COUNT; idx++) ComponentAverages_WP(idx,b)=RecvBuffer(idx);
 }

@@ -1078,43 +1077,48 @@ void TwoPhase::Reduce()
 int i;
 double iVol_global=1.0/Volume;
 //...........................................................................
-MPI_Barrier(Dm->Comm);
-MPI_Allreduce(&nwp_volume,&nwp_volume_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&wp_volume,&wp_volume_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&awn,&awn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&ans,&ans_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&aws,&aws_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&lwns,&lwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&As,&As_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Jwn,&Jwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Kwn,&Kwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&KGwns,&KGwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&KNwns,&KNwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&efawns,&efawns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&wwndnw,&wwndnw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&wwnsdnwn,&wwnsdnwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Jwnwwndnw,&Jwnwwndnw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
+Dm->Comm.barrier();
+nwp_volume_global = Dm->Comm.sumReduce( nwp_volume );
+wp_volume_global = Dm->Comm.sumReduce( wp_volume );
+awn_global = Dm->Comm.sumReduce( awn );
+ans_global = Dm->Comm.sumReduce( ans );
+aws_global = Dm->Comm.sumReduce( aws );
+lwns_global = Dm->Comm.sumReduce( lwns );
+As_global = Dm->Comm.sumReduce( As );
+Jwn_global = Dm->Comm.sumReduce( Jwn );
+Kwn_global = Dm->Comm.sumReduce( Kwn );
+KGwns_global = Dm->Comm.sumReduce( KGwns );
+KNwns_global = Dm->Comm.sumReduce( KNwns );
+efawns_global = Dm->Comm.sumReduce( efawns );
+wwndnw_global = Dm->Comm.sumReduce( wwndnw );
+wwnsdnwn_global = Dm->Comm.sumReduce( wwnsdnwn );
+Jwnwwndnw_global = Dm->Comm.sumReduce( Jwnwwndnw );
 // Phase averages
-MPI_Allreduce(&vol_w,&vol_w_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&vol_n,&vol_n_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&paw,&paw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&pan,&pan_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&vaw(0),&vaw_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&van(0),&van_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&vawn(0),&vawn_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&vawns(0),&vawns_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Gwn(0),&Gwn_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Gns(0),&Gns_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Gws(0),&Gws_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&trawn,&trawn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&trJwn,&trJwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&trRwn,&trRwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&euler,&euler_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&An,&An_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Jn,&Jn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Kn,&Kn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-
-MPI_Barrier(Dm->Comm);
+vol_w_global = Dm->Comm.sumReduce( vol_w );
+vol_n_global = Dm->Comm.sumReduce( vol_n );
+paw_global = Dm->Comm.sumReduce( paw );
+pan_global = Dm->Comm.sumReduce( pan );
+for (int idx=0; idx<3; idx++)
+vaw_global(idx) = Dm->Comm.sumReduce( vaw(idx) );
+for (int idx=0; idx<3; idx++)
+van_global(idx) = Dm->Comm.sumReduce( van(idx));
+for (int idx=0; idx<3; idx++)
+vawn_global(idx) = Dm->Comm.sumReduce( vawn(idx) );
+for (int idx=0; idx<3; idx++)
+vawns_global(idx) = Dm->Comm.sumReduce( vawns(idx) );
+for (int idx=0; idx<6; idx++){
+Gwn_global(idx) = Dm->Comm.sumReduce( Gwn(idx) );
+Gns_global(idx) = Dm->Comm.sumReduce( Gns(idx) );
+Gws_global(idx) = Dm->Comm.sumReduce( Gws(idx) );
+}
+trawn_global = Dm->Comm.sumReduce( trawn );
+trJwn_global = Dm->Comm.sumReduce( trJwn );
+trRwn_global = Dm->Comm.sumReduce( trRwn );
+euler_global = Dm->Comm.sumReduce( euler );
+An_global = Dm->Comm.sumReduce( An );
+Jn_global = Dm->Comm.sumReduce( Jn );
+Kn_global = Dm->Comm.sumReduce( Kn );
+Dm->Comm.barrier();

 // Normalize the phase averages
 // (density of both components = 1.0)
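In the TwoPhase::Reduce() hunk above, the 3-component velocity averages (vaw, van, vawn, vawns) and 6-component tensors (Gwn, Gns, Gws) are now reduced element by element with scalar sumReduce calls, where the old code reduced each small array in a single MPI_Allreduce. A hedged sketch of the two shapes in plain MPI (illustrative only, not project code): element-wise is simpler to read but issues one collective per component, while the buffered call does the same work in one collective.

    #include <mpi.h>
    #include <array>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        std::array<double, 6> Gwn{};         // local tensor components
        std::array<double, 6> Gwn_global{};

        // One collective for all six components (what the old MPI_Allreduce did).
        MPI_Allreduce(Gwn.data(), Gwn_global.data(), 6, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

        // Element-wise reductions (the shape of the new loop): six collectives.
        for (int idx = 0; idx < 6; idx++)
            MPI_Allreduce(&Gwn[idx], &Gwn_global[idx], 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

        MPI_Finalize();
        return 0;
    }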
@@ -12,7 +12,7 @@
 #include "common/Domain.h"
 #include "common/Communication.h"
 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -188,7 +188,7 @@ int ComputeLocalPhaseComponent(const IntArray &PhaseID, int &VALUE, BlobIDArray
 /******************************************************************
 * Reorder the global blob ids *
 ******************************************************************/
-static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int ngz, MPI_Comm comm )
+static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int ngz, const Utilities::MPI& comm )
 {
 if ( N_blobs==0 )
 return 0;
@@ -212,7 +212,7 @@ static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int
 }
 }
 ASSERT(max_id<N_blobs);
-MPI_Allreduce(local_size,global_size,N_blobs,MPI_DOUBLE,MPI_SUM,comm);
+comm.sumReduce(local_size,global_size,N_blobs);
 std::vector<std::pair<double,int> > map1(N_blobs);
 int N_blobs2 = 0;
 for (int i=0; i<N_blobs; i++) {
@@ -235,12 +235,12 @@ static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int
 PROFILE_STOP("ReorderBlobIDs2",1);
 return N_blobs2;
 }
-void ReorderBlobIDs( BlobIDArray& ID, MPI_Comm comm )
+void ReorderBlobIDs( BlobIDArray& ID, const Utilities::MPI& comm )
 {
 PROFILE_START("ReorderBlobIDs");
 int tmp = ID.max()+1;
 int N_blobs = 0;
-MPI_Allreduce(&tmp,&N_blobs,1,MPI_INT,MPI_MAX,comm);
+N_blobs = comm.maxReduce( tmp );
 ReorderBlobIDs2(ID,N_blobs,1,1,1,comm);
 PROFILE_STOP("ReorderBlobIDs");
 }
@@ -260,30 +260,29 @@ static void updateRemoteIds(
 int N_send, const std::vector<int>& N_recv,
 int64_t *send_buf, std::vector<int64_t*>& recv_buf,
 std::map<int64_t,int64_t>& remote_map,
-MPI_Comm comm )
+const Utilities::MPI& comm )
 {
 std::vector<MPI_Request> send_req(neighbors.size());
 std::vector<MPI_Request> recv_req(neighbors.size());
-std::vector<MPI_Status> status(neighbors.size());
-std::map<int64_t,global_id_info_struct>::const_iterator it = map.begin();
+auto it = map.begin();
 ASSERT(N_send==(int)map.size());
 for (size_t i=0; i<map.size(); i++, ++it) {
 send_buf[2*i+0] = it->first;
 send_buf[2*i+1] = it->second.new_id;
 }
 for (size_t i=0; i<neighbors.size(); i++) {
-MPI_Isend( send_buf, 2*N_send, MPI_LONG_LONG, neighbors[i], 0, comm, &send_req[i] );
-MPI_Irecv( recv_buf[i], 2*N_recv[i], MPI_LONG_LONG, neighbors[i], 0, comm, &recv_req[i] );
+send_req[i] = comm.Isend( send_buf, 2*N_send, neighbors[i], 0 );
+recv_req[i] = comm.Irecv( recv_buf[i], 2*N_recv[i], neighbors[i], 0 );
 }
 for (it=map.begin(); it!=map.end(); ++it) {
 remote_map[it->first] = it->second.new_id;
 }
 for (size_t i=0; i<neighbors.size(); i++) {
-MPI_Wait(&recv_req[i],&status[i]);
+comm.wait( recv_req[i] );
 for (int j=0; j<N_recv[i]; j++)
 remote_map[recv_buf[i][2*j+0]] = recv_buf[i][2*j+1];
 }
-MPI_Waitall(neighbors.size(),getPtr(send_req),getPtr(status));
+comm.waitAll(neighbors.size(),getPtr(send_req));
 }
 // Compute a new local id for each local id
 static bool updateLocalIds( const std::map<int64_t,int64_t>& remote_map,
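updateRemoteIds above now receives request handles back from comm.Isend/comm.Irecv and waits on them directly, so the MPI_Status vector disappears. A small self-contained sketch of that request-returning style, with an illustrative wrapper (assumed names, not the project's Utilities::MPI):

    #include <mpi.h>

    struct Comm {
        MPI_Comm comm;
        MPI_Request Isend(const int* buf, int n, int dest, int tag) const {
            MPI_Request req;
            MPI_Isend(buf, n, MPI_INT, dest, tag, comm, &req);
            return req;   // caller keeps the handle and waits on it later
        }
        MPI_Request Irecv(int* buf, int n, int src, int tag) const {
            MPI_Request req;
            MPI_Irecv(buf, n, MPI_INT, src, tag, comm, &req);
            return req;
        }
        void wait(MPI_Request req) const { MPI_Wait(&req, MPI_STATUS_IGNORE); }
    };

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        int rank; MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        Comm c{MPI_COMM_WORLD};
        int out = rank, in = -1;
        MPI_Request r1 = c.Irecv(&in, 1, rank, 0);   // message to/from self, just to exercise the API
        MPI_Request r2 = c.Isend(&out, 1, rank, 0);
        c.wait(r1);
        c.wait(r2);
        MPI_Finalize();
        return 0;
    }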
@@ -304,18 +303,18 @@ static bool updateLocalIds( const std::map<int64_t,int64_t>& remote_map,
 return changed;
 }
 static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
-int nblobs, BlobIDArray& IDs, MPI_Comm comm )
+int nblobs, BlobIDArray& IDs, const Utilities::MPI& comm )
 {
 PROFILE_START("LocalToGlobalIDs",1);
 const int rank = rank_info.rank[1][1][1];
-int nprocs = comm_size(comm);
+int nprocs = comm.getSize();
 const int ngx = (IDs.size(0)-nx)/2;
 const int ngy = (IDs.size(1)-ny)/2;
 const int ngz = (IDs.size(2)-nz)/2;
 // Get the number of blobs for each rank
 std::vector<int> N_blobs(nprocs,0);
 PROFILE_START("LocalToGlobalIDs-Allgather",1);
-MPI_Allgather(&nblobs,1,MPI_INT,getPtr(N_blobs),1,MPI_INT,comm);
+comm.allGather(nblobs,getPtr(N_blobs));
 PROFILE_STOP("LocalToGlobalIDs-Allgather",1);
 int64_t N_blobs_tot = 0;
 int offset = 0;
@@ -363,13 +362,12 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_
 std::vector<int> N_recv(neighbors.size(),0);
 std::vector<MPI_Request> send_req(neighbors.size());
 std::vector<MPI_Request> recv_req(neighbors.size());
-std::vector<MPI_Status> status(neighbors.size());
 for (size_t i=0; i<neighbors.size(); i++) {
-MPI_Isend( &N_send, 1, MPI_INT, neighbors[i], 0, comm, &send_req[i] );
-MPI_Irecv( &N_recv[i], 1, MPI_INT, neighbors[i], 0, comm, &recv_req[i] );
+send_req[i] = comm.Isend( &N_send, 1, neighbors[i], 0 );
+recv_req[i] = comm.Irecv( &N_recv[i], 1, neighbors[i], 0 );
 }
-MPI_Waitall(neighbors.size(),getPtr(send_req),getPtr(status));
-MPI_Waitall(neighbors.size(),getPtr(recv_req),getPtr(status));
+comm.waitAll(neighbors.size(),getPtr(send_req));
+comm.waitAll(neighbors.size(),getPtr(recv_req));
 // Allocate memory for communication
 int64_t *send_buf = new int64_t[2*N_send];
 std::vector<int64_t*> recv_buf(neighbors.size());
@@ -398,8 +396,7 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_
 bool changed = updateLocalIds( remote_map, map );
 // Check if we are finished
 int test = changed ? 1:0;
-int result = 0;
-MPI_Allreduce(&test,&result,1,MPI_INT,MPI_SUM,comm);
+int result = comm.sumReduce( test );
 if ( result==0 )
 break;
 }
@@ -435,7 +432,7 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_
 }
 int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
 const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS,
-BlobIDArray& GlobalBlobID, MPI_Comm comm )
+BlobIDArray& GlobalBlobID, const Utilities::MPI& comm )
 {
 PROFILE_START("ComputeGlobalBlobIDs");
 // First compute the local ids
@@ -446,7 +443,7 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_inf
 return nglobal;
 }
 int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info,
-const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, MPI_Comm comm )
+const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, const Utilities::MPI& comm )
 {
 PROFILE_START("ComputeGlobalPhaseComponent");
 // First compute the local ids
@@ -462,37 +459,27 @@ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& r
 * Compute the mapping of blob ids between timesteps *
 ******************************************************************/
 typedef std::map<BlobIDType,std::map<BlobIDType,int64_t> > map_type;
-template<class TYPE> inline MPI_Datatype getMPIType();
-template<> inline MPI_Datatype getMPIType<int32_t>() { return MPI_INT; }
-template<> inline MPI_Datatype getMPIType<int64_t>() {
-if ( sizeof(int64_t)==sizeof(long int) )
-return MPI_LONG;
-else if ( sizeof(int64_t)==sizeof(double) )
-return MPI_DOUBLE;
-}
 template<class TYPE>
-void gatherSet( std::set<TYPE>& set, MPI_Comm comm )
+void gatherSet( std::set<TYPE>& set, const Utilities::MPI& comm )
 {
-int nprocs = comm_size(comm);
-MPI_Datatype type = getMPIType<TYPE>();
+int nprocs = comm.getSize();
 std::vector<TYPE> send_data(set.begin(),set.end());
 int send_count = send_data.size();
 std::vector<int> recv_count(nprocs,0), recv_disp(nprocs,0);
-MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,comm);
+comm.allGather( send_count, getPtr(recv_count) );
 for (int i=1; i<nprocs; i++)
 recv_disp[i] = recv_disp[i-1] + recv_count[i-1];
 std::vector<TYPE> recv_data(recv_disp[nprocs-1]+recv_count[nprocs-1]);
-MPI_Allgatherv(getPtr(send_data),send_count,type,
-getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type,comm);
+comm.allGather( getPtr(send_data), send_count, getPtr(recv_data),
+getPtr(recv_count), getPtr(recv_disp), true );
 for (size_t i=0; i<recv_data.size(); i++)
 set.insert(recv_data[i]);
 }
-void gatherSrcIDMap( map_type& src_map, MPI_Comm comm )
+void gatherSrcIDMap( map_type& src_map, const Utilities::MPI& comm )
 {
-int nprocs = comm_size(comm);
-MPI_Datatype type = getMPIType<int64_t>();
+int nprocs = comm.getSize();
 std::vector<int64_t> send_data;
-for (map_type::const_iterator it=src_map.begin(); it!=src_map.end(); ++it) {
+for (auto it=src_map.begin(); it!=src_map.end(); ++it) {
 int id = it->first;
 const std::map<BlobIDType,int64_t>& src_ids = it->second;
 send_data.push_back(id);
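gatherSet and gatherSrcIDMap above both implement the same variable-length all-gather: exchange per-rank counts, build displacements by prefix sum, then gather the payload. The same steps in plain MPI, as a runnable sketch (illustrative data, not project code; the project routes these calls through its Utilities::MPI::allGather overloads):

    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        int rank, nprocs;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

        std::vector<long long> send_data(rank + 1, rank);   // each rank contributes rank+1 items
        int send_count = static_cast<int>(send_data.size());

        // Step 1: gather how much each rank will send.
        std::vector<int> recv_count(nprocs, 0), recv_disp(nprocs, 0);
        MPI_Allgather(&send_count, 1, MPI_INT, recv_count.data(), 1, MPI_INT, MPI_COMM_WORLD);

        // Step 2: displacements by prefix sum.
        for (int i = 1; i < nprocs; i++)
            recv_disp[i] = recv_disp[i - 1] + recv_count[i - 1];

        // Step 3: gather the variable-length payload.
        std::vector<long long> recv_data(recv_disp[nprocs - 1] + recv_count[nprocs - 1]);
        MPI_Allgatherv(send_data.data(), send_count, MPI_LONG_LONG,
                       recv_data.data(), recv_count.data(), recv_disp.data(),
                       MPI_LONG_LONG, MPI_COMM_WORLD);
        MPI_Finalize();
        return 0;
    }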
@@ -505,21 +492,21 @@ void gatherSrcIDMap( map_type& src_map, MPI_Comm comm )
 }
 int send_count = send_data.size();
 std::vector<int> recv_count(nprocs,0), recv_disp(nprocs,0);
-MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,comm);
+comm.allGather(send_count,getPtr(recv_count));
 for (int i=1; i<nprocs; i++)
 recv_disp[i] = recv_disp[i-1] + recv_count[i-1];
 std::vector<int64_t> recv_data(recv_disp[nprocs-1]+recv_count[nprocs-1]);
-MPI_Allgatherv(getPtr(send_data),send_count,type,
-getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type,comm);
+comm.allGather(getPtr(send_data),send_count,
+getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),true);
 size_t i=0;
 src_map.clear();
 while ( i < recv_data.size() ) {
 BlobIDType id = recv_data[i];
 size_t count = recv_data[i+1];
 i += 2;
-std::map<BlobIDType,int64_t>& src_ids = src_map[id];
+auto& src_ids = src_map[id];
 for (size_t j=0; j<count; j++,i+=2) {
-std::map<BlobIDType,int64_t>::iterator it = src_ids.find(recv_data[i]);
+auto it = src_ids.find(recv_data[i]);
 if ( it == src_ids.end() )
 src_ids.insert(std::pair<BlobIDType,int64_t>(recv_data[i],recv_data[i+1]));
 else
@@ -538,7 +525,7 @@ void addSrcDstIDs( BlobIDType src_id, map_type& src_map, map_type& dst_map,
 }
 }
 ID_map_struct computeIDMap( int nx, int ny, int nz,
-const BlobIDArray& ID1, const BlobIDArray& ID2, MPI_Comm comm )
+const BlobIDArray& ID1, const BlobIDArray& ID2, const Utilities::MPI& comm )
 {
 ASSERT(ID1.size()==ID2.size());
 PROFILE_START("computeIDMap");
@@ -780,7 +767,7 @@ void renumberIDs( const std::vector<BlobIDType>& new_ids, BlobIDArray& IDs )
 ******************************************************************/
 void writeIDMap( const ID_map_struct& map, long long int timestep, const std::string& filename )
 {
-int rank = MPI_WORLD_RANK();
+int rank = Utilities::MPI( MPI_COMM_WORLD ).getRank();
 if ( rank!=0 )
 return;
 bool empty = map.created.empty() && map.destroyed.empty() &&
@@ -58,7 +58,7 @@ int ComputeLocalPhaseComponent( const IntArray &PhaseID, int &VALUE, IntArray &C
 */
 int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
 const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS,
-BlobIDArray& GlobalBlobID, MPI_Comm comm );
+BlobIDArray& GlobalBlobID, const Utilities::MPI& comm );


 /*!
@@ -75,7 +75,7 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_inf
 * @return Return the number of components in the specified phase
 */
 int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info,
-const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, MPI_Comm comm );
+const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, const Utilities::MPI& comm );


 /*!
@@ -87,7 +87,7 @@ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& r
 * @param[in] nz Number of elements in the z-direction
 * @param[in/out] ID The ids of the blobs
 */
-void ReorderBlobIDs( BlobIDArray& ID, MPI_Comm comm );
+void ReorderBlobIDs( BlobIDArray& ID, const Utilities::MPI& comm );


 typedef std::pair<BlobIDType,std::vector<BlobIDType> > BlobIDSplitStruct;
@@ -120,7 +120,7 @@ struct ID_map_struct {
 * @param[in] ID1 The blob ids at the first timestep
 * @param[in] ID2 The blob ids at the second timestep
 */
-ID_map_struct computeIDMap( int nx, int ny, int nz, const BlobIDArray& ID1, const BlobIDArray& ID2, MPI_Comm comm );
+ID_map_struct computeIDMap( int nx, int ny, int nz, const BlobIDArray& ID1, const BlobIDArray& ID2, const Utilities::MPI& comm );


 /*!
@@ -176,154 +176,12 @@ void CalcVecDist( Array<Vec> &d, const Array<int> &ID0, const Domain &Dm,
 // Update distance
 double err = calcVecUpdateInterior( d, dx[0], dx[1], dx[2] );
 // Check if we are finished
-err = maxReduce( Dm.Comm, err );
+err = Dm.Comm.maxReduce( err );
 if ( err < tol )
 break;
 }
 }

-double Eikonal(DoubleArray &Distance, const Array<char> &ID, Domain &Dm, int timesteps, const std::array<bool,3>& periodic){
-
-/*
-* This routine converts the data in the Distance array to a signed distance
-* by solving the equation df/dt = sign(1-|grad f|), where Distance provides
-* the values of f on the mesh associated with domain Dm
-* It has been tested with segmented data initialized to values [-1,1]
-* and will converge toward the signed distance to the surface bounding the associated phases
-*
-* Reference:
-* Min C (2010) On reinitializing level set functions, Journal of Computational Physics229
-*/
-
-int i,j,k;
-double dt=0.1;
-double Dx,Dy,Dz;
-double Dxp,Dxm,Dyp,Dym,Dzp,Dzm;
-double Dxxp,Dxxm,Dyyp,Dyym,Dzzp,Dzzm;
-double sign,norm;
-double LocalVar,GlobalVar,LocalMax,GlobalMax;
-
-int xdim,ydim,zdim;
-xdim=Dm.Nx-2;
-ydim=Dm.Ny-2;
-zdim=Dm.Nz-2;
-//fillHalo<double> fillData(Dm.Comm, Dm.rank_info,xdim,ydim,zdim,1,1,1,0,1);
-fillHalo<double> fillData( Dm.Comm, Dm.rank_info, {xdim, ydim, zdim}, {1,1,1}, 50, 1, {true,true,true}, periodic );
-
-// Arrays to store the second derivatives
-DoubleArray Dxx(Dm.Nx,Dm.Ny,Dm.Nz);
-DoubleArray Dyy(Dm.Nx,Dm.Ny,Dm.Nz);
-DoubleArray Dzz(Dm.Nx,Dm.Ny,Dm.Nz);
-
-int count = 0;
-while (count < timesteps){
-
-// Communicate the halo of values
-fillData.fill(Distance);
-
-// Compute second order derivatives
-for (k=1;k<Dm.Nz-1;k++){
-for (j=1;j<Dm.Ny-1;j++){
-for (i=1;i<Dm.Nx-1;i++){
-Dxx(i,j,k) = Distance(i+1,j,k) + Distance(i-1,j,k) - 2*Distance(i,j,k);
-Dyy(i,j,k) = Distance(i,j+1,k) + Distance(i,j-1,k) - 2*Distance(i,j,k);
-Dzz(i,j,k) = Distance(i,j,k+1) + Distance(i,j,k-1) - 2*Distance(i,j,k);
-}
-}
-}
-fillData.fill(Dxx);
-fillData.fill(Dyy);
-fillData.fill(Dzz);
-
-LocalMax=LocalVar=0.0;
-// Execute the next timestep
-for (k=1;k<Dm.Nz-1;k++){
-for (j=1;j<Dm.Ny-1;j++){
-for (i=1;i<Dm.Nx-1;i++){
-
-int n = k*Dm.Nx*Dm.Ny + j*Dm.Nx + i;
-
-sign = -1;
-if (ID(i,j,k) == 1) sign = 1;
-
-// local second derivative terms
-Dxxp = minmod(Dxx(i,j,k),Dxx(i+1,j,k));
-Dyyp = minmod(Dyy(i,j,k),Dyy(i,j+1,k));
-Dzzp = minmod(Dzz(i,j,k),Dzz(i,j,k+1));
-Dxxm = minmod(Dxx(i,j,k),Dxx(i-1,j,k));
-Dyym = minmod(Dyy(i,j,k),Dyy(i,j-1,k));
-Dzzm = minmod(Dzz(i,j,k),Dzz(i,j,k-1));
-
-/* //............Compute upwind derivatives ...................
-Dxp = Distance(i+1,j,k) - Distance(i,j,k) + 0.5*Dxxp;
-Dyp = Distance(i,j+1,k) - Distance(i,j,k) + 0.5*Dyyp;
-Dzp = Distance(i,j,k+1) - Distance(i,j,k) + 0.5*Dzzp;
-Dxm = Distance(i,j,k) - Distance(i-1,j,k) + 0.5*Dxxm;
-Dym = Distance(i,j,k) - Distance(i,j-1,k) + 0.5*Dyym;
-Dzm = Distance(i,j,k) - Distance(i,j,k-1) + 0.5*Dzzm;
-*/
-Dxp = Distance(i+1,j,k)- Distance(i,j,k) - 0.5*Dxxp;
-Dyp = Distance(i,j+1,k)- Distance(i,j,k) - 0.5*Dyyp;
-Dzp = Distance(i,j,k+1)- Distance(i,j,k) - 0.5*Dzzp;
-
-Dxm = Distance(i,j,k) - Distance(i-1,j,k) + 0.5*Dxxm;
-Dym = Distance(i,j,k) - Distance(i,j-1,k) + 0.5*Dyym;
-Dzm = Distance(i,j,k) - Distance(i,j,k-1) + 0.5*Dzzm;
-
-// Compute upwind derivatives for Godunov Hamiltonian
-if (sign < 0.0){
-if (Dxp + Dxm > 0.f) Dx = Dxp*Dxp;
-else Dx = Dxm*Dxm;
-
-if (Dyp + Dym > 0.f) Dy = Dyp*Dyp;
-else Dy = Dym*Dym;
-
-if (Dzp + Dzm > 0.f) Dz = Dzp*Dzp;
-else Dz = Dzm*Dzm;
-}
-else{
-
-if (Dxp + Dxm < 0.f) Dx = Dxp*Dxp;
-else Dx = Dxm*Dxm;
-
-if (Dyp + Dym < 0.f) Dy = Dyp*Dyp;
-else Dy = Dym*Dym;
-
-if (Dzp + Dzm < 0.f) Dz = Dzp*Dzp;
-else Dz = Dzm*Dzm;
-}
-
-//Dx = max(Dxp*Dxp,Dxm*Dxm);
-//Dy = max(Dyp*Dyp,Dym*Dym);
-//Dz = max(Dzp*Dzp,Dzm*Dzm);
-
-norm=sqrt(Dx + Dy + Dz);
-if (norm > 1.0) norm=1.0;
-
-Distance(i,j,k) += dt*sign*(1.0 - norm);
-LocalVar += dt*sign*(1.0 - norm);
-
-if (fabs(dt*sign*(1.0 - norm)) > LocalMax)
-LocalMax = fabs(dt*sign*(1.0 - norm));
-}
-}
-}
-
-MPI_Allreduce(&LocalVar,&GlobalVar,1,MPI_DOUBLE,MPI_SUM,Dm.Comm);
-MPI_Allreduce(&LocalMax,&GlobalMax,1,MPI_DOUBLE,MPI_MAX,Dm.Comm);
-GlobalVar /= Dm.Volume;
-count++;
-
-if (count%50 == 0 && Dm.rank()==0 )
-printf("Time=%i, Max variation=%f, Global variation=%f \n",count,GlobalMax,GlobalVar);
-
-if (fabs(GlobalMax) < 1e-5){
-if (Dm.rank()==0) printf("Exiting with max tolerance of 1e-5 \n");
-count=timesteps;
-}
-}
-return GlobalVar;
-}

 // Explicit instantiations
 template void CalcDist<float>( Array<float>&, const Array<char>&, const Domain&, const std::array<bool,3>&, const std::array<double,3>& );
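The convergence test in CalcVecDist above reduces the local error with a max across ranks before comparing it to tol. A short illustrative sketch of why that matters, under the usual MPI assumption that every rank must make the same decision about leaving a loop that contains collectives (not project code):

    #include <mpi.h>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);
        const double tol = 1e-6;
        double local_err = 1.0;
        for (int it = 0; it < 100; ++it) {
            local_err *= 0.5;   // stand-in for one local update sweep
            double global_err = 0.0;
            // Reduce with MPI_MAX so the loop exits only when the worst rank has converged,
            // and every rank takes the identical branch (no rank is left waiting in a collective).
            MPI_Allreduce(&local_err, &global_err, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
            if (global_err < tol)
                break;
        }
        MPI_Finalize();
        return 0;
    }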
@@ -16,16 +16,6 @@ struct Vec {
 };
 inline bool operator<(const Vec& l, const Vec& r){ return l.x*l.x+l.y*l.y+l.z*l.z < r.x*r.x+r.y*r.y+r.z*r.z; }

-inline double minmod(double &a, double &b){
-
-double value;
-
-value = a;
-if ( a*b < 0.0) value=0.0;
-else if (fabs(a) > fabs(b)) value = b;
-
-return value;
-}

 /*!
 * @brief Calculate the distance using a simple method
@@ -50,16 +40,4 @@ void CalcDist( Array<TYPE> &Distance, const Array<char> &ID, const Domain &Dm,
 void CalcVecDist( Array<Vec> &Distance, const Array<int> &ID, const Domain &Dm,
 const std::array<bool,3>& periodic = {true,true,true}, const std::array<double,3>& dx = {1,1,1} );

-
-/*!
-* @brief Calculate the distance based on solution of Eikonal equation
-* @details This routine calculates the signed distance to the nearest domain surface.
-* @param[out] Distance Distance function
-* @param[in] ID Domain id
-* @param[in] Dm Domain information
-* @param[in] timesteps number of timesteps to run for Eikonal solver
-* @param[in] periodic Directions that are periodic
-*/
-double Eikonal(DoubleArray &Distance, const Array<char> &ID, Domain &Dm, int timesteps, const std::array<bool,3>& periodic);
-
 #endif
@@ -1,266 +0,0 @@
-#include "common/MPI_Helpers.h"
-#include "common/Utilities.h"
-
-
-/********************************************************
-* Return the MPI data type *
-********************************************************/
-template<> MPI_Datatype getMPItype<char>() {
-return MPI_CHAR;
-}
-template<> MPI_Datatype getMPItype<unsigned char>() {
-return MPI_UNSIGNED_CHAR;
-}
-template<> MPI_Datatype getMPItype<int>() {
-return MPI_INT;
-}
-template<> MPI_Datatype getMPItype<long>() {
-return MPI_LONG;
-}
-template<> MPI_Datatype getMPItype<unsigned long>() {
-return MPI_UNSIGNED_LONG;
-}
-template<> MPI_Datatype getMPItype<long long>() {
-return MPI_LONG_LONG;
-}
-template<> MPI_Datatype getMPItype<float>() {
-return MPI_FLOAT;
-}
-template<> MPI_Datatype getMPItype<double>() {
-return MPI_DOUBLE;
-}
-
-
-/********************************************************
-* Concrete implimentations for packing/unpacking *
-********************************************************/
-// unsigned char
-template<>
-size_t packsize<unsigned char>( const unsigned char& )
-{
-return sizeof(unsigned char);
-}
-template<>
-void pack<unsigned char>( const unsigned char& rhs, char *buffer )
-{
-memcpy(buffer,&rhs,sizeof(unsigned char));
-}
-template<>
-void unpack<unsigned char>( unsigned char& data, const char *buffer )
-{
-memcpy(&data,buffer,sizeof(unsigned char));
-}
-// char
-template<>
-size_t packsize<char>( const char& )
-{
-return sizeof(char);
-}
-template<>
-void pack<char>( const char& rhs, char *buffer )
-{
-memcpy(buffer,&rhs,sizeof(char));
-}
-template<>
-void unpack<char>( char& data, const char *buffer )
-{
-memcpy(&data,buffer,sizeof(char));
-}
-// int
-template<>
-size_t packsize<int>( const int& )
-{
-return sizeof(int);
-}
-template<>
-void pack<int>( const int& rhs, char *buffer )
-{
-memcpy(buffer,&rhs,sizeof(int));
-}
-template<>
-void unpack<int>( int& data, const char *buffer )
-{
-memcpy(&data,buffer,sizeof(int));
-}
-// unsigned int
-template<>
-size_t packsize<unsigned int>( const unsigned int& )
-{
-return sizeof(unsigned int);
-}
-template<>
-void pack<unsigned int>( const unsigned int& rhs, char *buffer )
-{
-memcpy(buffer,&rhs,sizeof(int));
-}
-template<>
-void unpack<unsigned int>( unsigned int& data, const char *buffer )
-{
-memcpy(&data,buffer,sizeof(int));
-}
-// size_t
-template<>
-size_t packsize<size_t>( const size_t& )
-{
-return sizeof(size_t);
-}
-template<>
-void pack<size_t>( const size_t& rhs, char *buffer )
-{
-memcpy(buffer,&rhs,sizeof(size_t));
-}
-template<>
-void unpack<size_t>( size_t& data, const char *buffer )
-{
-memcpy(&data,buffer,sizeof(size_t));
-}
-// std::string
-template<>
-size_t packsize<std::string>( const std::string& rhs )
-{
-return rhs.size()+1;
-}
-template<>
-void pack<std::string>( const std::string& rhs, char *buffer )
-{
-memcpy(buffer,rhs.c_str(),rhs.size()+1);
-}
-template<>
-void unpack<std::string>( std::string& data, const char *buffer )
-{
-data = std::string(buffer);
-}
-
-
-/********************************************************
-* Fake MPI routines *
-********************************************************/
-#ifndef USE_MPI
-int MPI_Init(int*,char***)
-{
-return 0;
-}
-int MPI_Init_thread(int*,char***, int required, int *provided )
-{
-*provided = required;
-return 0;
-}
-int MPI_Finalize()
-{
-return 0;
-}
-int MPI_Comm_size( MPI_Comm, int *size )
-{
-*size = 1;
-return 0;
-}
-int MPI_Comm_rank( MPI_Comm, int *rank )
-{
-*rank = 0;
-return 0;
-}
-int MPI_Barrier( MPI_Comm )
-{
-return 0;
-}
-int MPI_Waitall( int, MPI_Request[], MPI_Status[] )
-{
-return 0;
-}
-int MPI_Wait( MPI_Request*, MPI_Status* )
-{
-return 0;
-}
-int MPI_Bcast( void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm )
-{
-return 0;
-}
-int MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
-MPI_Comm comm)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag,
-MPI_Comm comm, MPI_Status *status)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
-MPI_Comm comm, MPI_Request *request)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source,
-int tag, MPI_Comm comm, MPI_Request *request)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count,
-MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-void *recvbuf, int recvcount, MPI_Datatype recvtype,
-MPI_Comm comm)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-void *recvbuf, const int *recvcounts, const int *displs,
-MPI_Datatype recvtype, MPI_Comm comm)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-int dest, int sendtag,
-void *recvbuf, int recvcount, MPI_Datatype recvtype,
-int source, int recvtag,
-MPI_Comm comm, MPI_Status *status)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
-MPI_Op op, int root, MPI_Comm comm)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Comm_group(MPI_Comm comm, MPI_Group *group)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm)
-{
-ERROR("Not implimented yet");
-return 0;
-}
-int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
-{
-*newcomm = comm;
-return 0;
-}
-double MPI_Wtime( void )
-{
-return 0.0;
-}
-int MPI_Comm_free(MPI_Comm *group)
-{
-return 0;
-}
-int MPI_Group_free(MPI_Group *group)
-{
-return 0;
-}
-#endif
@@ -1,239 +0,0 @@
-// This file contains wrappers for MPI routines and functions to pack/unpack data structures
-#ifndef MPI_WRAPPERS_INC
-#define MPI_WRAPPERS_INC
-
-#include <string.h>
-#include <vector>
-#include <set>
-#include <map>
-
-#ifdef USE_MPI
-// Inlcude MPI
-#include "mpi.h"
-#else
-// Create fake MPI types
-typedef int MPI_Comm;
-typedef int MPI_Request;
-typedef int MPI_Status;
-#define MPI_COMM_WORLD 0
-#define MPI_COMM_SELF 0
-#define MPI_COMM_NULL -1
-#define MPI_GROUP_NULL -2
-#define MPI_STATUS_IGNORE NULL
-enum MPI_Datatype { MPI_LOGICAL, MPI_CHAR, MPI_UNSIGNED_CHAR, MPI_INT,
-MPI_UNSIGNED, MPI_LONG, MPI_UNSIGNED_LONG, MPI_LONG_LONG, MPI_FLOAT, MPI_DOUBLE };
-enum MPI_Op { MPI_MIN, MPI_MAX, MPI_SUM };
-typedef int MPI_Group;
-#define MPI_THREAD_SINGLE 0
-#define MPI_THREAD_FUNNELED 1
-#define MPI_THREAD_SERIALIZED 2
-#define MPI_THREAD_MULTIPLE 3
-// Fake MPI functions
-int MPI_Init(int*,char***);
-int MPI_Init_thread( int *argc, char ***argv, int required, int *provided );
-int MPI_Finalize();
-int MPI_Comm_size( MPI_Comm, int *size );
-int MPI_Comm_rank( MPI_Comm, int *rank );
-int MPI_Barrier(MPI_Comm);
-int MPI_Wait(MPI_Request*,MPI_Status*);
-int MPI_Waitall(int,MPI_Request[],MPI_Status[]);
-int MPI_Bcast(void*,int,MPI_Datatype,int,MPI_Comm);
-int MPI_Send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
-MPI_Comm comm);
-int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int source, int tag,
-MPI_Comm comm, MPI_Status *status);
-int MPI_Isend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
-MPI_Comm comm, MPI_Request *request);
-int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source,
-int tag, MPI_Comm comm, MPI_Request *request);
-int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count,
-MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
-int MPI_Allgather(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-void *recvbuf, int recvcount, MPI_Datatype recvtype,
-MPI_Comm comm);
-int MPI_Allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-void *recvbuf, const int *recvcounts, const int *displs,
-MPI_Datatype recvtype, MPI_Comm comm);
-int MPI_Sendrecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
-int dest, int sendtag,
-void *recvbuf, int recvcount, MPI_Datatype recvtype,
-int source, int recvtag,
-MPI_Comm comm, MPI_Status *status);
-int MPI_Reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
-MPI_Op op, int root, MPI_Comm comm);
-double MPI_Wtime( void );
-int MPI_Comm_group(MPI_Comm comm, MPI_Group *group);
-int MPI_Comm_create(MPI_Comm comm, MPI_Group group, MPI_Comm *newcomm);
-int MPI_Comm_free(MPI_Comm *group);
-int MPI_Group_free(MPI_Group *group);
-int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm);
-#endif
-
-
-//! Get the size of the MPI_Comm
-// Note: this is a thread and interrupt safe function
-inline int comm_size( MPI_Comm comm ) {
-int size = 1;
-MPI_Comm_size( comm, &size );
-return size;
-}
-
-
-//! Get the rank of the MPI_Comm
-// Note: this is a thread and interrupt safe function
-inline int comm_rank( MPI_Comm comm ) {
-int rank = 1;
-MPI_Comm_rank( comm, &rank );
-return rank;
-}
-
-
-//! Get the size of MPI_COMM_WORLD
-inline int MPI_WORLD_SIZE( ) {
-return comm_size( MPI_COMM_WORLD );
-}
-
-//! Get the size of MPI_COMM_WORLD
-inline int MPI_WORLD_RANK( ) {
-return comm_rank( MPI_COMM_WORLD );
-}
-
-//! Return the appropriate MPI datatype for a class
-template<class TYPE>
-MPI_Datatype getMPItype();
-
-
-//! Template function to return the buffer size required to pack a class
-template<class TYPE>
-size_t packsize( const TYPE& rhs );
-
-//! Template function to pack a class to a buffer
-template<class TYPE>
-void pack( const TYPE& rhs, char *buffer );
-
-//! Template function to unpack a class from a buffer
-template<class TYPE>
-void unpack( TYPE& data, const char *buffer );
-
-
-//! Template function to return the buffer size required to pack a std::vector
-template<class TYPE>
-size_t packsize( const std::vector<TYPE>& rhs );
-
-//! Template function to pack a class to a buffer
-template<class TYPE>
-void pack( const std::vector<TYPE>& rhs, char *buffer );
-
-//! Template function to pack a class to a buffer
-template<class TYPE>
-void unpack( std::vector<TYPE>& data, const char *buffer );
-
-
-//! Template function to return the buffer size required to pack a std::pair
-template<class TYPE1, class TYPE2>
-size_t packsize( const std::pair<TYPE1,TYPE2>& rhs );
-
-//! Template function to pack a class to a buffer
-template<class TYPE1, class TYPE2>
-void pack( const std::pair<TYPE1,TYPE2>& rhs, char *buffer );
-
-//! Template function to pack a class to a buffer
-template<class TYPE1, class TYPE2>
-void unpack( std::pair<TYPE1,TYPE2>& data, const char *buffer );
-
-
-//! Template function to return the buffer size required to pack a std::map
-template<class TYPE1, class TYPE2>
-size_t packsize( const std::map<TYPE1,TYPE2>& rhs );
-
-//! Template function to pack a class to a buffer
-template<class TYPE1, class TYPE2>
-void pack( const std::map<TYPE1,TYPE2>& rhs, char *buffer );
-
-//! Template function to pack a class to a buffer
-template<class TYPE1, class TYPE2>
-void unpack( std::map<TYPE1,TYPE2>& data, const char *buffer );
-
-
-//! Template function to return the buffer size required to pack a std::set
-template<class TYPE>
-size_t packsize( const std::set<TYPE>& rhs );
-
-//! Template function to pack a class to a buffer
-template<class TYPE>
-void pack( const std::set<TYPE>& rhs, char *buffer );
-
-//! Template function to pack a class to a buffer
-template<class TYPE>
-void unpack( std::set<TYPE>& data, const char *buffer );
-
-
-// Helper functions
-inline double sumReduce( MPI_Comm comm, double x )
-{
-double y = 0;
-MPI_Allreduce(&x,&y,1,MPI_DOUBLE,MPI_SUM,comm);
-return y;
-}
-inline float sumReduce( MPI_Comm comm, float x )
-{
-float y = 0;
-MPI_Allreduce(&x,&y,1,MPI_FLOAT,MPI_SUM,comm);
-return y;
-}
-inline int sumReduce( MPI_Comm comm, int x )
-{
-int y = 0;
-MPI_Allreduce(&x,&y,1,MPI_INT,MPI_SUM,comm);
-return y;
-}
-inline long long sumReduce( MPI_Comm comm, long long x )
-{
-long long y = 0;
-MPI_Allreduce(&x,&y,1,MPI_LONG_LONG,MPI_SUM,comm);
-return y;
-}
-inline bool sumReduce( MPI_Comm comm, bool x )
-{
-int y = sumReduce( comm, x?1:0 );
-return y>0;
-}
-inline std::vector<float> sumReduce( MPI_Comm comm, const std::vector<float>& x )
-{
-auto y = x;
-MPI_Allreduce(x.data(),y.data(),x.size(),MPI_FLOAT,MPI_SUM,comm);
-return y;
-}
-inline std::vector<int> sumReduce( MPI_Comm comm, const std::vector<int>& x )
-{
-auto y = x;
-MPI_Allreduce(x.data(),y.data(),x.size(),MPI_INT,MPI_SUM,comm);
-return y;
-}
-inline double maxReduce( MPI_Comm comm, double x )
-{
-double y = 0;
-MPI_Allreduce(&x,&y,1,MPI_DOUBLE,MPI_MAX,comm);
-return y;
-}
-inline float maxReduce( MPI_Comm comm, float x )
-{
-float y = 0;
-MPI_Allreduce(&x,&y,1,MPI_FLOAT,MPI_MAX,comm);
-return y;
-}
-inline int maxReduce( MPI_Comm comm, int x )
-{
-int y = 0;
-MPI_Allreduce(&x,&y,1,MPI_INT,MPI_MAX,comm);
-return y;
-}
-
-
-#endif
-
-
-#include "common/MPI_Helpers.hpp"
@@ -1,154 +0,0 @@
-// This file contains wrappers for MPI routines and functions to pack/unpack data structures
-#ifndef MPI_WRAPPERS_HPP
-#define MPI_WRAPPERS_HPP
-
-#include "common/MPI_Helpers.h"
-#include <string.h>
-#include <vector>
-#include <set>
-#include <map>
-
-
-/********************************************************
-* Default instantiations for std::vector *
-********************************************************/
-template<class TYPE>
-size_t packsize( const std::vector<TYPE>& rhs )
-{
-size_t bytes = sizeof(size_t);
-for (size_t i=0; i<rhs.size(); i++)
-bytes += packsize(rhs[i]);
-return bytes;
-}
-template<class TYPE>
-void pack( const std::vector<TYPE>& rhs, char *buffer )
-{
-size_t size = rhs.size();
-memcpy(buffer,&size,sizeof(size_t));
-size_t pos = sizeof(size_t);
-for (size_t i=0; i<rhs.size(); i++) {
-pack(rhs[i],&buffer[pos]);
-pos += packsize(rhs[i]);
-}
-}
-template<class TYPE>
-void unpack( std::vector<TYPE>& data, const char *buffer )
-{
-size_t size;
-memcpy(&size,buffer,sizeof(size_t));
-data.clear();
-data.resize(size);
-size_t pos = sizeof(size_t);
-for (size_t i=0; i<data.size(); i++) {
-unpack(data[i],&buffer[pos]);
-pos += packsize(data[i]);
-}
-}
-
-
-/********************************************************
-* Default instantiations for std::pair *
-********************************************************/
-template<class TYPE1, class TYPE2>
-size_t packsize( const std::pair<TYPE1,TYPE2>& rhs )
-{
-return packsize(rhs.first)+packsize(rhs.second);
-}
-template<class TYPE1, class TYPE2>
-void pack( const std::pair<TYPE1,TYPE2>& rhs, char *buffer )
-{
-pack(rhs.first,buffer);
-pack(rhs.second,&buffer[packsize(rhs.first)]);
-}
-template<class TYPE1, class TYPE2>
-void unpack( std::pair<TYPE1,TYPE2>& data, const char *buffer )
-{
-unpack(data.first,buffer);
-unpack(data.second,&buffer[packsize(data.first)]);
-}
-
-
-/********************************************************
-* Default instantiations for std::map *
-********************************************************/
-template<class TYPE1, class TYPE2>
-size_t packsize( const std::map<TYPE1,TYPE2>& rhs )
-{
-size_t bytes = sizeof(size_t);
-typename std::map<TYPE1,TYPE2>::const_iterator it;
-for (it=rhs.begin(); it!=rhs.end(); ++it) {
-bytes += packsize(it->first);
-bytes += packsize(it->second);
-}
-return bytes;
-}
-template<class TYPE1, class TYPE2>
-void pack( const std::map<TYPE1,TYPE2>& rhs, char *buffer )
-{
-size_t N = rhs.size();
-pack(N,buffer);
-size_t pos = sizeof(size_t);
-typename std::map<TYPE1,TYPE2>::const_iterator it;
-for (it=rhs.begin(); it!=rhs.end(); ++it) {
-pack(it->first,&buffer[pos]); pos+=packsize(it->first);
-pack(it->second,&buffer[pos]); pos+=packsize(it->second);
-}
-}
-template<class TYPE1, class TYPE2>
-void unpack( std::map<TYPE1,TYPE2>& data, const char *buffer )
-{
-size_t N = 0;
-unpack(N,buffer);
-size_t pos = sizeof(size_t);
-data.clear();
-for (size_t i=0; i<N; i++) {
-std::pair<TYPE1,TYPE2> tmp;
-unpack(tmp.first,&buffer[pos]); pos+=packsize(tmp.first);
-unpack(tmp.second,&buffer[pos]); pos+=packsize(tmp.second);
-data.insert(tmp);
-}
-}
-
-
-/********************************************************
-* Default instantiations for std::set *
-********************************************************/
-template<class TYPE>
-size_t packsize( const std::set<TYPE>& rhs )
-{
-size_t bytes = sizeof(size_t);
-typename std::set<TYPE>::const_iterator it;
-for (it=rhs.begin(); it!=rhs.end(); ++it) {
-bytes += packsize(*it);
-}
-return bytes;
-}
-template<class TYPE>
-void pack( const std::set<TYPE>& rhs, char *buffer )
-{
-size_t N = rhs.size();
-pack(N,buffer);
-size_t pos = sizeof(size_t);
-typename std::set<TYPE>::const_iterator it;
-for (it=rhs.begin(); it!=rhs.end(); ++it) {
-pack(*it); pos+=packsize(*it);
-}
-}
-template<class TYPE>
-void unpack( std::set<TYPE>& data, const char *buffer )
-{
-size_t N = 0;
-unpack(N,buffer);
-size_t pos = sizeof(size_t);
-data.clear();
-for (size_t i=0; i<N; i++) {
-TYPE tmp;
-unpack(tmp,&buffer[pos]); pos+=packsize(tmp);
-data.insert(tmp);
-}
-}
-
-
-#endif
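The deleted MPI_Helpers files above provided free-function helpers (comm_size, MPI_WORLD_RANK, sumReduce(comm, x), maxReduce(comm, x)) plus pack/unpack templates and a fake serial MPI layer. Judging only from the hunks in this commit, their call sites now use member functions of the Utilities::MPI wrapper instead; a before/after comparison of the forms that actually appear in the diff:

    // Before (free helpers from common/MPI_Helpers.h, now deleted):
    //     err = maxReduce( Dm.Comm, err );
    //     int nprocs = comm_size( comm );
    //     int rank = MPI_WORLD_RANK();
    // After (member functions on the Utilities::MPI wrapper, as used above):
    //     err = Dm.Comm.maxReduce( err );
    //     int nprocs = comm.getSize();
    //     int rank = Utilities::MPI( MPI_COMM_WORLD ).getRank();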