Adding MPI wrapper class
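The diff below swaps direct MPI calls (MPI_Barrier, MPI_Allreduce, MPI_Allgather, MPI_Isend/MPI_Irecv, MPI_Bcast) for methods on a Utilities::MPI communicator wrapper declared in common/MPI.h, and Domain::Comm / runAnalysis::d_comm become wrapper objects instead of raw MPI_Comm handles. As orientation only, here is a minimal sketch of the kind of interface the changed call sites assume; it is not the actual common/MPI.h from this commit, which also provides templated reductions, gathers, nonblocking send/receive returning MPI_Request, bcast, dup, isNull, and more:

// Illustrative sketch only -- not the real common/MPI.h from this commit.
#include <mpi.h>

namespace Utilities {
class MPI {
public:
    explicit MPI( MPI_Comm comm = MPI_COMM_WORLD ) : d_comm( comm ) {}
    int  getRank() const { int r; MPI_Comm_rank( d_comm, &r ); return r; }
    int  getSize() const { int s; MPI_Comm_size( d_comm, &s ); return s; }
    void barrier() const { MPI_Barrier( d_comm ); }
    // Reductions return the global result instead of writing through an output pointer
    double sumReduce( double x ) const {
        double y = 0; MPI_Allreduce( &x, &y, 1, MPI_DOUBLE, MPI_SUM, d_comm ); return y;
    }
    double maxReduce( double x ) const {
        double y = 0; MPI_Allreduce( &x, &y, 1, MPI_DOUBLE, MPI_MAX, d_comm ); return y;
    }
    // Escape hatch for code that still calls MPI directly (e.g. the MPI_Sendrecv calls below)
    MPI_Comm getCommunicator() const { return d_comm; }
private:
    MPI_Comm d_comm;
};
} // namespace Utilities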
@@ -4,7 +4,7 @@
 #include "common/Domain.h"
 #include "common/Communication.h"
 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -109,13 +109,13 @@ void Minkowski::ComputeScalar(const DoubleArray& Field, const double isovalue)
 // convert X for 2D manifold to 3D object
 Xi *= 0.5;

-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();
 // Phase averages
-MPI_Allreduce(&Vi,&Vi_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Xi,&Xi_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Ai,&Ai_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Ji,&Ji_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Barrier(Dm->Comm);
+Vi_global = Dm->Comm.sumReduce( Vi );
+Xi_global = Dm->Comm.sumReduce( Xi );
+Ai_global = Dm->Comm.sumReduce( Ai );
+Ji_global = Dm->Comm.sumReduce( Ji );
+Dm->Comm.barrier();
 PROFILE_STOP("ComputeScalar");
 }

@@ -168,7 +168,7 @@ int Minkowski::MeasureConnectedPathway(){
 double vF=0.0;
 n_connected_components = ComputeGlobalBlobIDs(Nx-2,Ny-2,Nz-2,Dm->rank_info,distance,distance,vF,vF,label,Dm->Comm);
 // int n_connected_components = ComputeGlobalPhaseComponent(Nx-2,Ny-2,Nz-2,Dm->rank_info,const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, Dm->Comm )
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();

 for (int k=0; k<Nz; k++){
 for (int j=0; j<Ny; j++){
@@ -12,7 +12,7 @@
 #include "analysis/distance.h"

 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -229,25 +229,25 @@ void SubPhase::Basic(){
 }
 }
 }
-gwb.V=sumReduce( Dm->Comm, wb.V);
-gnb.V=sumReduce( Dm->Comm, nb.V);
-gwb.M=sumReduce( Dm->Comm, wb.M);
-gnb.M=sumReduce( Dm->Comm, nb.M);
-gwb.Px=sumReduce( Dm->Comm, wb.Px);
-gwb.Py=sumReduce( Dm->Comm, wb.Py);
-gwb.Pz=sumReduce( Dm->Comm, wb.Pz);
-gnb.Px=sumReduce( Dm->Comm, nb.Px);
-gnb.Py=sumReduce( Dm->Comm, nb.Py);
-gnb.Pz=sumReduce( Dm->Comm, nb.Pz);
+gwb.V = Dm->Comm.sumReduce( wb.V);
+gnb.V = Dm->Comm.sumReduce( nb.V);
+gwb.M = Dm->Comm.sumReduce( wb.M);
+gnb.M = Dm->Comm.sumReduce( nb.M);
+gwb.Px = Dm->Comm.sumReduce( wb.Px);
+gwb.Py = Dm->Comm.sumReduce( wb.Py);
+gwb.Pz = Dm->Comm.sumReduce( wb.Pz);
+gnb.Px = Dm->Comm.sumReduce( nb.Px);
+gnb.Py = Dm->Comm.sumReduce( nb.Py);
+gnb.Pz = Dm->Comm.sumReduce( nb.Pz);

-count_w=sumReduce( Dm->Comm, count_w);
-count_n=sumReduce( Dm->Comm, count_n);
+count_w = Dm->Comm.sumReduce( count_w);
+count_n = Dm->Comm.sumReduce( count_n);
 if (count_w > 0.0)
-gwb.p=sumReduce( Dm->Comm, wb.p) / count_w;
+gwb.p = Dm->Comm.sumReduce(wb.p) / count_w;
 else
 gwb.p = 0.0;
 if (count_n > 0.0)
-gnb.p=sumReduce( Dm->Comm, nb.p) / count_n;
+gnb.p = Dm->Comm.sumReduce( nb.p) / count_n;
 else
 gnb.p = 0.0;

@@ -444,14 +444,14 @@ void SubPhase::Full(){
 nd.X -= nc.X;

 // compute global entities
-gnc.V=sumReduce( Dm->Comm, nc.V);
-gnc.A=sumReduce( Dm->Comm, nc.A);
-gnc.H=sumReduce( Dm->Comm, nc.H);
-gnc.X=sumReduce( Dm->Comm, nc.X);
-gnd.V=sumReduce( Dm->Comm, nd.V);
-gnd.A=sumReduce( Dm->Comm, nd.A);
-gnd.H=sumReduce( Dm->Comm, nd.H);
-gnd.X=sumReduce( Dm->Comm, nd.X);
+gnc.V = Dm->Comm.sumReduce( nc.V );
+gnc.A = Dm->Comm.sumReduce( nc.A );
+gnc.H = Dm->Comm.sumReduce( nc.H );
+gnc.X = Dm->Comm.sumReduce( nc.X );
+gnd.V = Dm->Comm.sumReduce( nd.V );
+gnd.A = Dm->Comm.sumReduce( nd.A );
+gnd.H = Dm->Comm.sumReduce( nd.H );
+gnd.X = Dm->Comm.sumReduce( nd.X );
 gnd.Nc = nd.Nc;
 // wetting
 for (k=0; k<Nz; k++){
@@ -491,14 +491,14 @@ void SubPhase::Full(){
 wd.H -= wc.H;
 wd.X -= wc.X;
 // compute global entities
-gwc.V=sumReduce( Dm->Comm, wc.V);
-gwc.A=sumReduce( Dm->Comm, wc.A);
-gwc.H=sumReduce( Dm->Comm, wc.H);
-gwc.X=sumReduce( Dm->Comm, wc.X);
-gwd.V=sumReduce( Dm->Comm, wd.V);
-gwd.A=sumReduce( Dm->Comm, wd.A);
-gwd.H=sumReduce( Dm->Comm, wd.H);
-gwd.X=sumReduce( Dm->Comm, wd.X);
+gwc.V = Dm->Comm.sumReduce( wc.V );
+gwc.A = Dm->Comm.sumReduce( wc.A );
+gwc.H = Dm->Comm.sumReduce( wc.H );
+gwc.X = Dm->Comm.sumReduce( wc.X );
+gwd.V = Dm->Comm.sumReduce( wd.V );
+gwd.A = Dm->Comm.sumReduce( wd.A );
+gwd.H = Dm->Comm.sumReduce( wd.H );
+gwd.X = Dm->Comm.sumReduce( wd.X );
 gwd.Nc = wd.Nc;

 /* Set up geometric analysis of interface region */
@@ -526,20 +526,20 @@ void SubPhase::Full(){
 iwn.A = morph_i->A();
 iwn.H = morph_i->H();
 iwn.X = morph_i->X();
-giwn.V=sumReduce( Dm->Comm, iwn.V);
-giwn.A=sumReduce( Dm->Comm, iwn.A);
-giwn.H=sumReduce( Dm->Comm, iwn.H);
-giwn.X=sumReduce( Dm->Comm, iwn.X);
+giwn.V = Dm->Comm.sumReduce( iwn.V );
+giwn.A = Dm->Comm.sumReduce( iwn.A );
+giwn.H = Dm->Comm.sumReduce( iwn.H );
+giwn.X = Dm->Comm.sumReduce( iwn.X );
 // measure only the connected part
 iwnc.Nc = morph_i->MeasureConnectedPathway();
 iwnc.V = morph_i->V();
 iwnc.A = morph_i->A();
 iwnc.H = morph_i->H();
 iwnc.X = morph_i->X();
-giwnc.V=sumReduce( Dm->Comm, iwnc.V);
-giwnc.A=sumReduce( Dm->Comm, iwnc.A);
-giwnc.H=sumReduce( Dm->Comm, iwnc.H);
-giwnc.X=sumReduce( Dm->Comm, iwnc.X);
+giwnc.V = Dm->Comm.sumReduce( iwnc.V );
+giwnc.A = Dm->Comm.sumReduce( iwnc.A );
+giwnc.H = Dm->Comm.sumReduce( iwnc.H );
+giwnc.X = Dm->Comm.sumReduce( iwnc.X );
 giwnc.Nc = iwnc.Nc;

 double vol_nc_bulk = 0.0;
@@ -630,46 +630,46 @@ void SubPhase::Full(){
 }
 }

-gnd.M=sumReduce( Dm->Comm, nd.M);
-gnd.Px=sumReduce( Dm->Comm, nd.Px);
-gnd.Py=sumReduce( Dm->Comm, nd.Py);
-gnd.Pz=sumReduce( Dm->Comm, nd.Pz);
-gnd.K=sumReduce( Dm->Comm, nd.K);
+gnd.M = Dm->Comm.sumReduce( nd.M );
+gnd.Px = Dm->Comm.sumReduce( nd.Px );
+gnd.Py = Dm->Comm.sumReduce( nd.Py );
+gnd.Pz = Dm->Comm.sumReduce( nd.Pz );
+gnd.K = Dm->Comm.sumReduce( nd.K );

-gwd.M=sumReduce( Dm->Comm, wd.M);
-gwd.Px=sumReduce( Dm->Comm, wd.Px);
-gwd.Py=sumReduce( Dm->Comm, wd.Py);
-gwd.Pz=sumReduce( Dm->Comm, wd.Pz);
-gwd.K=sumReduce( Dm->Comm, wd.K);
+gwd.M = Dm->Comm.sumReduce( wd.M );
+gwd.Px = Dm->Comm.sumReduce( wd.Px );
+gwd.Py = Dm->Comm.sumReduce( wd.Py );
+gwd.Pz = Dm->Comm.sumReduce( wd.Pz );
+gwd.K = Dm->Comm.sumReduce( wd.K );

-gnc.M=sumReduce( Dm->Comm, nc.M);
-gnc.Px=sumReduce( Dm->Comm, nc.Px);
-gnc.Py=sumReduce( Dm->Comm, nc.Py);
-gnc.Pz=sumReduce( Dm->Comm, nc.Pz);
-gnc.K=sumReduce( Dm->Comm, nc.K);
+gnc.M = Dm->Comm.sumReduce( nc.M );
+gnc.Px = Dm->Comm.sumReduce( nc.Px );
+gnc.Py = Dm->Comm.sumReduce( nc.Py );
+gnc.Pz = Dm->Comm.sumReduce( nc.Pz );
+gnc.K = Dm->Comm.sumReduce( nc.K );

-gwc.M=sumReduce( Dm->Comm, wc.M);
-gwc.Px=sumReduce( Dm->Comm, wc.Px);
-gwc.Py=sumReduce( Dm->Comm, wc.Py);
-gwc.Pz=sumReduce( Dm->Comm, wc.Pz);
-gwc.K=sumReduce( Dm->Comm, wc.K);
+gwc.M = Dm->Comm.sumReduce( wc.M );
+gwc.Px = Dm->Comm.sumReduce( wc.Px );
+gwc.Py = Dm->Comm.sumReduce( wc.Py );
+gwc.Pz = Dm->Comm.sumReduce( wc.Pz );
+gwc.K = Dm->Comm.sumReduce( wc.K );

-giwn.Mn=sumReduce( Dm->Comm, iwn.Mn);
-giwn.Pnx=sumReduce( Dm->Comm, iwn.Pnx);
-giwn.Pny=sumReduce( Dm->Comm, iwn.Pny);
-giwn.Pnz=sumReduce( Dm->Comm, iwn.Pnz);
-giwn.Kn=sumReduce( Dm->Comm, iwn.Kn);
-giwn.Mw=sumReduce( Dm->Comm, iwn.Mw);
-giwn.Pwx=sumReduce( Dm->Comm, iwn.Pwx);
-giwn.Pwy=sumReduce( Dm->Comm, iwn.Pwy);
-giwn.Pwz=sumReduce( Dm->Comm, iwn.Pwz);
-giwn.Kw=sumReduce( Dm->Comm, iwn.Kw);
+giwn.Mn = Dm->Comm.sumReduce( iwn.Mn );
+giwn.Pnx = Dm->Comm.sumReduce( iwn.Pnx );
+giwn.Pny = Dm->Comm.sumReduce( iwn.Pny );
+giwn.Pnz = Dm->Comm.sumReduce( iwn.Pnz );
+giwn.Kn = Dm->Comm.sumReduce( iwn.Kn );
+giwn.Mw = Dm->Comm.sumReduce( iwn.Mw );
+giwn.Pwx = Dm->Comm.sumReduce( iwn.Pwx );
+giwn.Pwy = Dm->Comm.sumReduce( iwn.Pwy );
+giwn.Pwz = Dm->Comm.sumReduce( iwn.Pwz );
+giwn.Kw = Dm->Comm.sumReduce( iwn.Kw );

 // pressure averaging
-gnc.p=sumReduce( Dm->Comm, nc.p);
-gnd.p=sumReduce( Dm->Comm, nd.p);
-gwc.p=sumReduce( Dm->Comm, wc.p);
-gwd.p=sumReduce( Dm->Comm, wd.p);
+gnc.p = Dm->Comm.sumReduce( nc.p );
+gnd.p = Dm->Comm.sumReduce( nd.p );
+gwc.p = Dm->Comm.sumReduce( wc.p );
+gwd.p = Dm->Comm.sumReduce( wd.p );

 if (vol_wc_bulk > 0.0)
 wc.p = wc.p /vol_wc_bulk;
@@ -680,10 +680,10 @@ void SubPhase::Full(){
 if (vol_nd_bulk > 0.0)
 nd.p = nd.p /vol_nd_bulk;

-vol_wc_bulk=sumReduce( Dm->Comm, vol_wc_bulk);
-vol_wd_bulk=sumReduce( Dm->Comm, vol_wd_bulk);
-vol_nc_bulk=sumReduce( Dm->Comm, vol_nc_bulk);
-vol_nd_bulk=sumReduce( Dm->Comm, vol_nd_bulk);
+vol_wc_bulk = Dm->Comm.sumReduce( vol_wc_bulk );
+vol_wd_bulk = Dm->Comm.sumReduce( vol_wd_bulk );
+vol_nc_bulk = Dm->Comm.sumReduce( vol_nc_bulk );
+vol_nd_bulk = Dm->Comm.sumReduce( vol_nd_bulk );

 if (vol_wc_bulk > 0.0)
 gwc.p = gwc.p /vol_wc_bulk;
@@ -719,7 +719,7 @@ void SubPhase::AggregateLabels( const std::string& filename )
 }
 }
 }
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();

 Dm->AggregateLabels( filename );

@@ -12,7 +12,7 @@
 #include "analysis/distance.h"
 #include "analysis/Minkowski.h"
 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -5,7 +5,7 @@
 #include "common/Domain.h"
 #include "common/Communication.h"
 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -882,7 +882,7 @@ void TwoPhase::ComponentAverages()
 }
 }

-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();
 if (Dm->rank()==0){
 printf("Component averages computed locally -- reducing result... \n");
 }
@@ -895,8 +895,8 @@ void TwoPhase::ComponentAverages()
 for (int idx=0; idx<BLOB_AVG_COUNT; idx++) ComponentAverages_NWP(idx,b)=RecvBuffer(idx);
 }
 */
-MPI_Barrier(Dm->Comm);
-MPI_Allreduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT*NumberComponents_NWP, MPI_DOUBLE,MPI_SUM,Dm->Comm);
+Dm->Comm.barrier();
+Dm->Comm.sumReduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT*NumberComponents_NWP);
 // MPI_Reduce(ComponentAverages_NWP.data(),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,0,Dm->Comm);

 if (Dm->rank()==0){
@@ -993,9 +993,9 @@ void TwoPhase::ComponentAverages()

 // reduce the wetting phase averages
 for (int b=0; b<NumberComponents_WP; b++){
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();
 // MPI_Allreduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Reduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT,MPI_DOUBLE,MPI_SUM,0,Dm->Comm);
+Dm->Comm.sumReduce(&ComponentAverages_WP(0,b),RecvBuffer.data(),BLOB_AVG_COUNT);
 for (int idx=0; idx<BLOB_AVG_COUNT; idx++) ComponentAverages_WP(idx,b)=RecvBuffer(idx);
 }

@@ -1078,43 +1078,42 @@ void TwoPhase::Reduce()
 int i;
 double iVol_global=1.0/Volume;
 //...........................................................................
-MPI_Barrier(Dm->Comm);
-MPI_Allreduce(&nwp_volume,&nwp_volume_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&wp_volume,&wp_volume_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&awn,&awn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&ans,&ans_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&aws,&aws_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&lwns,&lwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&As,&As_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Jwn,&Jwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Kwn,&Kwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&KGwns,&KGwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&KNwns,&KNwns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&efawns,&efawns_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&wwndnw,&wwndnw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&wwnsdnwn,&wwnsdnwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Jwnwwndnw,&Jwnwwndnw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
+Dm->Comm.barrier();
+nwp_volume_global = Dm->Comm.sumReduce( nwp_volume );
+wp_volume_global = Dm->Comm.sumReduce( wp_volume );
+awn_global = Dm->Comm.sumReduce( awn );
+ans_global = Dm->Comm.sumReduce( ans );
+aws_global = Dm->Comm.sumReduce( aws );
+lwns_global = Dm->Comm.sumReduce( lwns );
+As_global = Dm->Comm.sumReduce( As );
+Jwn_global = Dm->Comm.sumReduce( Jwn );
+Kwn_global = Dm->Comm.sumReduce( Kwn );
+KGwns_global = Dm->Comm.sumReduce( KGwns );
+KNwns_global = Dm->Comm.sumReduce( KNwns );
+efawns_global = Dm->Comm.sumReduce( efawns );
+wwndnw_global = Dm->Comm.sumReduce( wwndnw );
+wwnsdnwn_global = Dm->Comm.sumReduce( wwnsdnwn );
+Jwnwwndnw_global = Dm->Comm.sumReduce( Jwnwwndnw );
 // Phase averages
-MPI_Allreduce(&vol_w,&vol_w_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&vol_n,&vol_n_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&paw,&paw_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&pan,&pan_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&vaw(0),&vaw_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&van(0),&van_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&vawn(0),&vawn_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&vawns(0),&vawns_global(0),3,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Gwn(0),&Gwn_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Gns(0),&Gns_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Gws(0),&Gws_global(0),6,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&trawn,&trawn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&trJwn,&trJwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&trRwn,&trRwn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&euler,&euler_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&An,&An_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Jn,&Jn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&Kn,&Kn_global,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-
-MPI_Barrier(Dm->Comm);
+vol_w_global = Dm->Comm.sumReduce( vol_w );
+vol_n_global = Dm->Comm.sumReduce( vol_n );
+paw_global = Dm->Comm.sumReduce( paw );
+pan_global = Dm->Comm.sumReduce( pan );
+vaw_global(0) = Dm->Comm.sumReduce( vaw(0) );
+van_global(0) = Dm->Comm.sumReduce( van(0) );
+vawn_global(0) = Dm->Comm.sumReduce( vawn(0) );
+vawns_global(0) = Dm->Comm.sumReduce( vawns(0) );
+Gwn_global(0) = Dm->Comm.sumReduce( Gwn(0) );
+Gns_global(0) = Dm->Comm.sumReduce( Gns(0) );
+Gws_global(0) = Dm->Comm.sumReduce( Gws(0) );
+trawn_global = Dm->Comm.sumReduce( trawn );
+trJwn_global = Dm->Comm.sumReduce( trJwn );
+trRwn_global = Dm->Comm.sumReduce( trRwn );
+euler_global = Dm->Comm.sumReduce( euler );
+An_global = Dm->Comm.sumReduce( An );
+Jn_global = Dm->Comm.sumReduce( Jn );
+Kn_global = Dm->Comm.sumReduce( Kn );
+Dm->Comm.barrier();

 // Normalize the phase averages
 // (density of both components = 1.0)
@@ -12,7 +12,7 @@
 #include "common/Domain.h"
 #include "common/Communication.h"
 #include "common/Utilities.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "IO/MeshDatabase.h"
 #include "IO/Reader.h"
 #include "IO/Writer.h"
@@ -188,7 +188,7 @@ int ComputeLocalPhaseComponent(const IntArray &PhaseID, int &VALUE, BlobIDArray
 /******************************************************************
 * Reorder the global blob ids *
 ******************************************************************/
-static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int ngz, MPI_Comm comm )
+static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int ngz, const Utilities::MPI& comm )
 {
 if ( N_blobs==0 )
 return 0;
@@ -212,7 +212,7 @@ static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int
 }
 }
 ASSERT(max_id<N_blobs);
-MPI_Allreduce(local_size,global_size,N_blobs,MPI_DOUBLE,MPI_SUM,comm);
+comm.sumReduce(local_size,global_size,N_blobs);
 std::vector<std::pair<double,int> > map1(N_blobs);
 int N_blobs2 = 0;
 for (int i=0; i<N_blobs; i++) {
@@ -235,12 +235,12 @@ static int ReorderBlobIDs2( BlobIDArray& ID, int N_blobs, int ngx, int ngy, int
 PROFILE_STOP("ReorderBlobIDs2",1);
 return N_blobs2;
 }
-void ReorderBlobIDs( BlobIDArray& ID, MPI_Comm comm )
+void ReorderBlobIDs( BlobIDArray& ID, const Utilities::MPI& comm )
 {
 PROFILE_START("ReorderBlobIDs");
 int tmp = ID.max()+1;
 int N_blobs = 0;
-MPI_Allreduce(&tmp,&N_blobs,1,MPI_INT,MPI_MAX,comm);
+N_blobs = comm.maxReduce( tmp );
 ReorderBlobIDs2(ID,N_blobs,1,1,1,comm);
 PROFILE_STOP("ReorderBlobIDs");
 }
@@ -260,30 +260,29 @@ static void updateRemoteIds(
 int N_send, const std::vector<int>& N_recv,
 int64_t *send_buf, std::vector<int64_t*>& recv_buf,
 std::map<int64_t,int64_t>& remote_map,
-MPI_Comm comm )
+const Utilities::MPI& comm )
 {
 std::vector<MPI_Request> send_req(neighbors.size());
 std::vector<MPI_Request> recv_req(neighbors.size());
-std::vector<MPI_Status> status(neighbors.size());
-std::map<int64_t,global_id_info_struct>::const_iterator it = map.begin();
+auto it = map.begin();
 ASSERT(N_send==(int)map.size());
 for (size_t i=0; i<map.size(); i++, ++it) {
 send_buf[2*i+0] = it->first;
 send_buf[2*i+1] = it->second.new_id;
 }
 for (size_t i=0; i<neighbors.size(); i++) {
-MPI_Isend( send_buf, 2*N_send, MPI_LONG_LONG, neighbors[i], 0, comm, &send_req[i] );
-MPI_Irecv( recv_buf[i], 2*N_recv[i], MPI_LONG_LONG, neighbors[i], 0, comm, &recv_req[i] );
+send_req[i] = comm.Isend( send_buf, 2*N_send, neighbors[i], 0 );
+recv_req[i] = comm.Irecv( recv_buf[i], 2*N_recv[i], neighbors[i], 0 );
 }
 for (it=map.begin(); it!=map.end(); ++it) {
 remote_map[it->first] = it->second.new_id;
 }
 for (size_t i=0; i<neighbors.size(); i++) {
-MPI_Wait(&recv_req[i],&status[i]);
+comm.wait( recv_req[i] );
 for (int j=0; j<N_recv[i]; j++)
 remote_map[recv_buf[i][2*j+0]] = recv_buf[i][2*j+1];
 }
-MPI_Waitall(neighbors.size(),getPtr(send_req),getPtr(status));
+comm.waitAll(neighbors.size(),getPtr(send_req));
 }
 // Compute a new local id for each local id
 static bool updateLocalIds( const std::map<int64_t,int64_t>& remote_map,
@@ -304,18 +303,18 @@ static bool updateLocalIds( const std::map<int64_t,int64_t>& remote_map,
 return changed;
 }
 static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
-int nblobs, BlobIDArray& IDs, MPI_Comm comm )
+int nblobs, BlobIDArray& IDs, const Utilities::MPI& comm )
 {
 PROFILE_START("LocalToGlobalIDs",1);
 const int rank = rank_info.rank[1][1][1];
-int nprocs = comm_size(comm);
+int nprocs = comm.getSize();
 const int ngx = (IDs.size(0)-nx)/2;
 const int ngy = (IDs.size(1)-ny)/2;
 const int ngz = (IDs.size(2)-nz)/2;
 // Get the number of blobs for each rank
 std::vector<int> N_blobs(nprocs,0);
 PROFILE_START("LocalToGlobalIDs-Allgather",1);
-MPI_Allgather(&nblobs,1,MPI_INT,getPtr(N_blobs),1,MPI_INT,comm);
+comm.allGather(nblobs,getPtr(N_blobs));
 PROFILE_STOP("LocalToGlobalIDs-Allgather",1);
 int64_t N_blobs_tot = 0;
 int offset = 0;
@@ -363,13 +362,12 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_
 std::vector<int> N_recv(neighbors.size(),0);
 std::vector<MPI_Request> send_req(neighbors.size());
 std::vector<MPI_Request> recv_req(neighbors.size());
-std::vector<MPI_Status> status(neighbors.size());
 for (size_t i=0; i<neighbors.size(); i++) {
-MPI_Isend( &N_send, 1, MPI_INT, neighbors[i], 0, comm, &send_req[i] );
-MPI_Irecv( &N_recv[i], 1, MPI_INT, neighbors[i], 0, comm, &recv_req[i] );
+send_req[i] = comm.Isend( &N_send, 1, neighbors[i], 0 );
+recv_req[i] = comm.Irecv( &N_recv[i], 1, neighbors[i], 0 );
 }
-MPI_Waitall(neighbors.size(),getPtr(send_req),getPtr(status));
-MPI_Waitall(neighbors.size(),getPtr(recv_req),getPtr(status));
+comm.waitAll(neighbors.size(),getPtr(send_req));
+comm.waitAll(neighbors.size(),getPtr(recv_req));
 // Allocate memory for communication
 int64_t *send_buf = new int64_t[2*N_send];
 std::vector<int64_t*> recv_buf(neighbors.size());
@@ -398,8 +396,7 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_
 bool changed = updateLocalIds( remote_map, map );
 // Check if we are finished
 int test = changed ? 1:0;
-int result = 0;
-MPI_Allreduce(&test,&result,1,MPI_INT,MPI_SUM,comm);
+int result = comm.sumReduce( test );
 if ( result==0 )
 break;
 }
@@ -435,7 +432,7 @@ static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_
 }
 int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
 const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS,
-BlobIDArray& GlobalBlobID, MPI_Comm comm )
+BlobIDArray& GlobalBlobID, const Utilities::MPI& comm )
 {
 PROFILE_START("ComputeGlobalBlobIDs");
 // First compute the local ids
@@ -446,7 +443,7 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_inf
 return nglobal;
 }
 int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info,
-const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, MPI_Comm comm )
+const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, const Utilities::MPI& comm )
 {
 PROFILE_START("ComputeGlobalPhaseComponent");
 // First compute the local ids
@@ -462,37 +459,27 @@ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& r
 * Compute the mapping of blob ids between timesteps *
 ******************************************************************/
 typedef std::map<BlobIDType,std::map<BlobIDType,int64_t> > map_type;
-template<class TYPE> inline MPI_Datatype getMPIType();
-template<> inline MPI_Datatype getMPIType<int32_t>() { return MPI_INT; }
-template<> inline MPI_Datatype getMPIType<int64_t>() {
-if ( sizeof(int64_t)==sizeof(long int) )
-return MPI_LONG;
-else if ( sizeof(int64_t)==sizeof(double) )
-return MPI_DOUBLE;
-}
 template<class TYPE>
-void gatherSet( std::set<TYPE>& set, MPI_Comm comm )
+void gatherSet( std::set<TYPE>& set, const Utilities::MPI& comm )
 {
-int nprocs = comm_size(comm);
-MPI_Datatype type = getMPIType<TYPE>();
+int nprocs = comm.getSize();
 std::vector<TYPE> send_data(set.begin(),set.end());
 int send_count = send_data.size();
 std::vector<int> recv_count(nprocs,0), recv_disp(nprocs,0);
-MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,comm);
+comm.allGather( send_count, getPtr(recv_count) );
 for (int i=1; i<nprocs; i++)
 recv_disp[i] = recv_disp[i-1] + recv_count[i-1];
 std::vector<TYPE> recv_data(recv_disp[nprocs-1]+recv_count[nprocs-1]);
-MPI_Allgatherv(getPtr(send_data),send_count,type,
-getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type,comm);
+comm.allGather( getPtr(send_data), send_count, getPtr(recv_data),
+getPtr(recv_count), getPtr(recv_disp), true );
 for (size_t i=0; i<recv_data.size(); i++)
 set.insert(recv_data[i]);
 }
-void gatherSrcIDMap( map_type& src_map, MPI_Comm comm )
+void gatherSrcIDMap( map_type& src_map, const Utilities::MPI& comm )
 {
-int nprocs = comm_size(comm);
-MPI_Datatype type = getMPIType<int64_t>();
+int nprocs = comm.getSize();
 std::vector<int64_t> send_data;
-for (map_type::const_iterator it=src_map.begin(); it!=src_map.end(); ++it) {
+for (auto it=src_map.begin(); it!=src_map.end(); ++it) {
 int id = it->first;
 const std::map<BlobIDType,int64_t>& src_ids = it->second;
 send_data.push_back(id);
@@ -505,21 +492,21 @@ void gatherSrcIDMap( map_type& src_map, MPI_Comm comm )
 }
 int send_count = send_data.size();
 std::vector<int> recv_count(nprocs,0), recv_disp(nprocs,0);
-MPI_Allgather(&send_count,1,MPI_INT,getPtr(recv_count),1,MPI_INT,comm);
+comm.allGather(send_count,getPtr(recv_count));
 for (int i=1; i<nprocs; i++)
 recv_disp[i] = recv_disp[i-1] + recv_count[i-1];
 std::vector<int64_t> recv_data(recv_disp[nprocs-1]+recv_count[nprocs-1]);
-MPI_Allgatherv(getPtr(send_data),send_count,type,
-getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),type,comm);
+comm.allGather(getPtr(send_data),send_count,
+getPtr(recv_data),getPtr(recv_count),getPtr(recv_disp),true);
 size_t i=0;
 src_map.clear();
 while ( i < recv_data.size() ) {
 BlobIDType id = recv_data[i];
 size_t count = recv_data[i+1];
 i += 2;
-std::map<BlobIDType,int64_t>& src_ids = src_map[id];
+auto& src_ids = src_map[id];
 for (size_t j=0; j<count; j++,i+=2) {
-std::map<BlobIDType,int64_t>::iterator it = src_ids.find(recv_data[i]);
+auto it = src_ids.find(recv_data[i]);
 if ( it == src_ids.end() )
 src_ids.insert(std::pair<BlobIDType,int64_t>(recv_data[i],recv_data[i+1]));
 else
@@ -538,7 +525,7 @@ void addSrcDstIDs( BlobIDType src_id, map_type& src_map, map_type& dst_map,
 }
 }
 ID_map_struct computeIDMap( int nx, int ny, int nz,
-const BlobIDArray& ID1, const BlobIDArray& ID2, MPI_Comm comm )
+const BlobIDArray& ID1, const BlobIDArray& ID2, const Utilities::MPI& comm )
 {
 ASSERT(ID1.size()==ID2.size());
 PROFILE_START("computeIDMap");
@@ -780,7 +767,7 @@ void renumberIDs( const std::vector<BlobIDType>& new_ids, BlobIDArray& IDs )
 ******************************************************************/
 void writeIDMap( const ID_map_struct& map, long long int timestep, const std::string& filename )
 {
-int rank = MPI_WORLD_RANK();
+int rank = Utilities::MPI( MPI_COMM_WORLD ).getRank();
 if ( rank!=0 )
 return;
 bool empty = map.created.empty() && map.destroyed.empty() &&
@@ -58,7 +58,7 @@ int ComputeLocalPhaseComponent( const IntArray &PhaseID, int &VALUE, IntArray &C
 */
 int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
 const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS,
-BlobIDArray& GlobalBlobID, MPI_Comm comm );
+BlobIDArray& GlobalBlobID, const Utilities::MPI& comm );


 /*!
@@ -75,7 +75,7 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_inf
 * @return Return the number of components in the specified phase
 */
 int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info,
-const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, MPI_Comm comm );
+const IntArray &PhaseID, int &VALUE, BlobIDArray &GlobalBlobID, const Utilities::MPI& comm );


 /*!
@@ -87,7 +87,7 @@ int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& r
 * @param[in] nz Number of elements in the z-direction
 * @param[in/out] ID The ids of the blobs
 */
-void ReorderBlobIDs( BlobIDArray& ID, MPI_Comm comm );
+void ReorderBlobIDs( BlobIDArray& ID, const Utilities::MPI& comm );


 typedef std::pair<BlobIDType,std::vector<BlobIDType> > BlobIDSplitStruct;
@@ -120,7 +120,7 @@ struct ID_map_struct {
 * @param[in] ID1 The blob ids at the first timestep
 * @param[in] ID2 The blob ids at the second timestep
 */
-ID_map_struct computeIDMap( int nx, int ny, int nz, const BlobIDArray& ID1, const BlobIDArray& ID2, MPI_Comm comm );
+ID_map_struct computeIDMap( int nx, int ny, int nz, const BlobIDArray& ID1, const BlobIDArray& ID2, const Utilities::MPI& comm );


 /*!
@@ -176,7 +176,7 @@ void CalcVecDist( Array<Vec> &d, const Array<int> &ID0, const Domain &Dm,
 // Update distance
 double err = calcVecUpdateInterior( d, dx[0], dx[1], dx[2] );
 // Check if we are finished
-err = maxReduce( Dm.Comm, err );
+err = Dm.Comm.maxReduce( err );
 if ( err < tol )
 break;
 }
@@ -58,11 +58,11 @@ double MorphOpen(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain>
 }
 }
 }
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();

 // total Global is the number of nodes in the pore-space
-MPI_Allreduce(&count,&totalGlobal,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&maxdist,&maxdistGlobal,1,MPI_DOUBLE,MPI_MAX,Dm->Comm);
+totalGlobal = Dm->Comm.sumReduce( count );
+maxdistGlobal = Dm->Comm.sumReduce( maxdist );
 double volume=double(nprocx*nprocy*nprocz)*double(nx-2)*double(ny-2)*double(nz-2);
 double volume_fraction=totalGlobal/volume;
 if (rank==0) printf("Volume fraction for morphological opening: %f \n",volume_fraction);
@@ -133,7 +133,6 @@ double MorphOpen(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain>
 double deltaR=0.05; // amount to change the radius in voxel units
 double Rcrit_old;

-double GlobalNumber = 1.f;
 int imin,jmin,kmin,imax,jmax,kmax;

 if (ErodeLabel == 1){
@@ -203,41 +202,41 @@ double MorphOpen(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain>
 PackID(Dm->sendList_YZ, Dm->sendCount_YZ ,sendID_YZ, id);
 //......................................................................................
 MPI_Sendrecv(sendID_x,Dm->sendCount_x,MPI_CHAR,Dm->rank_x(),sendtag,
-recvID_X,Dm->recvCount_X,MPI_CHAR,Dm->rank_X(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_X,Dm->recvCount_X,MPI_CHAR,Dm->rank_X(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_X,Dm->sendCount_X,MPI_CHAR,Dm->rank_X(),sendtag,
-recvID_x,Dm->recvCount_x,MPI_CHAR,Dm->rank_x(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_x,Dm->recvCount_x,MPI_CHAR,Dm->rank_x(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_y,Dm->sendCount_y,MPI_CHAR,Dm->rank_y(),sendtag,
-recvID_Y,Dm->recvCount_Y,MPI_CHAR,Dm->rank_Y(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Y,Dm->recvCount_Y,MPI_CHAR,Dm->rank_Y(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Y,Dm->sendCount_Y,MPI_CHAR,Dm->rank_Y(),sendtag,
-recvID_y,Dm->recvCount_y,MPI_CHAR,Dm->rank_y(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_y,Dm->recvCount_y,MPI_CHAR,Dm->rank_y(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_z,Dm->sendCount_z,MPI_CHAR,Dm->rank_z(),sendtag,
-recvID_Z,Dm->recvCount_Z,MPI_CHAR,Dm->rank_Z(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Z,Dm->recvCount_Z,MPI_CHAR,Dm->rank_Z(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Z,Dm->sendCount_Z,MPI_CHAR,Dm->rank_Z(),sendtag,
-recvID_z,Dm->recvCount_z,MPI_CHAR,Dm->rank_z(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_z,Dm->recvCount_z,MPI_CHAR,Dm->rank_z(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_xy,Dm->sendCount_xy,MPI_CHAR,Dm->rank_xy(),sendtag,
-recvID_XY,Dm->recvCount_XY,MPI_CHAR,Dm->rank_XY(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_XY,Dm->recvCount_XY,MPI_CHAR,Dm->rank_XY(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_XY,Dm->sendCount_XY,MPI_CHAR,Dm->rank_XY(),sendtag,
-recvID_xy,Dm->recvCount_xy,MPI_CHAR,Dm->rank_xy(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_xy,Dm->recvCount_xy,MPI_CHAR,Dm->rank_xy(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Xy,Dm->sendCount_Xy,MPI_CHAR,Dm->rank_Xy(),sendtag,
-recvID_xY,Dm->recvCount_xY,MPI_CHAR,Dm->rank_xY(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_xY,Dm->recvCount_xY,MPI_CHAR,Dm->rank_xY(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_xY,Dm->sendCount_xY,MPI_CHAR,Dm->rank_xY(),sendtag,
-recvID_Xy,Dm->recvCount_Xy,MPI_CHAR,Dm->rank_Xy(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Xy,Dm->recvCount_Xy,MPI_CHAR,Dm->rank_Xy(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_xz,Dm->sendCount_xz,MPI_CHAR,Dm->rank_xz(),sendtag,
-recvID_XZ,Dm->recvCount_XZ,MPI_CHAR,Dm->rank_XZ(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_XZ,Dm->recvCount_XZ,MPI_CHAR,Dm->rank_XZ(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_XZ,Dm->sendCount_XZ,MPI_CHAR,Dm->rank_XZ(),sendtag,
-recvID_xz,Dm->recvCount_xz,MPI_CHAR,Dm->rank_xz(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_xz,Dm->recvCount_xz,MPI_CHAR,Dm->rank_xz(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Xz,Dm->sendCount_Xz,MPI_CHAR,Dm->rank_Xz(),sendtag,
-recvID_xZ,Dm->recvCount_xZ,MPI_CHAR,Dm->rank_xZ(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_xZ,Dm->recvCount_xZ,MPI_CHAR,Dm->rank_xZ(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_xZ,Dm->sendCount_xZ,MPI_CHAR,Dm->rank_xZ(),sendtag,
-recvID_Xz,Dm->recvCount_Xz,MPI_CHAR,Dm->rank_Xz(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Xz,Dm->recvCount_Xz,MPI_CHAR,Dm->rank_Xz(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_yz,Dm->sendCount_yz,MPI_CHAR,Dm->rank_yz(),sendtag,
-recvID_YZ,Dm->recvCount_YZ,MPI_CHAR,Dm->rank_YZ(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_YZ,Dm->recvCount_YZ,MPI_CHAR,Dm->rank_YZ(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_YZ,Dm->sendCount_YZ,MPI_CHAR,Dm->rank_YZ(),sendtag,
-recvID_yz,Dm->recvCount_yz,MPI_CHAR,Dm->rank_yz(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_yz,Dm->recvCount_yz,MPI_CHAR,Dm->rank_yz(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Yz,Dm->sendCount_Yz,MPI_CHAR,Dm->rank_Yz(),sendtag,
-recvID_yZ,Dm->recvCount_yZ,MPI_CHAR,Dm->rank_yZ(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_yZ,Dm->recvCount_yZ,MPI_CHAR,Dm->rank_yZ(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_yZ,Dm->sendCount_yZ,MPI_CHAR,Dm->rank_yZ(),sendtag,
-recvID_Yz,Dm->recvCount_Yz,MPI_CHAR,Dm->rank_Yz(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Yz,Dm->recvCount_Yz,MPI_CHAR,Dm->rank_Yz(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 //......................................................................................
 UnpackID(Dm->recvList_x, Dm->recvCount_x ,recvID_x, id);
 UnpackID(Dm->recvList_X, Dm->recvCount_X ,recvID_X, id);
@@ -259,7 +258,7 @@ double MorphOpen(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain>
 UnpackID(Dm->recvList_YZ, Dm->recvCount_YZ ,recvID_YZ, id);
 //......................................................................................

-MPI_Allreduce(&LocalNumber,&GlobalNumber,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
+//double GlobalNumber = Dm->Comm.sumReduce( LocalNumber );

 count = 0.f;
 for (int k=1; k<Nz-1; k++){
@@ -272,7 +271,7 @@ double MorphOpen(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain>
 }
 }
 }
-MPI_Allreduce(&count,&countGlobal,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
+countGlobal = Dm->Comm.sumReduce( count );
 void_fraction_new = countGlobal/totalGlobal;
 void_fraction_diff_new = abs(void_fraction_new-VoidFraction);
 /* if (rank==0){
@@ -360,11 +359,11 @@ double MorphDrain(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain
 }
 }

-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();

 // total Global is the number of nodes in the pore-space
-MPI_Allreduce(&count,&totalGlobal,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
-MPI_Allreduce(&maxdist,&maxdistGlobal,1,MPI_DOUBLE,MPI_MAX,Dm->Comm);
+totalGlobal = Dm->Comm.sumReduce( count );
+maxdistGlobal = Dm->Comm.sumReduce( maxdist );
 double volume=double(nprocx*nprocy*nprocz)*double(nx-2)*double(ny-2)*double(nz-2);
 double volume_fraction=totalGlobal/volume;
 if (rank==0) printf("Volume fraction for morphological opening: %f \n",volume_fraction);
@@ -434,7 +433,6 @@ double MorphDrain(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain
 double deltaR=0.05; // amount to change the radius in voxel units
 double Rcrit_old;

-double GlobalNumber = 1.f;
 int imin,jmin,kmin,imax,jmax,kmax;

 double Rcrit_new = maxdistGlobal;
@@ -442,7 +440,7 @@ double MorphDrain(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain
 // Rcrit_new = strtod(argv[2],NULL);
 // if (rank==0) printf("Max. distance =%f, Initial critical radius = %f \n",maxdistGlobal,Rcrit_new);
 //}
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();


 FILE *DRAIN = fopen("morphdrain.csv","w");
@@ -509,41 +507,41 @@ double MorphDrain(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain
 PackID(Dm->sendList_YZ, Dm->sendCount_YZ ,sendID_YZ, id);
 //......................................................................................
 MPI_Sendrecv(sendID_x,Dm->sendCount_x,MPI_CHAR,Dm->rank_x(),sendtag,
-recvID_X,Dm->recvCount_X,MPI_CHAR,Dm->rank_X(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_X,Dm->recvCount_X,MPI_CHAR,Dm->rank_X(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_X,Dm->sendCount_X,MPI_CHAR,Dm->rank_X(),sendtag,
-recvID_x,Dm->recvCount_x,MPI_CHAR,Dm->rank_x(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_x,Dm->recvCount_x,MPI_CHAR,Dm->rank_x(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_y,Dm->sendCount_y,MPI_CHAR,Dm->rank_y(),sendtag,
-recvID_Y,Dm->recvCount_Y,MPI_CHAR,Dm->rank_Y(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Y,Dm->recvCount_Y,MPI_CHAR,Dm->rank_Y(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Y,Dm->sendCount_Y,MPI_CHAR,Dm->rank_Y(),sendtag,
-recvID_y,Dm->recvCount_y,MPI_CHAR,Dm->rank_y(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_y,Dm->recvCount_y,MPI_CHAR,Dm->rank_y(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_z,Dm->sendCount_z,MPI_CHAR,Dm->rank_z(),sendtag,
-recvID_Z,Dm->recvCount_Z,MPI_CHAR,Dm->rank_Z(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Z,Dm->recvCount_Z,MPI_CHAR,Dm->rank_Z(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Z,Dm->sendCount_Z,MPI_CHAR,Dm->rank_Z(),sendtag,
-recvID_z,Dm->recvCount_z,MPI_CHAR,Dm->rank_z(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_z,Dm->recvCount_z,MPI_CHAR,Dm->rank_z(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_xy,Dm->sendCount_xy,MPI_CHAR,Dm->rank_xy(),sendtag,
-recvID_XY,Dm->recvCount_XY,MPI_CHAR,Dm->rank_XY(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_XY,Dm->recvCount_XY,MPI_CHAR,Dm->rank_XY(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_XY,Dm->sendCount_XY,MPI_CHAR,Dm->rank_XY(),sendtag,
-recvID_xy,Dm->recvCount_xy,MPI_CHAR,Dm->rank_xy(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_xy,Dm->recvCount_xy,MPI_CHAR,Dm->rank_xy(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Xy,Dm->sendCount_Xy,MPI_CHAR,Dm->rank_Xy(),sendtag,
-recvID_xY,Dm->recvCount_xY,MPI_CHAR,Dm->rank_xY(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_xY,Dm->recvCount_xY,MPI_CHAR,Dm->rank_xY(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_xY,Dm->sendCount_xY,MPI_CHAR,Dm->rank_xY(),sendtag,
-recvID_Xy,Dm->recvCount_Xy,MPI_CHAR,Dm->rank_Xy(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Xy,Dm->recvCount_Xy,MPI_CHAR,Dm->rank_Xy(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_xz,Dm->sendCount_xz,MPI_CHAR,Dm->rank_xz(),sendtag,
-recvID_XZ,Dm->recvCount_XZ,MPI_CHAR,Dm->rank_XZ(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_XZ,Dm->recvCount_XZ,MPI_CHAR,Dm->rank_XZ(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_XZ,Dm->sendCount_XZ,MPI_CHAR,Dm->rank_XZ(),sendtag,
-recvID_xz,Dm->recvCount_xz,MPI_CHAR,Dm->rank_xz(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_xz,Dm->recvCount_xz,MPI_CHAR,Dm->rank_xz(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Xz,Dm->sendCount_Xz,MPI_CHAR,Dm->rank_Xz(),sendtag,
-recvID_xZ,Dm->recvCount_xZ,MPI_CHAR,Dm->rank_xZ(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_xZ,Dm->recvCount_xZ,MPI_CHAR,Dm->rank_xZ(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_xZ,Dm->sendCount_xZ,MPI_CHAR,Dm->rank_xZ(),sendtag,
-recvID_Xz,Dm->recvCount_Xz,MPI_CHAR,Dm->rank_Xz(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Xz,Dm->recvCount_Xz,MPI_CHAR,Dm->rank_Xz(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_yz,Dm->sendCount_yz,MPI_CHAR,Dm->rank_yz(),sendtag,
-recvID_YZ,Dm->recvCount_YZ,MPI_CHAR,Dm->rank_YZ(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_YZ,Dm->recvCount_YZ,MPI_CHAR,Dm->rank_YZ(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_YZ,Dm->sendCount_YZ,MPI_CHAR,Dm->rank_YZ(),sendtag,
-recvID_yz,Dm->recvCount_yz,MPI_CHAR,Dm->rank_yz(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_yz,Dm->recvCount_yz,MPI_CHAR,Dm->rank_yz(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_Yz,Dm->sendCount_Yz,MPI_CHAR,Dm->rank_Yz(),sendtag,
-recvID_yZ,Dm->recvCount_yZ,MPI_CHAR,Dm->rank_yZ(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_yZ,Dm->recvCount_yZ,MPI_CHAR,Dm->rank_yZ(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 MPI_Sendrecv(sendID_yZ,Dm->sendCount_yZ,MPI_CHAR,Dm->rank_yZ(),sendtag,
-recvID_Yz,Dm->recvCount_Yz,MPI_CHAR,Dm->rank_Yz(),recvtag,Dm->Comm,MPI_STATUS_IGNORE);
+recvID_Yz,Dm->recvCount_Yz,MPI_CHAR,Dm->rank_Yz(),recvtag,Dm->Comm.getCommunicator(),MPI_STATUS_IGNORE);
 //......................................................................................
 UnpackID(Dm->recvList_x, Dm->recvCount_x ,recvID_x, id);
 UnpackID(Dm->recvList_X, Dm->recvCount_X ,recvID_X, id);
@@ -564,7 +562,7 @@ double MorphDrain(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain
 UnpackID(Dm->recvList_yZ, Dm->recvCount_yZ ,recvID_yZ, id);
 UnpackID(Dm->recvList_YZ, Dm->recvCount_YZ ,recvID_YZ, id);
 //......................................................................................
-MPI_Allreduce(&LocalNumber,&GlobalNumber,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
+// double GlobalNumber = Dm->Comm.sumReduce( LocalNumber );

 for (int k=0; k<nz; k++){
 for (int j=0; j<ny; j++){
@@ -583,7 +581,7 @@ double MorphDrain(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain
 BlobIDstruct new_index;
 double vF=0.0; double vS=0.0;
 ComputeGlobalBlobIDs(nx-2,ny-2,nz-2,Dm->rank_info,phase,SignDist,vF,vS,phase_label,Dm->Comm);
-MPI_Barrier(Dm->Comm);
+Dm->Comm.barrier();

 for (int k=0; k<nz; k++){
 for (int j=0; j<ny; j++){
@@ -645,7 +643,7 @@ double MorphDrain(DoubleArray &SignDist, signed char *id, std::shared_ptr<Domain
 }
 }
 }
-MPI_Allreduce(&count,&countGlobal,1,MPI_DOUBLE,MPI_SUM,Dm->Comm);
+countGlobal = Dm->Comm.sumReduce( count );
 void_fraction_new = countGlobal/totalGlobal;
 void_fraction_diff_new = abs(void_fraction_new-VoidFraction);
 if (rank==0){
@@ -702,7 +700,7 @@ double MorphGrow(DoubleArray &BoundaryDist, DoubleArray &Dist, Array<char> &id,
 }
 }
 }
-double count_original=sumReduce( Dm->Comm, count);
+double count_original = Dm->Comm.sumReduce( count);

 // Estimate morph_delta
 double morph_delta = 0.0;
@@ -732,8 +730,8 @@ double MorphGrow(DoubleArray &BoundaryDist, DoubleArray &Dist, Array<char> &id,
 }
 }
 }
-count=sumReduce( Dm->Comm, count);
-MAX_DISPLACEMENT = maxReduce( Dm->Comm, MAX_DISPLACEMENT);
+count = Dm->Comm.sumReduce( count );
+MAX_DISPLACEMENT = Dm->Comm.maxReduce( MAX_DISPLACEMENT );
 GrowthEstimate = count - count_original;
 ERROR = fabs((GrowthEstimate-TargetGrowth) /TargetGrowth);

@@ -776,7 +774,7 @@ double MorphGrow(DoubleArray &BoundaryDist, DoubleArray &Dist, Array<char> &id,
 }
 }
 }
-count=sumReduce( Dm->Comm, count);
+count = Dm->Comm.sumReduce( count );

 return count;
 }
@@ -3,7 +3,7 @@
 #include "analysis/analysis.h"
 #include "common/Array.h"
 #include "common/Communication.h"
-#include "common/MPI_Helpers.h"
+#include "common/MPI.h"
 #include "common/ScaLBL.h"
 #include "models/ColorModel.h"

@@ -462,7 +462,7 @@ private:
 /******************************************************************
 * MPI comm wrapper for use with analysis *
 ******************************************************************/
-runAnalysis::commWrapper::commWrapper( int tag_, MPI_Comm comm_, runAnalysis* analysis_ ):
+runAnalysis::commWrapper::commWrapper( int tag_, const Utilities::MPI& comm_, runAnalysis* analysis_ ):
 comm(comm_),
 tag(tag_),
 analysis(analysis_)
@@ -479,7 +479,7 @@ runAnalysis::commWrapper::~commWrapper()
 {
 if ( tag == -1 )
 return;
-MPI_Barrier( comm );
+comm.barrier();
 analysis->d_comm_used[tag] = false;
 }
 runAnalysis::commWrapper runAnalysis::getComm( )
@@ -496,10 +496,10 @@ runAnalysis::commWrapper runAnalysis::getComm( )
 if ( tag == -1 )
 ERROR("Unable to get comm");
 }
-MPI_Bcast( &tag, 1, MPI_INT, 0, d_comm );
+tag = d_comm.bcast( tag, 0 );
 d_comm_used[tag] = true;
-if ( d_comms[tag] == MPI_COMM_NULL )
-MPI_Comm_dup( MPI_COMM_WORLD, &d_comms[tag] );
+if ( d_comms[tag].isNull() )
+d_comms[tag] = d_comm.dup();
 return commWrapper(tag,d_comms[tag],this);
 }

@@ -507,14 +507,20 @@ runAnalysis::commWrapper runAnalysis::getComm( )
 /******************************************************************
 * Constructor/Destructors *
 ******************************************************************/
-runAnalysis::runAnalysis(std::shared_ptr<Database> input_db, const RankInfoStruct& rank_info, std::shared_ptr<ScaLBL_Communicator> ScaLBL_Comm, std::shared_ptr <Domain> Dm,
-int Np, bool Regular, IntArray Map ):
-d_Np( Np ),
-d_regular ( Regular),
-d_rank_info( rank_info ),
-d_Map( Map ),
-d_fillData(Dm->Comm,Dm->rank_info,{Dm->Nx-2,Dm->Ny-2,Dm->Nz-2},{1,1,1},0,1),
-d_ScaLBL_Comm( ScaLBL_Comm)
+runAnalysis::runAnalysis( std::shared_ptr<Database> input_db,
+const RankInfoStruct& rank_info,
+std::shared_ptr<ScaLBL_Communicator> ScaLBL_Comm,
+std::shared_ptr <Domain> Dm,
+int Np,
+bool Regular,
+IntArray Map ):
+d_Np( Np ),
+d_regular ( Regular),
+d_rank_info( rank_info ),
+d_Map( Map ),
+d_fillData(Dm->Comm,Dm->rank_info,{Dm->Nx-2,Dm->Ny-2,Dm->Nz-2},{1,1,1},0,1),
+d_comm( Utilities::MPI( MPI_COMM_WORLD ).dup() ),
+d_ScaLBL_Comm( ScaLBL_Comm)
 {

 auto db = input_db->getDatabase( "Analysis" );
@@ -552,7 +558,7 @@ runAnalysis::runAnalysis(std::shared_ptr<Database> input_db, const RankInfoStruc
 d_restartFile = restart_file + "." + rankString;


-d_rank = MPI_WORLD_RANK();
+d_rank = d_comm.getRank();
 writeIDMap(ID_map_struct(),0,id_map_filename);
 // Initialize IO for silo
 IO::initialize("","silo","false");
@@ -621,11 +627,8 @@ runAnalysis::runAnalysis(std::shared_ptr<Database> input_db, const RankInfoStruc


 // Initialize the comms
-MPI_Comm_dup(MPI_COMM_WORLD,&d_comm);
-for (int i=0; i<1024; i++) {
-d_comms[i] = MPI_COMM_NULL;
+for (int i=0; i<1024; i++)
 d_comm_used[i] = false;
-}
 // Initialize the threads
 int N_threads = db->getWithDefault<int>( "N_threads", 4 );
 auto method = db->getWithDefault<std::string>( "load_balance", "default" );
@@ -635,12 +638,6 @@ runAnalysis::~runAnalysis( )
 {
 // Finish processing analysis
 finish();
-// Clear internal data
-MPI_Comm_free( &d_comm );
-for (int i=0; i<1024; i++) {
-if ( d_comms[i] != MPI_COMM_NULL )
-MPI_Comm_free(&d_comms[i]);
-}
 }
 void runAnalysis::finish( )
 {
@@ -654,7 +651,7 @@ void runAnalysis::finish( )
 d_wait_subphase.reset();
 d_wait_restart.reset();
 // Syncronize
-MPI_Barrier( d_comm );
+d_comm.barrier();
 PROFILE_STOP("finish");
 }

@@ -68,10 +68,10 @@ public:
 class commWrapper
 {
 public:
-MPI_Comm comm;
+Utilities::MPI comm;
 int tag;
 runAnalysis *analysis;
-commWrapper( int tag, MPI_Comm comm, runAnalysis *analysis );
+commWrapper( int tag, const Utilities::MPI& comm, runAnalysis *analysis );
 commWrapper( ) = delete;
 commWrapper( const commWrapper &rhs ) = delete;
 commWrapper& operator=( const commWrapper &rhs ) = delete;
@@ -100,8 +100,8 @@ private:
 std::vector<IO::MeshDataStruct> d_meshData;
 fillHalo<double> d_fillData;
 std::string d_restartFile;
-MPI_Comm d_comm;
-MPI_Comm d_comms[1024];
+Utilities::MPI d_comm;
+Utilities::MPI d_comms[1024];
 volatile bool d_comm_used[1024];
 std::shared_ptr<ScaLBL_Communicator> d_ScaLBL_Comm;

@@ -228,8 +228,7 @@ void filter_final( Array<char>& ID, Array<float>& Dist,
 Array<float>& Mean, Array<float>& Dist1, Array<float>& Dist2 )
 {
 PROFILE_SCOPED(timer,"filter_final");
-int rank;
-MPI_Comm_rank(Dm.Comm,&rank);
+int rank = Dm.Comm.getRank();
 int Nx = Dm.Nx-2;
 int Ny = Dm.Ny-2;
 int Nz = Dm.Nz-2;
@@ -242,7 +241,7 @@ void filter_final( Array<char>& ID, Array<float>& Dist,
 float tmp = 0;
 for (size_t i=0; i<Dist0.length(); i++)
 tmp += Dist0(i)*Dist0(i);
-tmp = sqrt( sumReduce(Dm.Comm,tmp) / sumReduce(Dm.Comm,(float)Dist0.length()) );
+tmp = sqrt( Dm.Comm.sumReduce(tmp) / Dm.Comm.sumReduce<float>(Dist0.length()) );
 const float dx1 = 0.3*tmp;
 const float dx2 = 1.05*dx1;
 if (rank==0)
@@ -285,7 +284,7 @@ void filter_final( Array<char>& ID, Array<float>& Dist,
 Phase.fill(1);
 ComputeGlobalBlobIDs( Nx, Ny, Nz, Dm.rank_info, Phase, SignDist, 0, 0, GlobalBlobID, Dm.Comm );
 fillInt.fill(GlobalBlobID);
-int N_blobs = maxReduce(Dm.Comm,GlobalBlobID.max()+1);
+int N_blobs = Dm.Comm.maxReduce(GlobalBlobID.max()+1);
 std::vector<float> mean(N_blobs,0);
 std::vector<int> count(N_blobs,0);
 for (int k=1; k<=Nz; k++) {
@@ -321,8 +320,8 @@ void filter_final( Array<char>& ID, Array<float>& Dist,
 }
 }
 }
-mean = sumReduce(Dm.Comm,mean);
-count = sumReduce(Dm.Comm,count);
+mean = Dm.Comm.sumReduce(mean);
+count = Dm.Comm.sumReduce(count);
 for (size_t i=0; i<mean.size(); i++)
 mean[i] /= count[i];
 /*if (rank==0) {
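For reference, the call-site pattern applied throughout the hunks above, written out as a tiny standalone example. This is a sketch that assumes the Utilities::MPI interface outlined at the top of this page (compile it together with that sketch), not code taken from the repository:

// Sketch: wrapper-style collective calls replacing raw MPI calls on one communicator.
#include <cstdio>
#include <mpi.h>
// In the repository this interface comes from "common/MPI.h"; here it is the sketch class above.

int main( int argc, char **argv )
{
    MPI_Init( &argc, &argv );
    {
        Utilities::MPI comm( MPI_COMM_WORLD );   // value-semantic wrapper; does not own the communicator
        double local = comm.getRank() + 1.0;
        // Old style:  MPI_Allreduce(&local,&global,1,MPI_DOUBLE,MPI_SUM,comm.getCommunicator());
        double global = comm.sumReduce( local ); // new style: the reduction returns its result
        comm.barrier();                          // replaces MPI_Barrier(...)
        if ( comm.getRank() == 0 )
            printf( "sum over %d ranks = %f\n", comm.getSize(), global );
    }
    MPI_Finalize();
    return 0;
}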