Cleaning up some code (merging duplicate functionality into a subfunction)

This commit is contained in:
Mark Berrill 2015-07-17 11:08:33 -04:00
parent f4a2f22f57
commit a0a8b578e2
6 changed files with 174 additions and 275 deletions
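
The change consolidates the MPI blob-id merging that was previously duplicated in ComputeGlobalBlobIDs and ComputeGlobalPhaseComponent into a single static helper, LocalToGlobalIDs. A minimal sketch of the resulting call pattern, assembled from the diffs below (profiling macros omitted; types such as IntArray, DoubleArray, and RankInfoStruct come from the existing code base):

    // Sketch only: both public entry points now compute the local ids and then
    // delegate the cross-rank merge to the shared static helper.
    int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
        const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS,
        IntArray& GlobalBlobID )
    {
        int nblobs = ComputeLocalBlobIDs( Phase, SignDist, vF, vS, GlobalBlobID, false );
        return LocalToGlobalIDs( nx, ny, nz, rank_info, nblobs, GlobalBlobID );
    }
    int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info,
        const IntArray& PhaseID, int VALUE, IntArray& GlobalBlobID )
    {
        int nblobs = ComputeLocalPhaseComponent( PhaseID, VALUE, GlobalBlobID, false );
        return LocalToGlobalIDs( nx, ny, nz, rank_info, nblobs, GlobalBlobID );
    }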

View File

@@ -158,6 +158,7 @@ static std::vector<IO::MeshDatabase> writeMeshesNewFormat(
// Write the mesh data
void IO::writeData( int timestep, const std::vector<IO::MeshDataStruct>& meshData, int format )
{
PROFILE_START("writeData");
int rank = MPI_WORLD_RANK();
int size = MPI_WORLD_SIZE();
// Create the output directory
@@ -196,6 +197,7 @@ void IO::writeData( int timestep, const std::vector<IO::MeshDataStruct>& meshDat
fprintf(fid,"%s\n",path);
fclose(fid);
}
PROFILE_STOP("writeData");
}

View File

@@ -12,8 +12,8 @@ inline const TYPE* getPtr( const std::vector<TYPE>& x ) { return x.empty() ? NUL
/******************************************************************
* Compute the blobs                                               *
******************************************************************/
int ComputePhaseComponent(IntArray &ComponentLabel,
IntArray &PhaseID, int VALUE, int &ncomponents,
static int ComputePhaseComponent( IntArray &ComponentLabel,
const IntArray &PhaseID, int VALUE, int &ncomponents,
int startx, int starty, int startz, IntArray &temp, bool periodic)
{
@@ -309,7 +309,7 @@ int ComputeLocalBlobIDs( const DoubleArray& Phase, const DoubleArray& SignDist,
return nblobs;
}
int ComputeLocalPhaseComponent(IntArray &PhaseID, int VALUE, IntArray &ComponentLabel, bool periodic )
int ComputeLocalPhaseComponent(const IntArray &PhaseID, int VALUE, IntArray &ComponentLabel, bool periodic )
{
PROFILE_START("ComputeLocalPhaseComponent");
size_t Nx = PhaseID.size(0);
@@ -352,6 +352,7 @@ int ComputeLocalPhaseComponent(IntArray &PhaseID, int VALUE, IntArray &Component
return ncomponents;
}
/******************************************************************
* Reorder the global blob ids                                     *
******************************************************************/
@@ -412,6 +413,7 @@ void ReorderBlobIDs( IntArray& ID )
PROFILE_STOP("ReorderBlobIDs");
}
/******************************************************************
* Compute the global blob ids                                     *
******************************************************************/
@@ -468,7 +470,137 @@ static bool updateLocalIds( const std::map<int64_t,int64_t>& remote_map,
}
return changed;
}
int ComputeGlobalBlobIDs( int nx, int ny, int nz, RankInfoStruct rank_info, static int LocalToGlobalIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
int nblobs, IntArray& IDs )
{
PROFILE_START("LocalToGlobalIDs",1);
const int rank = rank_info.rank[1][1][1];
int nprocs;
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
// Get the number of blobs for each rank
std::vector<int> N_blobs(nprocs,0);
PROFILE_START("LocalToGlobalIDs-Allgather",1);
MPI_Allgather(&nblobs,1,MPI_INT,getPtr(N_blobs),1,MPI_INT,MPI_COMM_WORLD);
PROFILE_STOP("LocalToGlobalIDs-Allgather",1);
int64_t N_blobs_tot = 0;
int offset = 0;
for (int i=0; i<rank; i++)
offset += N_blobs[i];
for (int i=0; i<nprocs; i++)
N_blobs_tot += N_blobs[i];
INSIST(N_blobs_tot<0x80000000,"Maximum number of blobs exceeded");
// Compute temporary global ids
for (size_t i=0; i<IDs.length(); i++) {
if ( IDs(i) >= 0 )
IDs(i) += offset;
}
const Array<int> LocalIDs = IDs;
// Copy the ids and get the neighbors through the halos
fillHalo<int> fillData(rank_info,nx,ny,nz,1,1,1,0,1,true,true,true);
fillData.fill(IDs);
// Create a list of all neighbor ranks (excluding self)
std::vector<int> neighbors;
neighbors.push_back( rank_info.rank[0][1][1] );
neighbors.push_back( rank_info.rank[2][1][1] );
neighbors.push_back( rank_info.rank[1][0][1] );
neighbors.push_back( rank_info.rank[1][2][1] );
neighbors.push_back( rank_info.rank[1][1][0] );
neighbors.push_back( rank_info.rank[1][1][2] );
std::unique(neighbors.begin(),neighbors.end());
if ( std::find(neighbors.begin(),neighbors.end(),rank) != neighbors.end() )
neighbors.erase(std::find(neighbors.begin(),neighbors.end(),rank));
// Create a map of all local ids to the neighbor ids
std::map<int64_t,global_id_info_struct> map;
std::set<int64_t> local;
for (size_t i=0; i<LocalIDs.length(); i++) {
if ( LocalIDs(i)>=0 ) {
local.insert(LocalIDs(i));
if ( LocalIDs(i)!=IDs(i) )
map[LocalIDs(i)].remote_ids.insert(IDs(i));
}
}
std::map<int64_t,global_id_info_struct>::iterator it;
for (it=map.begin(); it!=map.end(); ++it) {
it->second.new_id = it->first;
local.erase(it->first);
}
// Get the number of ids we will receive from each rank
int N_send = map.size();
std::vector<int> N_recv(neighbors.size(),0);
std::vector<MPI_Request> send_req(neighbors.size());
std::vector<MPI_Request> recv_req(neighbors.size());
std::vector<MPI_Status> status(neighbors.size());
for (size_t i=0; i<neighbors.size(); i++) {
MPI_Isend( &N_send, 1, MPI_INT, neighbors[i], 0, MPI_COMM_WORLD, &send_req[i] );
MPI_Irecv( &N_recv[i], 1, MPI_INT, neighbors[i], 0, MPI_COMM_WORLD, &recv_req[i] );
}
MPI_Waitall(neighbors.size(),getPtr(send_req),getPtr(status));
MPI_Waitall(neighbors.size(),getPtr(recv_req),getPtr(status));
// Allocate memory for communication
int64_t *send_buf = new int64_t[2*N_send];
std::vector<int64_t*> recv_buf(neighbors.size());
for (size_t i=0; i<neighbors.size(); i++)
recv_buf[i] = new int64_t[2*N_recv[i]];
// Compute a map for the remote ids, and new local id for each id
std::map<int64_t,int64_t> remote_map;
for (it=map.begin(); it!=map.end(); ++it) {
int64_t id = it->first;
std::set<int64_t>::const_iterator it2;
for (it2=it->second.remote_ids.begin(); it2!=it->second.remote_ids.end(); ++it2) {
int64_t id2 = *it2;
id = std::min(id,id2);
remote_map.insert(std::pair<int64_t,int64_t>(id2,id2));
}
it->second.new_id = id;
}
// Iterate until we are done
int iteration = 1;
PROFILE_START("LocalToGlobalIDs-loop",1);
while ( 1 ) {
iteration++;
// Send the local ids and their new value to all neighbors
updateRemoteIds( map, neighbors, N_send, N_recv,send_buf, recv_buf, remote_map );
// Compute a new local id for each local id
bool changed = updateLocalIds( remote_map, map );
// Check if we are finished
int test = changed ? 1:0;
int result = 0;
MPI_Allreduce(&test,&result,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
if ( result==0 )
break;
}
PROFILE_STOP("LocalToGlobalIDs-loop",1);
// Relabel the ids
std::vector<int> final_map(nblobs,-1);
for (it=map.begin(); it!=map.end(); ++it)
final_map[it->first-offset] = it->second.new_id;
for (std::set<int64_t>::const_iterator it2=local.begin(); it2!=local.end(); ++it2)
final_map[*it2-offset] = *it2;
int ngx = (IDs.size(0)-nx)/2;
int ngy = (IDs.size(1)-ny)/2;
int ngz = (IDs.size(2)-nz)/2;
for (size_t k=ngz; k<IDs.size(2)-ngz; k++) {
for (size_t j=ngy; j<IDs.size(1)-ngy; j++) {
for (size_t i=ngx; i<IDs.size(0)-ngx; i++) {
int id = IDs(i,j,k);
if ( id >= 0 )
IDs(i,j,k) = final_map[id-offset];
}
}
}
// Fill the ghosts
fillHalo<int> fillData2(rank_info,nx,ny,nz,1,1,1,0,1,true,true,true);
fillData2.fill(IDs);
// Reorder based on size (and compress the id space)
int N_blobs_global = ReorderBlobIDs2(IDs,N_blobs_tot,ngx,ngy,ngz);
// Finished
delete [] send_buf;
for (size_t i=0; i<neighbors.size(); i++)
delete [] recv_buf[i];
PROFILE_STOP("LocalToGlobalIDs",1);
return N_blobs_global;
}
int ComputeGlobalBlobIDs( int nx, int ny, int nz, RankInfoStruct rank_info,
int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS,
IntArray& GlobalBlobID )
{
@@ -477,262 +609,22 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, RankInfoStruct rank_info,
int nprocs;
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
// First compute the local ids
int nblobs = ComputeLocalBlobIDs(Phase,SignDist,vF,vS,GlobalBlobID,false);
// Compute the global ids
int nglobal = LocalToGlobalIDs( nx, ny, nz, rank_info, nblobs, GlobalBlobID );
IntArray LocalIDs;
int nblobs = ComputeLocalBlobIDs(Phase,SignDist,vF,vS,LocalIDs,false);
std::vector<int> N_blobs(nprocs,0);
PROFILE_START("ComputeGlobalBlobIDs-Allgather",1);
MPI_Allgather(&nblobs,1,MPI_INT,getPtr(N_blobs),1,MPI_INT,MPI_COMM_WORLD);
PROFILE_STOP("ComputeGlobalBlobIDs-Allgather",1);
int64_t N_blobs_tot = 0;
int offset = 0;
for (int i=0; i<rank; i++)
offset += N_blobs[i];
for (int i=0; i<nprocs; i++)
N_blobs_tot += N_blobs[i];
INSIST(N_blobs_tot<0x80000000,"Maximum number of blobs exceeded");
// Compute temporary global ids
for (size_t i=0; i<LocalIDs.length(); i++) {
if ( LocalIDs(i) >= 0 )
LocalIDs(i) += offset;
}
// Copy the ids and get the neighbors through the halos
GlobalBlobID = LocalIDs;
fillHalo<int> fillData(rank_info,nx,ny,nz,1,1,1,0,1,true,true,true);
fillData.fill(GlobalBlobID);
// Create a list of all neighbor ranks (excluding self)
std::vector<int> neighbors;
neighbors.push_back( rank_info.rank[0][1][1] );
neighbors.push_back( rank_info.rank[2][1][1] );
neighbors.push_back( rank_info.rank[1][0][1] );
neighbors.push_back( rank_info.rank[1][2][1] );
neighbors.push_back( rank_info.rank[1][1][0] );
neighbors.push_back( rank_info.rank[1][1][2] );
std::unique(neighbors.begin(),neighbors.end());
if ( std::find(neighbors.begin(),neighbors.end(),rank) != neighbors.end() )
neighbors.erase(std::find(neighbors.begin(),neighbors.end(),rank));
// Create a map of all local ids to the neighbor ids
std::map<int64_t,global_id_info_struct> map;
std::set<int64_t> local;
for (size_t i=0; i<LocalIDs.length(); i++) {
if ( LocalIDs(i)>=0 ) {
local.insert(LocalIDs(i));
if ( LocalIDs(i)!=GlobalBlobID(i) )
map[LocalIDs(i)].remote_ids.insert(GlobalBlobID(i));
}
}
std::map<int64_t,global_id_info_struct>::iterator it;
for (it=map.begin(); it!=map.end(); ++it) {
it->second.new_id = it->first;
local.erase(it->first);
}
// Get the number of ids we will receive from each rank
int N_send = map.size();
std::vector<int> N_recv(neighbors.size(),0);
std::vector<MPI_Request> send_req(neighbors.size());
std::vector<MPI_Request> recv_req(neighbors.size());
std::vector<MPI_Status> status(neighbors.size());
for (size_t i=0; i<neighbors.size(); i++) {
MPI_Isend( &N_send, 1, MPI_INT, neighbors[i], 0, MPI_COMM_WORLD, &send_req[i] );
MPI_Irecv( &N_recv[i], 1, MPI_INT, neighbors[i], 0, MPI_COMM_WORLD, &recv_req[i] );
}
MPI_Waitall(neighbors.size(),getPtr(send_req),getPtr(status));
MPI_Waitall(neighbors.size(),getPtr(recv_req),getPtr(status));
// Allocate memory for communication
int64_t *send_buf = new int64_t[2*N_send];
std::vector<int64_t*> recv_buf(neighbors.size());
for (size_t i=0; i<neighbors.size(); i++)
recv_buf[i] = new int64_t[2*N_recv[i]];
// Compute a map for the remote ids, and new local id for each id
std::map<int64_t,int64_t> remote_map;
for (it=map.begin(); it!=map.end(); ++it) {
int64_t id = it->first;
std::set<int64_t>::const_iterator it2;
for (it2=it->second.remote_ids.begin(); it2!=it->second.remote_ids.end(); ++it2) {
int64_t id2 = *it2;
id = std::min(id,id2);
remote_map.insert(std::pair<int64_t,int64_t>(id2,id2));
}
it->second.new_id = id;
}
// Iterate until we are done
int iteration = 1;
PROFILE_START("ComputeGlobalBlobIDs-loop",1);
while ( 1 ) {
iteration++;
// Send the local ids and their new value to all neighbors
updateRemoteIds( map, neighbors, N_send, N_recv,send_buf, recv_buf, remote_map );
// Compute a new local id for each local id
bool changed = updateLocalIds( remote_map, map );
// Check if we are finished
int test = changed ? 1:0;
int result = 0;
MPI_Allreduce(&test,&result,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
if ( result==0 )
break;
}
PROFILE_STOP("ComputeGlobalBlobIDs-loop",1);
// Relabel the ids
std::vector<int> final_map(nblobs,-1);
for (it=map.begin(); it!=map.end(); ++it)
final_map[it->first-offset] = it->second.new_id;
for (std::set<int64_t>::const_iterator it2=local.begin(); it2!=local.end(); ++it2)
final_map[*it2-offset] = *it2;
int ngx = (GlobalBlobID.size(0)-nx)/2;
int ngy = (GlobalBlobID.size(1)-ny)/2;
int ngz = (GlobalBlobID.size(2)-nz)/2;
for (size_t k=ngz; k<GlobalBlobID.size(2)-ngz; k++) {
for (size_t j=ngy; j<GlobalBlobID.size(1)-ngy; j++) {
for (size_t i=ngx; i<GlobalBlobID.size(0)-ngx; i++) {
int id = GlobalBlobID(i,j,k);
if ( id >= 0 )
GlobalBlobID(i,j,k) = final_map[id-offset];
}
}
}
// Fill the ghosts
fillHalo<int> fillData2(rank_info,nx,ny,nz,1,1,1,0,1,true,true,true);
fillData2.fill(GlobalBlobID);
// Reorder based on size (and compress the id space)
int N_blobs_global = ReorderBlobIDs2(GlobalBlobID,N_blobs_tot,ngx,ngy,ngz);
// Finished
delete [] send_buf;
for (size_t i=0; i<neighbors.size(); i++)
delete [] recv_buf[i];
PROFILE_STOP("ComputeGlobalBlobIDs"); PROFILE_STOP("ComputeGlobalBlobIDs");
return N_blobs_global; return nglobal;
} }
int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info,
int ComputeGlobalPhaseComponent( int nx, int ny, int nz, RankInfoStruct rank_info, const IntArray &PhaseID, int VALUE, IntArray &GlobalBlobID )
IntArray &PhaseID, int VALUE, IntArray &GlobalBlobID )
{ {
PROFILE_START("ComputeGlobalBlobIDs"); PROFILE_START("ComputeGlobalPhaseComponent");
const int rank = rank_info.rank[1][1][1];
int nprocs;
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
// First compute the local ids // First compute the local ids
IntArray LocalIDs; int nblobs = ComputeLocalPhaseComponent(PhaseID,VALUE,GlobalBlobID,false);
int nblobs = ComputeLocalPhaseComponent(PhaseID,VALUE,LocalIDs,false); // Compute the global ids
std::vector<int> N_blobs(nprocs,0); int nglobal = LocalToGlobalIDs( nx, ny, nz, rank_info, nblobs, GlobalBlobID );
PROFILE_START("ComputeGlobalBlobIDs-Allgather",1); PROFILE_STOP("ComputeGlobalPhaseComponent");
MPI_Allgather(&nblobs,1,MPI_INT,getPtr(N_blobs),1,MPI_INT,MPI_COMM_WORLD); return nglobal;
PROFILE_STOP("ComputeGlobalBlobIDs-Allgather",1);
int64_t N_blobs_tot = 0;
int offset = 0;
for (int i=0; i<rank; i++)
offset += N_blobs[i];
for (int i=0; i<nprocs; i++)
N_blobs_tot += N_blobs[i];
INSIST(N_blobs_tot<0x80000000,"Maximum number of blobs exceeded");
// Compute temporary global ids
for (size_t i=0; i<LocalIDs.length(); i++) {
if ( LocalIDs(i) >= 0 )
LocalIDs(i) += offset;
}
// Copy the ids and get the neighbors through the halos
GlobalBlobID = LocalIDs;
fillHalo<int> fillData(rank_info,nx,ny,nz,1,1,1,0,1,true,true,true);
fillData.fill(GlobalBlobID);
// Create a list of all neighbor ranks (excluding self)
std::vector<int> neighbors;
neighbors.push_back( rank_info.rank[0][1][1] );
neighbors.push_back( rank_info.rank[2][1][1] );
neighbors.push_back( rank_info.rank[1][0][1] );
neighbors.push_back( rank_info.rank[1][2][1] );
neighbors.push_back( rank_info.rank[1][1][0] );
neighbors.push_back( rank_info.rank[1][1][2] );
std::unique(neighbors.begin(),neighbors.end());
if ( std::find(neighbors.begin(),neighbors.end(),rank) != neighbors.end() )
neighbors.erase(std::find(neighbors.begin(),neighbors.end(),rank));
// Create a map of all local ids to the neighbor ids
std::map<int64_t,global_id_info_struct> map;
std::set<int64_t> local;
for (size_t i=0; i<LocalIDs.length(); i++) {
if ( LocalIDs(i)>=0 ) {
local.insert(LocalIDs(i));
if ( LocalIDs(i)!=GlobalBlobID(i) )
map[LocalIDs(i)].remote_ids.insert(GlobalBlobID(i));
}
}
std::map<int64_t,global_id_info_struct>::iterator it;
for (it=map.begin(); it!=map.end(); ++it) {
it->second.new_id = it->first;
local.erase(it->first);
}
// Get the number of ids we will receive from each rank
int N_send = map.size();
std::vector<int> N_recv(neighbors.size(),0);
std::vector<MPI_Request> send_req(neighbors.size());
std::vector<MPI_Request> recv_req(neighbors.size());
std::vector<MPI_Status> status(neighbors.size());
for (size_t i=0; i<neighbors.size(); i++) {
MPI_Isend( &N_send, 1, MPI_INT, neighbors[i], 0, MPI_COMM_WORLD, &send_req[i] );
MPI_Irecv( &N_recv[i], 1, MPI_INT, neighbors[i], 0, MPI_COMM_WORLD, &recv_req[i] );
}
MPI_Waitall(neighbors.size(),getPtr(send_req),getPtr(status));
MPI_Waitall(neighbors.size(),getPtr(recv_req),getPtr(status));
// Allocate memory for communication
int64_t *send_buf = new int64_t[2*N_send];
std::vector<int64_t*> recv_buf(neighbors.size());
for (size_t i=0; i<neighbors.size(); i++)
recv_buf[i] = new int64_t[2*N_recv[i]];
// Compute a map for the remote ids, and new local id for each id
std::map<int64_t,int64_t> remote_map;
for (it=map.begin(); it!=map.end(); ++it) {
int64_t id = it->first;
std::set<int64_t>::const_iterator it2;
for (it2=it->second.remote_ids.begin(); it2!=it->second.remote_ids.end(); ++it2) {
int64_t id2 = *it2;
id = std::min(id,id2);
remote_map.insert(std::pair<int64_t,int64_t>(id2,id2));
}
it->second.new_id = id;
}
// Iterate until we are done
int iteration = 1;
PROFILE_START("ComputeGlobalBlobIDs-loop",1);
while ( 1 ) {
iteration++;
// Send the local ids and their new value to all neighbors
updateRemoteIds( map, neighbors, N_send, N_recv,send_buf, recv_buf, remote_map );
// Compute a new local id for each local id
bool changed = updateLocalIds( remote_map, map );
// Check if we are finished
int test = changed ? 1:0;
int result = 0;
MPI_Allreduce(&test,&result,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
if ( result==0 )
break;
}
PROFILE_STOP("ComputeGlobalBlobIDs-loop",1);
// Relabel the ids
std::vector<int> final_map(nblobs,-1);
for (it=map.begin(); it!=map.end(); ++it)
final_map[it->first-offset] = it->second.new_id;
for (std::set<int64_t>::const_iterator it2=local.begin(); it2!=local.end(); ++it2)
final_map[*it2-offset] = *it2;
int ngx = (GlobalBlobID.size(0)-nx)/2;
int ngy = (GlobalBlobID.size(1)-ny)/2;
int ngz = (GlobalBlobID.size(2)-nz)/2;
for (size_t k=ngz; k<GlobalBlobID.size(2)-ngz; k++) {
for (size_t j=ngy; j<GlobalBlobID.size(1)-ngy; j++) {
for (size_t i=ngx; i<GlobalBlobID.size(0)-ngx; i++) {
int id = GlobalBlobID(i,j,k);
if ( id >= 0 )
GlobalBlobID(i,j,k) = final_map[id-offset];
}
}
}
// Fill the ghosts
fillHalo<int> fillData2(rank_info,nx,ny,nz,1,1,1,0,1,true,true,true);
fillData2.fill(GlobalBlobID);
// Reorder based on size (and compress the id space)
int N_blobs_global = ReorderBlobIDs2(GlobalBlobID,N_blobs_tot,ngx,ngy,ngz);
// Finished
delete [] send_buf;
for (size_t i=0; i<neighbors.size(); i++)
delete [] recv_buf[i];
PROFILE_STOP("ComputeGlobalBlobIDs");
return N_blobs_global;
}

View File

@@ -54,7 +54,7 @@ int ComputeLocalBlobIDs( const DoubleArray& Phase, const DoubleArray& SignDist,
* @param[out] ComponentLabel
* @param[in] periodic
*/
int ComputeLocalPhaseComponent(IntArray &PhaseID, int VALUE, IntArray &ComponentLabel,
int ComputeLocalPhaseComponent( const IntArray &PhaseID, int VALUE, IntArray &ComponentLabel,
bool periodic );
@@ -73,7 +73,7 @@ int ComputeLocalPhaseComponent(IntArray &PhaseID, int VALUE, IntArray &Component
* @param[out] LocalBlobID The ids of the blobs
* @return Returns the number of blobs
*/
int ComputeGlobalBlobIDs( int nx, int ny, int nz, RankInfoStruct rank_info,
int ComputeGlobalBlobIDs( int nx, int ny, int nz, const RankInfoStruct& rank_info,
const DoubleArray& Phase, const DoubleArray& SignDist, double vF, double vS,
IntArray& GlobalBlobID );
@@ -91,8 +91,8 @@ int ComputeGlobalBlobIDs( int nx, int ny, int nz, RankInfoStruct rank_info,
* @param[out] GlobalBlobID The ids of the blobs for the phase
* @return Return the number of components in the specified phase
*/
int ComputeGlobalPhaseComponent( int nx, int ny, int nz, RankInfoStruct rank_info,
IntArray &PhaseID, int VALUE, IntArray &GlobalBlobID );
int ComputeGlobalPhaseComponent( int nx, int ny, int nz, const RankInfoStruct& rank_info,
const IntArray &PhaseID, int VALUE, IntArray &GlobalBlobID );
/*!

View File

@@ -133,11 +133,11 @@ void fillHalo<TYPE>::fill( Array<TYPE>& data )
}
}
}
// Recv the dst data and unpack
// Recv the dst data and unpack (we receive in reverse order to match the sends)
MPI_Status status;
for (int i=0; i<3; i++) {
for (int j=0; j<3; j++) {
for (int k=0; k<3; k++) {
for (int i=2; i>=0; i--) {
for (int j=2; j>=0; j--) {
for (int k=2; k>=0; k--) {
if ( !fill_pattern[i][j][k] )
continue;
MPI_Wait(&recv_req[i][j][k],&status);
@@ -206,6 +206,7 @@ template<class TYPE>
template<class TYPE1, class TYPE2>
void fillHalo<TYPE>::copy( const Array<TYPE1>& src, Array<TYPE2>& dst )
{
PROFILE_START("fillHalo::copy",1);
ASSERT(src.size(0)==nx||src.size(0)==nx+2*ngx);
ASSERT(dst.size(0)==nx||dst.size(0)==nx+2*ngx);
bool src_halo = src.size(0)==nx+2*ngx;
@@ -242,7 +243,7 @@ void fillHalo<TYPE>::copy( const Array<TYPE1>& src, Array<TYPE2>& dst )
}
}
} else if ( !src_halo && dst_halo ) {
// Src has halos
// Dst has halos
for (size_t k=0; k<nz; k++) {
for (size_t j=0; j<ny; j++) {
for (size_t i=0; i<nx; i++) {
@@ -252,6 +253,7 @@ void fillHalo<TYPE>::copy( const Array<TYPE1>& src, Array<TYPE2>& dst )
}
fill(dst);
}
PROFILE_STOP("fillHalo::copy",1);
}

View File

@@ -1103,7 +1103,7 @@ int main(int argc, char **argv)
// double vx_w_global,vy_w_global,vz_w_global; // global phase averaged velocity
// double vx_n_global,vy_n_global,vz_n_global; // global phase averaged velocity
double As_global;
double dEs,dAwn,dAns; // Global surface energy (calculated by rank=0)
double dEs=0, dAwn=0, dAns=0; // Global surface energy (calculated by rank=0)
double pan_global,paw_global,pc_global; // local phase averaged pressure
DoubleArray van_global(3);
DoubleArray vaw_global(3);
@@ -1439,12 +1439,12 @@
if (rank==0) printf("No. of timesteps: %i \n", timestepMax);
//.......create and start timer............
double starttime,stoptime,cputime;
double starttime=0, stoptime=0, cputime=0;
sendtag = recvtag = 5;
FILE *TIMELOG;
FILE *TIMELOG=NULL;
FILE *FINALSTATE;
FILE *FINALSTATE=NULL;
FILE *SPEED;
FILE *SPEED=NULL;
if (rank==0){
TIMELOG= fopen("timelog.tcat","a+");
FINALSTATE= fopen("finalstate.tcat","a");

View File

@@ -26,10 +26,13 @@ struct bubble_struct {
Point center;
double radius;
// Get the distance to the bubble
double dist( const Point& p, double Lx, double Ly, double Lz ) const {
double x = std::min(fabs(p.x-center.x),std::min(fabs(p.x-center.x-Lx),fabs(p.x-center.x+Lx)));
double y = std::min(fabs(p.y-center.y),std::min(fabs(p.y-center.y-Ly),fabs(p.y-center.y+Ly)));
double z = std::min(fabs(p.z-center.z),std::min(fabs(p.z-center.z-Lz),fabs(p.z-center.z+Lz)));
inline double dist( const Point& p, double Lx, double Ly, double Lz ) const {
double x = fabs(p.x-center.x);
double y = fabs(p.y-center.y);
double z = fabs(p.z-center.z);
x = std::min(x,fabs(Lx-x));
y = std::min(y,fabs(Ly-y));
z = std::min(z,fabs(Lz-z));
return sqrt(x*x+y*y+z*z)-radius;
}
// Check if this bubble overlaps with rhs
@@ -135,7 +138,7 @@ int main(int argc, char **argv)
MPI_Init(&argc,&argv);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
PROFILE_ENABLE(0);
PROFILE_ENABLE(1);
PROFILE_DISABLE_TRACE();
PROFILE_SYNCHRONIZE();
PROFILE_START("main");