Renamed macro USEMPI to HAVE_MPI in upscale_relperm.cpp so that the parallel implementation can be built in the DUNE setting

This commit is contained in:
Lars Vingli Odsæter 2012-07-16 08:23:34 +02:00
parent 70a908c0fa
commit 38165f3ab7

View File

@@ -71,7 +71,7 @@
#include <map> #include <map>
#include <sys/utsname.h> #include <sys/utsname.h>
#ifdef USEMPI #ifdef HAVE_MPI
#include <mpi.h> #include <mpi.h>
#endif #endif
@@ -149,7 +149,7 @@ void usage()
void usageandexit() { void usageandexit() {
usage(); usage();
#ifdef USEMPI #ifdef HAVE_MPI
MPI_Finalize(); MPI_Finalize();
#endif #endif
exit(1); exit(1);
@@ -209,7 +209,7 @@ int main(int varnum, char** vararg)
*/ */
int mpi_rank = 0; int mpi_rank = 0;
#ifdef USEMPI #ifdef HAVE_MPI
int mpi_nodecount = 1; int mpi_nodecount = 1;
MPI_Init(&varnum, &vararg); MPI_Init(&varnum, &vararg);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -1348,7 +1348,7 @@ int main(int varnum, char** vararg)
node_vs_pressurepoint.push_back(0); node_vs_pressurepoint.push_back(0);
} }
#if USEMPI #if HAVE_MPI
// Distribute work load over mpi nodes. // Distribute work load over mpi nodes.
for (int idx=0; idx < points; ++idx) { for (int idx=0; idx < points; ++idx) {
// Ensure master node gets equal or less work than the other nodes, since // Ensure master node gets equal or less work than the other nodes, since
@@ -1549,7 +1549,7 @@ int main(int varnum, char** vararg)
WaterSaturation[pointidx] = waterVolumeLF/poreVolume; WaterSaturation[pointidx] = waterVolumeLF/poreVolume;
#ifdef USEMPI #ifdef HAVE_MPI
cout << "Rank " << mpi_rank << ": "; cout << "Rank " << mpi_rank << ": ";
#endif #endif
cout << Ptestvalue << "\t" << WaterSaturation[pointidx]; cout << Ptestvalue << "\t" << WaterSaturation[pointidx];
@@ -1569,7 +1569,7 @@ int main(int varnum, char** vararg)
clock_t finish_upscale_wallclock = clock(); clock_t finish_upscale_wallclock = clock();
timeused_upscale_wallclock = (double(finish_upscale_wallclock)-double(start_upscale_wallclock))/CLOCKS_PER_SEC; timeused_upscale_wallclock = (double(finish_upscale_wallclock)-double(start_upscale_wallclock))/CLOCKS_PER_SEC;
#ifdef USEMPI #ifdef HAVE_MPI
/* Step 8b: Transfer all computed data to master node. /* Step 8b: Transfer all computed data to master node.
Master node should post a receive for all values missing, Master node should post a receive for all values missing,
other nodes should post a send for all the values they have. other nodes should post a send for all the values they have.
@@ -1637,7 +1637,7 @@ int main(int varnum, char** vararg)
#endif #endif
// Average time pr. upscaling point: // Average time pr. upscaling point:
#ifdef USEMPI #ifdef HAVE_MPI
// Sum the upscaling time used by all processes // Sum the upscaling time used by all processes
double timeused_total; double timeused_total;
MPI_Reduce(&timeused_upscale_wallclock, &timeused_total, 1, MPI_DOUBLE, MPI_Reduce(&timeused_upscale_wallclock, &timeused_total, 1, MPI_DOUBLE,
@@ -1751,7 +1751,7 @@ int main(int varnum, char** vararg)
outputtmp << "######################################################################" << endl; outputtmp << "######################################################################" << endl;
outputtmp << "# Results from upscaling relative permeability."<< endl; outputtmp << "# Results from upscaling relative permeability."<< endl;
outputtmp << "#" << endl; outputtmp << "#" << endl;
#if USEMPI #if HAVE_MPI
outputtmp << "# (MPI-version)" << endl; outputtmp << "# (MPI-version)" << endl;
#endif #endif
time_t now = std::time(NULL); time_t now = std::time(NULL);
@@ -1783,7 +1783,7 @@ int main(int varnum, char** vararg)
outputtmp << "#" << endl; outputtmp << "#" << endl;
outputtmp << "# Timings: Tesselation: " << timeused_tesselation << " secs" << endl; outputtmp << "# Timings: Tesselation: " << timeused_tesselation << " secs" << endl;
outputtmp << "# Upscaling: " << timeused_upscale_wallclock << " secs"; outputtmp << "# Upscaling: " << timeused_upscale_wallclock << " secs";
#ifdef USEMPI #ifdef HAVE_MPI
outputtmp << " (wallclock time)" << endl; outputtmp << " (wallclock time)" << endl;
outputtmp << "# " << avg_upscaling_time_pr_point << " secs pr. saturation point" << endl; outputtmp << "# " << avg_upscaling_time_pr_point << " secs pr. saturation point" << endl;
outputtmp << "# MPI-nodes: " << mpi_nodecount << endl; outputtmp << "# MPI-nodes: " << mpi_nodecount << endl;
@@ -2028,7 +2028,7 @@ int main(int varnum, char** vararg)
} }
#if USEMPI #if HAVE_MPI
MPI_Finalize(); MPI_Finalize();
#endif #endif