diff --git a/sample_scripts/config_build_rhea b/sample_scripts/config_build_rhea
new file mode 100755
index 00000000..0e9b7296
--- /dev/null
+++ b/sample_scripts/config_build_rhea
@@ -0,0 +1,60 @@
+# Clear all modules (except modules)
+#for var in ${LOADEDMODULES//:/ }; do if [ "${var///*/}" != "modules" ]; then module unload "${var///*/}" > /dev/null 2>&1; fi; done
+
+source $MODULESHOME/init/bash
+module unload PE-intel
+module load PE-gnu/6.2.0-2.0.1
+#module load gcc/6.2.0
+module load cmake3/3.5.2
+module load openmpi netcdf hdf5
+
+export MPICH_RDMA_ENABLED_CUDA=0
+
+
+echo $GNU_VERSION
+module list
+
+# Remove CMake files from previous configures
+rm -rf CMake*
+
+
+# Configure
+cmake \
+    -D CMAKE_BUILD_TYPE:STRING=Release \
+    -D CMAKE_C_COMPILER:PATH=mpicc \
+    -D CMAKE_CXX_COMPILER:PATH=mpicxx \
+    -D CMAKE_CXX_COMPILER:PATH=mpicxx \
+    -D CMAKE_CXX_STD=11 \
+    -D USE_TIMER=false \
+    -D TIMER_DIRECTORY=${HOME}/timerutility/build/opt \
+    -D MPI_COMPILER:BOOL=TRUE \
+    -D MPIEXEC=aprun \
+    -D USE_EXT_MPI_FOR_SERIAL_TESTS:BOOL=TRUE \
+    -D USE_CUDA=0 \
+    -D CUDA_FLAGS="-arch sm_35" \
+    -D USE_HDF5=1 \
+    -D USE_SILO=1 \
+    -D HDF5_DIRECTORY=/sw/rhea/hdf5/1.8.11/rhel6.6_gnu4.8.2/ \
+    -D HDF5_LIB=/sw/rhea/hdf5/1.8.11/rhel6.6_gnu4.8.2/lib/libhdf5.so \
+    -D SILO_DIRECTORY=/lustre/atlas1/geo106/proj-shared/rhea/silo\
+    ~/LBPM-WIA
+
+
+# -D PREFIX=$MEMBERWORK/geo106/eos-LBPM-WIA \
+
+#-D CUDA_HOST_COMPILER="/usr/bin/gcc" \
+
+
+# Build the code
+make install -j 8
+
+# Fix permissions
+#chmod -R g+w $PROJWORK/geo106/eos-LBPM-WIA
+
+
+# Run the fast tests
+# ctest -E WEEKLY
+
+
+# Run the slow tests
+# ctest -R WEEKLY -VV
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index a9bf8365..2232384b 100755
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -41,6 +41,7 @@ ADD_LBPM_TEST( pmmc_cylinder )
 ADD_LBPM_TEST( TestTorus )
 ADD_LBPM_TEST( TestFluxBC )
 ADD_LBPM_TEST( TestMap )
+ADD_LBPM_TEST( TestMinkowski )
 #ADD_LBPM_TEST( TestMRT )
 #ADD_LBPM_TEST( TestColorGrad )
 #ADD_LBPM_TEST( TestColorGradDFH )
diff --git a/tests/TestMinkowski.cpp b/tests/TestMinkowski.cpp
new file mode 100644
index 00000000..bcbee9a0
--- /dev/null
+++ b/tests/TestMinkowski.cpp
@@ -0,0 +1,222 @@
+// Sequential blob analysis
+// Reads parallel simulation data and performs connectivity analysis
+// and averaging on a blob-by-blob basis
+// James E. McClure 2014
+
+// NOTE(review): the <...> targets of these system includes were lost in
+// transcription; reconstructed from the sibling LBPM pre-processors — confirm.
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <iostream>
+#include <exception>
+#include <stdexcept>
+#include <fstream>
+
+#include "common/Array.h"
+#include "common/Domain.h"
+#include "common/Communication.h"
+#include "common/MPI_Helpers.h"
+#include "IO/MeshDatabase.h"
+#include "IO/Mesh.h"
+#include "IO/Writer.h"
+#include "IO/netcdf.h"
+#include "analysis/analysis.h"
+#include "analysis/filters.h"
+#include "analysis/distance.h"
+#include "analysis/Minkowski.h"
+
+#include "ProfilerApp.h"
+
+int main(int argc, char **argv)
+{
+
+    // Initialize MPI
+    int rank, nprocs;
+    MPI_Init(&argc,&argv);
+    MPI_Comm comm = MPI_COMM_WORLD;
+    MPI_Comm_rank(comm,&rank);
+    MPI_Comm_size(comm,&nprocs);
+    {
+        Utilities::setErrorHandlers();
+        PROFILE_START("Main");
+
+        //std::vector<std::string> filenames;
+        if ( argc<2 ) {
+            if ( rank == 0 ){
+                printf("At least one filename must be specified\n");
+            }
+            return 1;
+        }
+        std::string filename = std::string(argv[1]);
+        if ( rank == 0 ){
+            printf("Input data file: %s\n",filename.c_str());
+        }
+
+        auto db = std::make_shared<Database>( filename );
+        auto domain_db = db->getDatabase( "Domain" );
+
+        // Read domain parameters
+        auto Filename = domain_db->getScalar<std::string>( "Filename" );
+        auto L = domain_db->getVector<double>( "L" );
+        auto size = domain_db->getVector<int>( "n" );
+        auto SIZE = domain_db->getVector<int>( "N" );
+        auto nproc = domain_db->getVector<int>( "nproc" );
+        auto ReadValues = domain_db->getVector<char>( "ReadValues" );
+        auto WriteValues = domain_db->getVector<char>( "WriteValues" );
+        auto nx = size[0];
+        auto ny = size[1];
+        auto nz = size[2];
+        auto nprocx = nproc[0];
+        auto nprocy = nproc[1];
+        auto nprocz = nproc[2];
+        auto Nx = SIZE[0];
+        auto Ny = SIZE[1];
+        auto Nz = SIZE[2];
+
+        int i,j,k,n;
+
+        char *SegData = NULL;
+        // Rank=0 reads the entire segmented data and distributes to worker processes
+        if (rank==0){
+            printf("Dimensions of segmented image: %i x %i x %i \n",Nx,Ny,Nz);
+            SegData = new char[Nx*Ny*Nz];
+            FILE *SEGDAT = fopen(Filename.c_str(),"rb");
+            if (SEGDAT==NULL) ERROR("Error reading segmented data");
+            size_t ReadSeg;
+            ReadSeg=fread(SegData,1,Nx*Ny*Nz,SEGDAT);
+            if (ReadSeg != size_t(Nx*Ny*Nz)) printf("lbpm_segmented_decomp: Error reading segmented data (rank=%i)\n",rank);
+            fclose(SEGDAT);
+            printf("Read segmented data from %s \n",Filename.c_str());
+        }
+        MPI_Barrier(comm);
+
+        // Get the rank info
+        int N = (nx+2)*(ny+2)*(nz+2);
+
+        std::shared_ptr<Domain> Dm (new Domain(domain_db,comm));
+        for (k=0;k<nz+2;k++){
+            for (j=0;j<ny+2;j++){
+                for (i=0;i<nx+2;i++){
+                    n = k*(nx+2)*(ny+2)+j*(nx+2)+i;
+                    Dm->id[n] = 1;
+                }
+            }
+        }
+        Dm->CommInit();
+
+        int z_transition_size = 0;
+        int xStart = 0;
+        int yStart = 0;
+        int zStart = 0;
+        // Set up the sub-domains
+        if (rank==0){
+            printf("Distributing subdomain across %i processors \n",nprocs);
+            printf("Process grid: %i x %i x %i \n",Dm->nprocx(),Dm->nprocy(),Dm->nprocz());
+            printf("Subdomain size: %i \n",N);
+            //printf("Size of transition region: %i \n", z_transition_size);
+            char *tmp;
+            tmp = new char[N];
+            for (int kp=0; kp<nprocz; kp++){
+                for (int jp=0; jp<nprocy; jp++){
+                    for (int ip=0; ip<nprocx; ip++){
+                        // rank of the process that gets this subdomain
+                        int rnk = kp*Dm->nprocx()*Dm->nprocy() + jp*Dm->nprocx() + ip;
+                        // Pack and send the subdomain for rnk
+                        // NOTE(review): the interior of this pack loop was lost in
+                        // transcription; reconstructed from lbpm_segmented_decomp — confirm.
+                        for (k=0;k<nz+2;k++){
+                            for (j=0;j<ny+2;j++){
+                                for (i=0;i<nx+2;i++){
+                                    int x = xStart + ip*nx + i-1;
+                                    int y = yStart + jp*ny + j-1;
+                                    int z = zStart + kp*nz + k-1 - z_transition_size;
+                                    if (x<xStart)  x=xStart;
+                                    if (!(x<Nx))   x=Nx-1;
+                                    if (y<yStart)  y=yStart;
+                                    if (!(y<Ny))   y=Ny-1;
+                                    if (z<zStart)  z=zStart;
+                                    if (!(z<Nz))   z=Nz-1;
+                                    int nlocal = k*(nx+2)*(ny+2) + j*(nx+2) + i;
+                                    int nglobal = z*Nx*Ny + y*Nx + x;
+                                    tmp[nlocal] = SegData[nglobal];
+                                }
+                            }
+                        }
+                        if (rnk==0){
+                            for (k=0;k<nz+2;k++){
+                                for (j=0;j<ny+2;j++){
+                                    for (i=0;i<nx+2;i++){
+                                        int nlocal = k*(nx+2)*(ny+2) + j*(nx+2) + i;
+                                        Dm->id[nlocal] = tmp[nlocal];
+                                    }
+                                }
+                            }
+                        }
+                        else{
+                            printf("Sending data to process %i \n", rnk);
+                            MPI_Send(tmp,N,MPI_CHAR,rnk,15,comm);
+                        }
+                    }
+                }
+            }
+        }
+        else{
+            // Recieve the subdomain from rank = 0
+            printf("Ready to recieve data %i at process %i \n", N,rank);
+            MPI_Recv(Dm->id,N,MPI_CHAR,0,15,comm,MPI_STATUS_IGNORE);
+        }
+        MPI_Barrier(comm);
+
+        // Compute the Minkowski functionals
+        MPI_Barrier(comm);
+        std::shared_ptr<Minkowski> Averages(new Minkowski(Dm));
+
+        // Calculate the distance
+        // Initialize the domain and communication
+        nx+=2; ny+=2; nz+=2;
+        Array<char> id(nx,ny,nz);
+
+        //if (rank==0){
+        //printf("ID: %i, %i, %i \n",Dm->Nx, Dm->Ny, Dm->Nz);
+        //	printf("ID: %i, %i, %i \n",id.size(0),id.size(1),id.size(2));
+        //	printf("SDn: %i, %i, %i \n",Averages->SDn.size(0),Averages->SDn.size(1),Averages->SDn.size(2));
+        //}
+
+        // Solve for the position of the solid phase
+        for (k=0;k<nz;k++){
+            for (j=0;j<ny;j++){
+                for (i=0;i<nx;i++){
+                    n = k*nx*ny+j*nx+i;
+                    // Label the solid phase from the read value
+                    if (Dm->id[n] == ReadValues[0]) id(i,j,k) = 1;
+                    else                            id(i,j,k) = 0;
+                }
+            }
+        }
+        for (k=0;k<nz;k++){
+            for (j=0;j<ny;j++){
+                for (i=0;i<nx;i++){
+                    n = k*nx*ny+j*nx+i;
+                    // Initialize the signed distance to +/- 1
+                    Averages->SDn(i,j,k) = 2.0*double(id(i,j,k))-1.0;
+                }
+            }
+        }
+        //MeanFilter(Averages->SDn);
+
+        //std::array<int,3> bc(3)={1,1,1};
+        if (rank==0) printf("Initialized solid phase -- Converting to Signed Distance function \n");
+        CalcDist(Averages->SDn,id,*Dm);
+
+        if (rank==0) printf("Computing Minkowski functionals \n");
+        Averages->Initialize();
+        Averages->UpdateMeshValues();
+        Averages->ComputeLocal();
+        Averages->Reduce();
+        Averages->PrintAll();
+    }
+    PROFILE_STOP("Main");
+    PROFILE_SAVE("Minkowski",true);
+    MPI_Barrier(comm);
+    MPI_Finalize();
+    return 0;
+}