Merge branch 'morphLBM' of github.com:JamesEMcClure/LBPM-WIA into morphLBM

JamesEMcclure 2019-11-14 10:32:24 -05:00
commit a750533a7f
10 changed files with 321 additions and 21 deletions


@@ -694,15 +694,15 @@ Array<TYPE, FUN, Allocator> Array<TYPE, FUN, Allocator>::repmat(
for ( size_t i4 = 0, index = 0; i4 < N1[4]; i4++ ) {
for ( size_t j4 = 0; j4 < Nr[4]; j4++ ) {
for ( size_t i3 = 0; i3 < N1[3]; i3++ ) {
for ( size_t j4 = 0; j4 < Nr[3]; j4++ ) {
for ( size_t j3 = 0; j3 < Nr[3]; j3++ ) {
for ( size_t i2 = 0; i2 < N1[2]; i2++ ) {
for ( size_t j4 = 0; j4 < Nr[2]; j4++ ) {
for ( size_t j2 = 0; j2 < Nr[2]; j2++ ) {
for ( size_t i1 = 0; i1 < N1[1]; i1++ ) {
for ( size_t j4 = 0; j4 < Nr[1]; j4++ ) {
for ( size_t j1 = 0; j1 < Nr[1]; j1++ ) {
for ( size_t i0 = 0; i0 < N1[0]; i0++ ) {
size_t k = d_size.index( i0, i1, i2, i3, i4 );
TYPE x = d_data[k];
for ( size_t j4 = 0; j4 < Nr[0]; j4++, index++ )
for ( size_t j0 = 0; j0 < Nr[0]; j0++, index++ )
y2[index] = x;
}
}
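For reference, a minimal standalone sketch of the corrected replication pattern, reduced to two dimensions and plain std::vector (the 2x2 source block and the repeat counts are made up): the point of the fix above is that every dimension keeps its own repeat counter (j0, j1, ...) instead of reusing j4.

#include <cstdio>
#include <vector>

int main()
{
    const size_t N0 = 2, N1 = 2;            // source extents
    const size_t R0 = 2, R1 = 3;            // repeat counts per dimension
    std::vector<int> src = { 1, 2, 3, 4 };  // column-major: src(i0,i1) = src[i0 + i1*N0]
    std::vector<int> y2( N0 * R0 * N1 * R1 );
    size_t index = 0;
    for ( size_t i1 = 0; i1 < N1; i1++ )
        for ( size_t j1 = 0; j1 < R1; j1++ )
            for ( size_t i0 = 0; i0 < N0; i0++ ) {
                int x = src[i0 + i1 * N0];
                for ( size_t j0 = 0; j0 < R0; j0++, index++ )
                    y2[index] = x;          // each entry repeated R0 times, each column block R1 times
            }
    for ( size_t k = 0; k < y2.size(); k++ )
        printf( "%d ", y2[k] );             // 1 1 2 2 1 1 2 2 1 1 2 2 3 3 4 4 3 3 4 4 3 3 4 4
    printf( "\n" );
    return 0;
}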


@@ -4,16 +4,28 @@
/********************************************************
* Structure to store the rank info *
********************************************************/
inline int getRankForBlock( int nprocx, int nprocy, int nprocz, int i, int j, int k )
int RankInfoStruct::getRankForBlock( int i, int j, int k ) const
{
int i2 = (i+nprocx)%nprocx;
int j2 = (j+nprocy)%nprocy;
int k2 = (k+nprocz)%nprocz;
return i2 + j2*nprocx + k2*nprocx*nprocy;
int i2 = (i+nx)%nx;
int j2 = (j+ny)%ny;
int k2 = (k+nz)%nz;
return i2 + j2*nx + k2*nx*ny;
}
RankInfoStruct::RankInfoStruct()
{
memset(this,0,sizeof(RankInfoStruct));
nx = 0;
ny = 0;
nz = 0;
ix = -1;
jy = -1;
kz = -1;
for (int i=-1; i<=1; i++) {
for (int j=-1; j<=1; j++) {
for (int k=-1; k<=1; k++) {
rank[i+1][j+1][k+1] = -1;
}
}
}
}
RankInfoStruct::RankInfoStruct( int rank0, int nprocx, int nprocy, int nprocz )
{
@@ -21,17 +33,30 @@ RankInfoStruct::RankInfoStruct( int rank0, int nprocx, int nprocy, int nprocz )
nx = nprocx;
ny = nprocy;
nz = nprocz;
ix = rank0%nprocx;
jy = (rank0/nprocx)%nprocy;
kz = rank0/(nprocx*nprocy);
for (int i=-1; i<=1; i++) {
for (int j=-1; j<=1; j++) {
for (int k=-1; k<=1; k++) {
rank[i+1][j+1][k+1] = getRankForBlock(nprocx,nprocy,nprocz,ix+i,jy+j,kz+k);
if ( rank0 >= nprocx * nprocy * nprocz ) {
ix = -1;
jy = -1;
kz = -1;
for (int i=-1; i<=1; i++) {
for (int j=-1; j<=1; j++) {
for (int k=-1; k<=1; k++) {
rank[i+1][j+1][k+1] = -1;
}
}
}
} else {
ix = rank0%nprocx;
jy = (rank0/nprocx)%nprocy;
kz = rank0/(nprocx*nprocy);
for (int i=-1; i<=1; i++) {
for (int j=-1; j<=1; j++) {
for (int k=-1; k<=1; k++) {
rank[i+1][j+1][k+1] = getRankForBlock(ix+i,jy+j,kz+k);
}
}
}
ASSERT(rank[1][1][1]==rank0);
}
ASSERT(rank[1][1][1]==rank0);
}
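A quick standalone check of the periodic block-to-rank mapping used above, written as a free function that takes the grid size explicitly like the old helper (the 2x2x2 process grid and the chosen rank are illustrative):

#include <cstdio>

// Same formula as RankInfoStruct::getRankForBlock: wrap (i,j,k) into the process
// grid periodically, then linearize as i + j*nx + k*nx*ny.
int rankForBlock( int nx, int ny, int nz, int i, int j, int k )
{
    int i2 = ( i + nx ) % nx;
    int j2 = ( j + ny ) % ny;
    int k2 = ( k + nz ) % nz;
    return i2 + j2 * nx + k2 * nx * ny;
}

int main()
{
    // On a 2x2x2 grid, rank 4 sits at (ix,jy,kz) = (0,0,1); its -x neighbor wraps
    // around the periodic boundary to (1,0,1), which is rank 5.
    printf( "%d\n", rankForBlock( 2, 2, 2, 0 - 1, 0, 1 ) );   // prints 5
    return 0;
}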


@@ -31,9 +31,16 @@ struct RankInfoStruct {
int rank[3][3][3]; //!< The rank for the neighbor [i][j][k]
RankInfoStruct();
RankInfoStruct( int rank, int nprocx, int nprocy, int nprocz );
int getRankForBlock( int i, int j, int k ) const;
};
//! Redistribute domain data (dst may be smaller than the src)
template<class TYPE>
Array<TYPE> redistribute( const RankInfoStruct& src_rank, const Array<TYPE>& src_data,
const RankInfoStruct& dst_rank, std::array<int,3> dst_size, MPI_Comm comm );
/*!
* @brief Communicate halo
* @details Fill the halo cells in an array from the neighboring processes


@@ -8,7 +8,90 @@
/********************************************************
* Structure to store the rank info *
* Redistribute data between two grids *
********************************************************/
template<class TYPE>
Array<TYPE> redistribute( const RankInfoStruct& src_rank, const Array<TYPE>& src_data,
const RankInfoStruct& dst_rank, std::array<int,3> dst_size, MPI_Comm comm )
{
#ifdef USE_MPI
// Get the src size
std::array<int,3> src_size;
int size0[3] = { (int) src_data.size(0), (int) src_data.size(1), (int) src_data.size(2) };
MPI_Allreduce( size0, src_size.data(), 3, MPI_INT, MPI_MAX, comm );
if ( !src_data.empty() )
ASSERT( src_size[0] == size0[0] && src_size[1] == size0[1] && src_size[2] == size0[2] );
// Check that dst_size matches on all ranks
MPI_Allreduce( dst_size.data(), size0, 3, MPI_INT, MPI_MAX, comm );
ASSERT( dst_size[0] == size0[0] && dst_size[1] == size0[1] && dst_size[2] == size0[2] );
// Function to get overlap range
auto calcOverlap = []( int i1[3], int i2[3], int j1[3], int j2[3] ) {
std::vector<size_t> index;
if ( i1[0] > j2[0] || i2[0] < j1[0] || i1[1] > j2[1] || i2[1] < j1[1] || i1[2] > j2[2] || i2[2] < j1[2] )
return index;
index.resize( 6 );
index[0] = std::max( j1[0] - i1[0], 0 );
index[1] = std::min( j2[0] - i1[0], i2[0] - i1[0] );
index[2] = std::max( j1[1] - i1[1], 0 );
index[3] = std::min( j2[1] - i1[1], i2[1] - i1[1] );
index[4] = std::max( j1[2] - i1[2], 0 );
index[5] = std::min( j2[2] - i1[2], i2[2] - i1[2] );
return index;
};
// Pack and send my data to the appropriate ranks (including myself)
std::vector<int> send_rank;
std::vector<Array<TYPE>> send_data;
if ( !src_data.empty() ) {
int i1[3] = { src_size[0] * src_rank.ix, src_size[1] * src_rank.jy, src_size[2] * src_rank.kz };
int i2[3] = { i1[0] + src_size[0] - 1, i1[1] + src_size[1] - 1, i1[2] + src_size[2] - 1 };
for ( int i=0; i<dst_rank.nx; i++ ) {
for ( int j=0; j<dst_rank.ny; j++ ) {
for ( int k=0; k<dst_rank.nz; k++ ) {
int j1[3] = { i * dst_size[0], j * dst_size[1], k * dst_size[2] };
int j2[3] = { j1[0] + dst_size[0] - 1, j1[1] + dst_size[1] - 1, j1[2] + dst_size[2] - 1 };
auto index = calcOverlap( i1, i2, j1, j2 );
if ( index.empty() )
continue;
send_rank.push_back( dst_rank.getRankForBlock(i,j,k) );
send_data.push_back( src_data.subset( index ) );
}
}
}
}
std::vector<MPI_Request> send_request( send_rank.size() );
for (size_t i=0; i<send_rank.size(); i++)
MPI_Isend( send_data[i].data(), sizeof(TYPE)*send_data[i].length(), MPI_BYTE, send_rank[i], 5462, comm, &send_request[i]);
// Unpack data from the appropriate ranks (including myself)
Array<TYPE> dst_data( dst_size[0], dst_size[1], dst_size[2] );
int i1[3] = { dst_size[0] * dst_rank.ix, dst_size[1] * dst_rank.jy, dst_size[2] * dst_rank.kz };
int i2[3] = { i1[0] + dst_size[0] - 1, i1[1] + dst_size[1] - 1, i1[2] + dst_size[2] - 1 };
for ( int i=0; i<src_rank.nx; i++ ) {
for ( int j=0; j<src_rank.ny; j++ ) {
for ( int k=0; k<src_rank.nz; k++ ) {
int j1[3] = { i * src_size[0], j * src_size[1], k * src_size[2] };
int j2[3] = { j1[0] + src_size[0] - 1, j1[1] + src_size[1] - 1, j1[2] + src_size[2] - 1 };
auto index = calcOverlap( i1, i2, j1, j2 );
if ( index.empty() )
continue;
int rank = src_rank.getRankForBlock(i,j,k);
Array<TYPE> data( index[1] - index[0] + 1, index[3] - index[2] + 1, index[5] - index[4] + 1 );
MPI_Recv( data.data(), sizeof(TYPE)*data.length(), MPI_BYTE, rank, 5462, comm, MPI_STATUS_IGNORE );
dst_data.copySubset( index, data );
}
}
}
// Free data
MPI_Waitall( send_request.size(), send_request.data(), MPI_STATUSES_IGNORE );
return dst_data;
#else
return src_data.subset( { 0, dst_size[0]-1, 0, dst_size[1]-1, 0, dst_size[2]-1 } );
#endif
}
/********************************************************
* Structure to fill halo cells *
********************************************************/
template<class TYPE>
fillHalo<TYPE>::fillHalo( MPI_Comm comm_, const RankInfoStruct& info_,

common/ReadMicroCT.cpp Normal file

@@ -0,0 +1,102 @@
#include "common/ReadMicroCT.h"
#include "common/Utilities.h"
#include "zlib.h"
#include <cstring>
// Read a file into memory
std::vector<char> readFile( const std::string& filename )
{
auto fid = fopen( filename.c_str(), "rb" );
INSIST( fid, "File does not exist: " + filename );
fseek( fid, 0, SEEK_END );
size_t bytes = ftell(fid);
fseek( fid, 0, SEEK_SET );
std::vector<char> data( bytes );
size_t bytes2 = fread( data.data(), 1, bytes, fid );
ASSERT( bytes == bytes2 );
fclose( fid );
return data;
}
// Decompress a gzip buffer
std::vector<char> gunzip( const std::vector<char>& in )
{
z_stream stream;
std::vector<char> out( 1000000 );
stream.next_in = (Bytef*) in.data();
stream.avail_in = in.size();
stream.total_in = 0;
stream.zalloc = Z_NULL;
stream.zfree = Z_NULL;
stream.opaque = Z_NULL;
stream.next_out = (Bytef*) out.data();
stream.avail_out = out.size();
stream.total_out = 0;
ASSERT( inflateInit2(&stream, 16+MAX_WBITS) == Z_OK );
bool finished = inflate(&stream, Z_SYNC_FLUSH) == Z_STREAM_END;
while ( !finished && stream.msg == Z_NULL ) {
out.resize( 2 * out.size() );
stream.next_out = (Bytef*) &out[stream.total_out];
stream.avail_out = out.size() - stream.total_out;
finished = inflate(&stream, Z_SYNC_FLUSH) == Z_STREAM_END;
}
ASSERT( stream.msg == Z_NULL );
out.resize( stream.total_out );
inflateEnd(&stream);
return out;
}
// Read the compressed micro CT data
Array<uint8_t> readMicroCT( const std::string& filename )
{
auto in = readFile( filename );
auto out = gunzip( in );
ASSERT( out.size() == 1024*1024*1024 );
Array<uint8_t> data( 1024, 1024, 1024 );
memcpy( data.data(), out.data(), data.length() );
return data;
}
// Read the compressed micro CT data and distribute
Array<uint8_t> readMicroCT( const Database& domain, MPI_Comm comm )
{
// Get the local problem info
auto n = domain.getVector<int>( "n" );
int rank = comm_rank(MPI_COMM_WORLD);
auto nproc = domain.getVector<int>( "nproc" );
RankInfoStruct rankInfo( rank, nproc[0], nproc[1], nproc[2] );
// Determine the largest file number to get
int Nfx = ( n[0] * rankInfo.nx + 1023 ) / 1024;
int Nfy = ( n[1] * rankInfo.ny + 1023 ) / 1024;
int Nfz = ( n[2] * rankInfo.nz + 1023 ) / 1024;
// Load one of the files if rank < largest file
Array<uint8_t> data;
RankInfoStruct srcRankInfo( rank, Nfx, Nfy, Nfz );
if ( srcRankInfo.ix >= 0 ) {
auto filename = domain.getScalar<std::string>( "Filename" );
char tmp[100];
if ( filename.find( "0x_0y_0z.gbd.gz" ) != std::string::npos ) {
sprintf( tmp, "%ix_%iy_%iz.gbd.gz", srcRankInfo.ix, srcRankInfo.jy, srcRankInfo.kz );
filename = filename.replace( filename.find( "0x_0y_0z.gbd.gz" ), 15, std::string( tmp ) );
} else if ( filename.find( "x0_y0_z0.gbd.gz" ) != std::string::npos ) {
sprintf( tmp, "x%i_y%i_z%i.gbd.gz", srcRankInfo.ix, srcRankInfo.jy, srcRankInfo.kz );
filename = filename.replace( filename.find( "x0_y0_z0.gbd.gz" ), 15, std::string( tmp ) );
} else {
ERROR( "Invalid name for first file" );
}
data = readMicroCT( filename );
}
// Redistribute the data
data = redistribute( srcRankInfo, data, rankInfo, { n[0], n[1], n[2] }, comm );
return data;
}
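To make the file-block arithmetic above concrete, a small worked example with hypothetical sizes: each rank holding 512^3 voxels on a 2x2x2 process grid spans 1024^3 voxels in total, so exactly one 1024^3 source file is needed in each direction and only rank 0 reads it; redistribute() then hands every rank its 512^3 block.

#include <cstdio>

int main()
{
    int n[3]     = { 512, 512, 512 };   // local block per MPI rank (hypothetical)
    int nproc[3] = { 2, 2, 2 };         // process grid -> global domain is 1024^3
    int Nfx = ( n[0] * nproc[0] + 1023 ) / 1024;
    int Nfy = ( n[1] * nproc[1] + 1023 ) / 1024;
    int Nfz = ( n[2] * nproc[2] + 1023 ) / 1024;
    printf( "%i x %i x %i source file(s)\n", Nfx, Nfy, Nfz );   // prints: 1 x 1 x 1
    // RankInfoStruct(rank, 1, 1, 1) gives ix >= 0 only for rank 0, so rank 0 alone opens
    // the "...0x_0y_0z.gbd.gz" file; the other seven ranks pass an empty array to redistribute().
    return 0;
}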

common/ReadMicroCT.h Normal file

@@ -0,0 +1,15 @@
#ifndef READMICROCT_H
#define READMICROCT_H
#include "common/Array.h"
#include "common/Communication.h"
#include "common/Database.h"
Array<uint8_t> readMicroCT( const std::string& filename );
Array<uint8_t> readMicroCT( const Database& domain, MPI_Comm comm );
#endif


@@ -551,7 +551,7 @@ void ScaLBL_ColorModel::Run(){
if (color_db->keyExists( "capillary_number" )){
capillary_number = color_db->getScalar<double>( "capillary_number" );
SET_CAPILLARY_NUMBER=true;
RESCALE_FORCE_MAX = 1;
//RESCALE_FORCE_MAX = 1;
}
if (analysis_db->keyExists( "rescale_force_count" )){
RESCALE_FORCE_MAX = analysis_db->getScalar<int>( "rescale_force_count" );


@@ -32,7 +32,7 @@ void ScaLBL_MRTModel::ReadParams(string filename){
timestepMax = mrt_db->getScalar<int>( "timestepMax" );
}
if (mrt_db->keyExists( "tolerance" )){
tolerance = mrt_db->getScalar<int>( "timestepMax" );
tolerance = mrt_db->getScalar<double>( "tolerance" );
}
if (mrt_db->keyExists( "tau" )){
tau = mrt_db->getScalar<double>( "tau" );


@@ -69,6 +69,7 @@ ADD_LBPM_TEST_PARALLEL( TestCommD3Q19 8 )
ADD_LBPM_TEST_1_2_4( testCommunication )
ADD_LBPM_TEST( TestWriter )
ADD_LBPM_TEST( TestDatabase )
ADD_LBPM_PROVISIONAL_TEST( TestMicroCTReader )
IF ( USE_NETCDF )
ADD_LBPM_TEST_PARALLEL( TestNetcdf 8 )
ADD_LBPM_EXECUTABLE( lbpm_uCT_pp )


@@ -0,0 +1,67 @@
// Test reading high-resolution files from the microct database
#include "common/MPI_Helpers.h"
#include "common/UnitTest.h"
#include "common/Database.h"
#include "common/Domain.h"
#include "common/ReadMicroCT.h"
#include "IO/Writer.h"
#include <memory>
void testReadMicroCT( const std::string& filename, UnitTest& ut )
{
// Get the domain info
auto db = std::make_shared<Database>( filename );
auto domain_db = db->getDatabase( "Domain" );
// Test reading microCT files
auto data = readMicroCT( *domain_db, MPI_COMM_WORLD );
// Check if we loaded the data correctly
if ( data.size() == domain_db->getVector<size_t>( "n" ) )
ut.passes( "Read data" );
else
ut.passes( "Data size does not match" );
// Write the results to silo
auto n = domain_db->getVector<int>( "n" );
auto nproc = domain_db->getVector<int>( "nproc" );
int N[3] = { n[0]*nproc[0], n[1]*nproc[1], n[2]*nproc[2] };
int rank = comm_rank(MPI_COMM_WORLD);
RankInfoStruct rankInfo( rank, nproc[0], nproc[1], nproc[2] );
std::vector<IO::MeshDataStruct> meshData( 1 );
auto Var = std::make_shared<IO::Variable>();
Var->name = "Variable";
Var->type = IO::VariableType::VolumeVariable;
Var->dim = 1;
Var->data.copy( data );
meshData[0].meshName = "grid";
meshData[0].mesh = std::make_shared<IO::DomainMesh>(rankInfo,n[0],n[1],n[2],N[0],N[1],N[2]);
meshData[0].vars.push_back(Var);
IO::writeData( 0, meshData, MPI_COMM_WORLD );
}
int main(int argc, char **argv)
{
// Initialize MPI
MPI_Init(&argc,&argv);
UnitTest ut;
// Run the tests
ASSERT( argc == 2 );
testReadMicroCT( argv[1], ut );
// Print the results
ut.report();
int N_errors = ut.NumFailGlobal();
// Close MPI
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return N_errors;
}