add dense case for crusher

This commit is contained in:
James E McClure 2022-02-09 11:37:18 -05:00
parent b843af8e5e
commit 7919de40f6
4 changed files with 150 additions and 0 deletions

View File

@ -0,0 +1,36 @@
#!/bin/bash
#SBATCH -A CSC380
#SBATCH -J MPI-multinode
#SBATCH -o %x-%j.out
#SBATCH -t 6:00:00
#SBATCH -p batch
#SBATCH -N 8
#SBATCH --exclusive
# MODULE ENVIRONMENT
# Crusher (HPE Cray EX / AMD MI250X) toolchain for LBPM: AMD compilers,
# ROCm, Cray MPICH, and parallel HDF5.
module load PrgEnv-amd
module load rocm/4.5.0
module load cray-mpich
module load cray-hdf5-parallel
#module load craype-accel-amd-gfx908
## These must be set before compiling so the executable picks up GTL
# GTL (GPU Transport Layer) link flags for gfx90a enable GPU-aware MPI.
export PE_MPICH_GTL_DIR_amd_gfx90a="-L${CRAY_MPICH_ROOTDIR}/gtl/lib"
export PE_MPICH_GTL_LIBS_amd_gfx90a="-lmpi_gtl_hsa"
# Allow MPI to operate directly on GPU-resident buffers.
export MPICH_GPU_SUPPORT_ENABLED=1
#export MPL_MBX_SIZE=1024000000
export LD_LIBRARY_PATH=${CRAY_LD_LIBRARY_PATH}:${LD_LIBRARY_PATH}
# Location of the pre-built LBPM test executables.
export LBPM_BIN=/ccs/proj/csc380/mcclurej/crusher/LBPM/tests
echo "Running Color LBM"
# Pin tasks to CPU 57; with one task per node (below) nothing is oversubscribed.
MYCPUBIND="--cpu-bind=verbose,map_cpu:57"
# 8 nodes x 1 task/node, 1 GPU per task; multinode.db presumably decomposes
# the domain over 8 ranks — verify nproc in the input file matches -n8.
srun --verbose -N8 -n8 --cpus-per-gpu=8 --gpus-per-task=1 --gpu-bind=closest ${MYCPUBIND} $LBPM_BIN/TestCommD3Q19 multinode.db
#srun --verbose -N1 -n2 --mem-per-gpu=8g --cpus-per-gpu=1 --gpus-per-node=2 --gpu-bind=closest $LBPM_BIN/lbpm_permeability_simulator input.db
exit;

View File

@ -0,0 +1,36 @@
#!/bin/bash
#SBATCH -A CSC380
#SBATCH -J MPI-singlenode
#SBATCH -o %x-%j.out
#SBATCH -t 0:10:00
#SBATCH -p batch
#SBATCH -N 1
#SBATCH --exclusive
# MODULE ENVIRONMENT
# Crusher (HPE Cray EX / AMD MI250X) toolchain for LBPM: AMD compilers,
# ROCm, Cray MPICH, and parallel HDF5.
module load PrgEnv-amd
module load rocm/4.5.0
module load cray-mpich
module load cray-hdf5-parallel
#module load craype-accel-amd-gfx908
## These must be set before compiling so the executable picks up GTL
# GTL (GPU Transport Layer) link flags for gfx90a enable GPU-aware MPI.
export PE_MPICH_GTL_DIR_amd_gfx90a="-L${CRAY_MPICH_ROOTDIR}/gtl/lib"
export PE_MPICH_GTL_LIBS_amd_gfx90a="-lmpi_gtl_hsa"
# Allow MPI to operate directly on GPU-resident buffers.
export MPICH_GPU_SUPPORT_ENABLED=1
#export MPL_MBX_SIZE=1024000000
export LD_LIBRARY_PATH=${CRAY_LD_LIBRARY_PATH}:${LD_LIBRARY_PATH}
# Location of the pre-built LBPM test executables.
export LBPM_BIN=/ccs/proj/csc380/mcclurej/crusher/LBPM/tests
echo "Running Color LBM"
# NOTE(review): with -N1 -n8 below, a single-entry map_cpu list places all
# 8 tasks on CPU 57 of the same node — likely oversubscribed; confirm the
# intended per-task CPU map (the multinode variant runs 1 task/node).
MYCPUBIND="--cpu-bind=verbose,map_cpu:57"
# 1 node x 8 tasks, 1 GPU per task; input is multinode.db despite the
# single-node job name — presumably the same 8-rank decomposition; verify.
srun --verbose -N1 -n8 --cpus-per-gpu=8 --gpus-per-task=1 --gpu-bind=closest ${MYCPUBIND} $LBPM_BIN/TestCommD3Q19 multinode.db
#srun --verbose -N1 -n2 --mem-per-gpu=8g --cpus-per-gpu=1 --gpus-per-node=2 --gpu-bind=closest $LBPM_BIN/lbpm_permeability_simulator input.db
exit;

View File

@ -0,0 +1,9 @@
import numpy as np


def generate_dense_image(N=1024, filename=None, low=1, high=3):
    """Generate a dense random label image and write it as a raw binary file.

    Creates an N x N x N uint8 array of labels drawn uniformly from
    [low, high) (defaults give labels {1, 2}, matching the config's
    ReadValues) and writes it with ndarray.tofile (raw bytes, C order).

    Parameters
    ----------
    N : int
        Edge length of the cubic image (default 1024, the original size).
    filename : str or None
        Output path; defaults to "dense_{N}x{N}x{N}.raw" so the default
        call reproduces the original "dense_1024x1024x1024.raw".
    low, high : int
        Inclusive lower / exclusive upper bound for the random labels.

    Returns
    -------
    str
        The path the data was written to.
    """
    if filename is None:
        filename = "dense_{0}x{0}x{0}.raw".format(N)
    data = np.random.randint(low=low, high=high, size=(N, N, N), dtype=np.uint8)
    data.tofile(filename)
    return filename


if __name__ == "__main__":
    # Preserve the original script behavior: write the 1024^3 image.
    generate_dense_image()

View File

@ -0,0 +1,69 @@
// LBPM input database for the dense 1024^3 test case.
// Sections: MRT / Color (LBM solver parameters), Domain (decomposition
// and input image), Analysis, Visualization, FlowAdaptor.
MRT {
   timestepMax = 100
   analysis_interval = 20000
   tau = 0.7
   F = 0, 0, 5.0e-5          // body force (x, y, z)
   Restart = false
   din = 1.0                 // inlet density
   dout = 1.0                // outlet density
   flux = 0.0
}
Color {
   tauA = 0.7;               // relaxation time, fluid A
   tauB = 0.7;               // relaxation time, fluid B
   rhoA = 1.0;
   rhoB = 1.0;
   alpha = 1e-2;             // controls interfacial tension
   beta = 0.95;              // controls interface width
   F = 0, 0, 0.0
   Restart = false
   flux = 0.0  // voxels per timestep
   timestepMax = 10
   // rescale_force_after_timestep = 100000
   ComponentLabels = 0, -1, -2
   ComponentAffinity = -1.0, -1.0, -0.9
   // protocol = "image sequence"
   // capillary_number = 1e-5
}
Domain {
   Filename = "dense_1024x1024x1024.raw"   // raw uint8 image from the generator script
   nproc = 2, 2, 2     // Number of processors (Npx,Npy,Npz)
   offset = 0, 0, 0
   n = 222, 222, 222      // Size of local domain (Nx,Ny,Nz)
   N = 1024, 1024, 1024          // size of the input image
   voxel_length = 1.0        // Length of domain (x,y,z)
   BC = 0                  // Boundary condition type
   //Sw = 0.2
   ReadType = "8bit"
   ReadValues = 0, 1, 2, -1, -2
   WriteValues = 0, 1, 2, -1, -2
   ComponentLabels = 0, -1, -2
   InletLayers = 0, 0, 5     // extra layers added at the z inlet
   OutletLayers = 0, 0, 5    // extra layers added at the z outlet
}
Analysis {
   visualization_interval = 1000000
   //morph_interval = 100000
   //morph_delta = -0.08
   analysis_interval = 20000 // Frequency to perform analysis
   min_steady_timesteps = 15000000
   max_steady_timesteps = 15000000
   restart_interval = 500000 // Frequency to write restart data
   restart_file = "Restart" // Filename to use for restart file (will append rank)
   N_threads = 0 // Number of threads to use
   load_balance = "default" // Load balance method to use: "none", "default", "independent"
}
Visualization {
   save_8bit_raw = true
   write_silo = true
}
FlowAdaptor {
}