#!/bin/bash
#SBATCH -J Test_case_B
#SBATCH --nodes=384
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=12
#SBATCH --time=00:29:59
#SBATCH --no-requeue
#SBATCH --account=pn68go
#SBATCH --partition=general # insert test, micro, general, large or fat
#SBATCH -o ./%x-12OMP.%j.out
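# Resource layout: 384 nodes x 4 MPI tasks/node = 1536 MPI ranks, each running
# 12 OpenMP threads (4 x 12 = 48 threads per node, matching the 48 cores of a
# SuperMUC-NG node), i.e. 18432 cores in total.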
#set -e
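# Record the environment file and this job script in the job output for provenance.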
source ../env/env_supermuc-ng
cat ../env/env_supermuc-ng
cat job_supermuc-ng_test_case_B.slurm
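# LRZ requires loading the slurm_setup module in SuperMUC-NG batch scripts;
# it completes the SLURM environment setup for the job.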
module load slurm_setup

cd "$install_dir/TestCaseB/specfem3d_globe" || exit 1

# Make sure that OMP_NUM_THREADS (together with KMP_HW_SUBSET) matches --cpus-per-task
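# KMP_HW_SUBSET=1T restricts the Intel OpenMP runtime to one hardware thread
# per physical core, i.e. hyperthreads are not used.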
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
#export KMP_AFFINITY=granularity=core,compact
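# FORT_BUFFERED/FORT_BLOCKSIZE are Intel Fortran runtime I/O settings: enable
# buffered I/O with a 16 MiB (16777216-byte) block size to cut down on
# filesystem calls.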
export FORT_BUFFERED=true
export FORT_BLOCKSIZE=16777216

ulimit -s unlimited
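# An unlimited stack is needed because the solver keeps large automatic arrays
# on the stack; the default limit can cause segmentation faults.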

export LIBRARY_PATH=$LD_LIBRARY_PATH
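# Mirroring LD_LIBRARY_PATH into LIBRARY_PATH presumably keeps any link-time
# library search consistent with the runtime paths.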
echo "LD_LIBRARY_PATH = $LD_LIBRARY_PATH"

MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
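# xmeshfem3D generates the mesh databases that the spectral-element solver
# xspecfem3D reads, so the mesher must complete before the solver starts.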

# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/

##
## mesh generation
##
sleep 2

echo
echo "$(date)"
echo "starting MPI mesher"
echo

MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"

time mpiexec -np ${MPI_PROCESS} ${MESHER_EXE}
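# Abort if the mesher failed ("set -e" is commented out above, so check the
# exit status explicitly; this assumes mpiexec returns the application's exit
# code, the usual behavior).
if [ $? -ne 0 ]; then
    echo "mesher failed, aborting: $(date)"
    exit 1
fi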
echo "  mesher done: `date`"
echo

##
## forward simulation
##
sleep 2

echo
echo "$(date)"
echo "starting solver run in current directory $PWD"
echo
time mpiexec -np ${MPI_PROCESS} ${SOLVER_EXE}
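# Same explicit exit-status check for the solver before reporting success.
if [ $? -ne 0 ]; then
    echo "solver failed: $(date)"
    exit 1
fi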

echo "finished successfully"
echo "$(date)"