#!/bin/bash -x
#SBATCH -J Test_case_B
#SBATCH --nodes=384
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=12
#SBATCH --time=00:30:00
#SBATCH --partition=batch
#SBATCH --output=specfem_%x_juwels-HT-%j.output
set -e
# log a copy of this job script into the SLURM output for reference
cat job_juwels_test_case_B.slurm
source ../env/env_juwels
cd $install_dir/TestCaseB/specfem3d_globe
#export I_MPI_DEBUG=4
export I_MPI_PMI_VALUE_LENGTH_MAX=1800

# Make sure that OMP_NUM_THREADS divided by the hardware threads per core in KMP_HW_SUBSET equals --cpus-per-task
export KMP_HW_SUBSET=2T
export OMP_NUM_THREADS=24
export KMP_AFFINITY=granularity=thread,compact
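# Worked check of the rule above: 24 OpenMP threads / 2 hardware threads per core
# (KMP_HW_SUBSET=2T) = 12 cores, matching --cpus-per-task=12. The optional runtime
# guard below is only a sketch and assumes KMP_HW_SUBSET keeps its "<n>T" form.
threads_per_core=${KMP_HW_SUBSET%T}
if [ $(( OMP_NUM_THREADS / threads_per_core )) -ne "${SLURM_CPUS_PER_TASK}" ]; then
    echo "WARNING: OMP_NUM_THREADS=${OMP_NUM_THREADS} does not match ${SLURM_CPUS_PER_TASK} cpus-per-task with ${threads_per_core} threads/core" >&2
fi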
export FORT_BUFFERED=true

MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
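
# Optional fail-fast sketch: abort with a clear message if the binaries are missing
# (with "set -e" a failed srun would stop the job anyway, just later and less clearly).
for exe in "${MESHER_EXE}" "${SOLVER_EXE}"; do
    if [ ! -x "${exe}" ]; then
        echo "ERROR: ${exe} not found or not executable in $PWD" >&2
        exit 1
    fi
done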

# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
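# Optionally keep a copy of this job script next to the other run inputs
# ("$0" under sbatch points at the spooled copy of the submitted script).
cp "$0" OUTPUT_FILES/job_juwels_test_case_B.slurm 2>/dev/null || true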

##
## mesh generation
##
sleep 2

echo
echo `date`
echo "starting MPI mesher"
echo

MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"

time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo "  mesher done: `date`"
echo
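
# Optional guard before the solver (sketch): assumes LOCAL_PATH in DATA/Par_file is
# the default ./DATABASES_MPI, where the mesher writes its database files.
if [ -z "$(ls -A DATABASES_MPI 2>/dev/null)" ]; then
    echo "WARNING: DATABASES_MPI looks empty after the mesher step" >&2
fi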

##
## forward simulation
##
sleep 2

echo
echo `date`
echo "starting run in current directory $PWD"
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}

echo "finished successfully"
echo `date`