#!/bin/bash
#SBATCH -J specfem3D_globe-KNL
#SBATCH --nodes=24
#SBATCH --ntasks=96
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=17
#SBATCH --time=04:59:00
#SBATCH -C quad,cache
#SBATCH --exclusive
#SBATCH --mem=64GB
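# Layout: 24 nodes x 4 MPI ranks/node = 96 ranks, and 4 ranks x 17 cpus/task
# = 68 cores per node, which matches a 68-core KNL with one rank per quadrant.
# -C quad,cache requests quadrant cluster mode with MCDRAM used as cache.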
#SBATCH --output=specfem3D_test_case_A_frioul_module-%j.output

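# env_frioul is expected to load the compiler/MPI environment and to define
# the $install_dir used below.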
source ../env/env_frioul
cd "$install_dir/TestCaseA/specfem3d_globe" || exit 1

export I_MPI_PIN_RESPECT_CPUSET=0
export MLX5_SINGLE_THREADED=0
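# I_MPI_PIN_RESPECT_CPUSET=0 lets Intel MPI apply its own pinning rather than
# the cpuset inherited from Slurm; MLX5_SINGLE_THREADED=0 keeps the mlx5
# fabric layer thread-safe, as a hybrid MPI+OpenMP run requires.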

# With KMP_HW_SUBSET=1T (one thread per core), OMP_NUM_THREADS must match
# --cpus-per-task (17); KMP_AFFINITY=compact packs threads onto adjacent cores.
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=17
export KMP_AFFINITY=compact
#export KMP_AFFINITY=compact,verbose

ulimit -s 512000
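# ulimit -s takes KiB, so this raises the stack limit to ~500 MB; Fortran
# codes like SPECFEM typically need this for large automatic arrays.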

MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D

# backup files used for this simulation
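# Create the destination first in case it is missing (it normally already
# exists in the SPECFEM tree, so this is purely defensive).
mkdir -p OUTPUT_FILES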
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/

##
## mesh generation
##
sleep 2

echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=$SLURM_NTASKS
echo "SLURM_NTASKS= " $SLURM_NTASKS
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"

time srun -n ${MPI_PROCESS} ${MESHER_EXE}
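# Added guard (not in the original script): stop before the solver if the
# mesher returned a non-zero exit code.
if [ $? -ne 0 ]; then
    echo "mesher failed: `date`"
    exit 1
fi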
echo "  mesher done: `date`"
echo

##
## forward simulation
##
sleep 2

echo
echo `date`
echo starting run in current directory $PWD
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
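# Added guard (not in the original script): do not report success if the
# solver returned a non-zero exit code.
if [ $? -ne 0 ]; then
    echo "solver failed: `date`"
    exit 1
fi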

echo "finished successfully"
echo `date`