#!/bin/bash
# SLURM job script for SPECFEM3D_GLOBE Test Case C on the D.A.V.I.D.E. cluster.
# Job name fixed to match the test case (was "Test_case_A", inconsistent with
# the output/error file names and the TestCaseC working directory below).
#SBATCH -J Test_case_C
#SBATCH --time=01:30:00
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=6
##SBATCH --ntasks-per-core=1
#SBATCH --cpus-per-task=2
#SBATCH --partition=dvd_usr_prod
##SBATCH --qos=noQOS
#SBATCH --mem=86000
#SBATCH --output=Test_case_C_davide-%j.out
#SBATCH --error=Test_case_C_davide-%j.err
#SBATCH --account=Dec00_5IPwp7
#SBATCH --gres=gpu:4 # (N=1,4)

set -e

# Machine-specific environment: loads modules and defines $install_dir.
source ../env/env_davide

# One OpenMP thread per allocated CPU, pinned to hardware threads.
export OMP_NUM_THREADS="${SLURM_CPUS_PER_TASK}"
export OMP_PLACES=threads
export OMP_PROC_BIND=true

# $install_dir comes from env_davide; ${var:?} aborts with a clear message
# if the env file failed to define it (instead of cd-ing to /TestCaseC/...).
cd "${install_dir:?install_dir not set by env_davide}/TestCaseC/specfem3d_globe"

#ulimit -s unlimited

# SPECFEM3D_GLOBE executables produced by the build/install step.
readonly MESHER_EXE=./bin/xmeshfem3D
readonly SOLVER_EXE=./bin/xspecfem3D

# Archive the input files driving this run alongside its results
# (set -e above aborts the job if any of them is missing).
for input_file in Par_file STATIONS CMTSOLUTION; do
  cp "DATA/${input_file}" OUTPUT_FILES/
done

##
## mesh generation
##
sleep 2

echo
echo "$(date)"
echo "starting MPI mesher"
echo

# Total MPI ranks = nodes x tasks-per-node. This is plain integer
# arithmetic, so use $(( )) instead of piping through floating-point
# `bc -l` in a backtick substitution (two forked processes, fragile
# parsing, and a float result for an integer quantity).
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " "$SLURM_NTASKS_PER_NODE"
echo "SLURM_CPUS_PER_TASKS = " "$SLURM_CPUS_PER_TASK"
echo "SLURM_NNODES=" "$SLURM_NNODES"
echo "MPI_PROCESS $MPI_PROCESS"

time srun -n "${MPI_PROCESS}" "${MESHER_EXE}"
echo "  mesher done: $(date)"
echo

##
## forward simulation
##
sleep 2

echo
echo "$(date)"
# $PWD quoted so paths with unusual characters are printed verbatim.
echo "starting run in current directory $PWD"
echo
# Same rank count as the mesher; expansions quoted per shell best practice.
time srun -n "${MPI_PROCESS}" "${SOLVER_EXE}"

echo "finished successfully"
echo "$(date)"