#!/bin/bash -x
#SBATCH -J Test_case_C
#SBATCH --account=prpb66
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=6
#SBATCH --cpus-per-task=8
#SBATCH --time=00:30:00
#SBATCH --partition=batch
#SBATCH --output=specfem_juwels_TestCaseC-%j.output
#SBATCH --acctg-freq=task=1

set -e

source ../env/env_juwels

cd $install_dir/TestCaseC/specfem3d_globe

# Intel MPI process pinning and debug settings
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
export I_MPI_DEBUG=4

# Make sure that OMP_NUM_THREADS / KMP_HW_SUBSET = cpus-per-task
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=8
#export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}

ulimit -s unlimited

# mesher and solver executables
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D

# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/

##
## mesh generation
##
sleep 2

echo
echo `date`
echo "starting MPI mesher"
echo

# total number of MPI processes = nodes * tasks per node
MPI_PROCESS=`echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`

echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASK = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES = " $SLURM_NNODES
echo "MPI_PROCESS = $MPI_PROCESS"
echo "OMP_NUM_THREADS = $OMP_NUM_THREADS"

#time mpirun -n ${MPI_PROCESS} ${MESHER_EXE}
time srun -n ${MPI_PROCESS} ${MESHER_EXE}

echo "mesher done: `date`"
echo

##
## forward simulation
##
sleep 2

echo
echo `date`
echo "starting run in current directory $PWD"
echo

#time mpirun -n ${MPI_PROCESS} ${SOLVER_EXE}
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}

echo "finished successfully"
echo `date`