#!/bin/bash
#SBATCH -J Test_case_A-gpu
#SBATCH --nodes=24
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=8
#SBATCH --time=00:30:00
#SBATCH --output=specfem-gpu_TestCaseA-gcc-9-cuda-11-GPU-NoopenMP-NoMultithread-8cpus-distribBlock-znver2-%j.output
#SBATCH -p gpu
#SBATCH --gres=gpu:4
#SBATCH --hint=nomultithread
#SBATCH --distribution=block:block

#set -e

# load the build/run environment for the Vega GPU partition
source ../env/env_vega-gpu

# record the environment file (non-comment lines), this job script,
# and the configure/build logs for provenance
grep "^[^#;]" ../env/env_vega-gpu
cat job_vega-gpu_test_case_A.slurm

echo "=========="
echo "config.log"
echo "=========="
cat $install_dir/TestCaseA/specfem3d_globe/config.log
echo "========"
echo "make.log"
echo "========"
cat $install_dir/TestCaseA/specfem3d_globe/make.log

cd $install_dir/TestCaseA/specfem3d_globe

# confirm that GPU mode is enabled in the SPECFEM3D_GLOBE parameter file
grep GPU DATA/Par_file

# Open MPI transport selection: UCX for point-to-point, with the listed
# BTL components excluded
export OMPI_MCA_pml=ucx
export OMPI_MCA_btl="^uct,tcp,openib,vader"  #self,vader,openib
# with ^ucx and ^tcp -> error occurred in MPI_Bcast
export CUDA_VISIBLE_DEVICES=0,1,2,3
#export OMP_NUM_THREADS=2

ulimit -s unlimited

MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D

# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/

##
## mesh generation
##
sleep 2

echo
echo `date`
echo "starting MPI mesher"
echo

# one MPI rank per GPU: nodes * tasks-per-node
MPI_PROCESS=`echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASK = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES = " $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"

time mpirun -n ${MPI_PROCESS} ${MESHER_EXE}

echo "mesher done: `date`"
echo

##
## forward simulation
##
sleep 2

echo
echo `date`
echo "starting run in current directory $PWD"
echo

#unset FORT_BUFFERED
time mpirun -n ${MPI_PROCESS} ${SOLVER_EXE}

echo "finished successfully"
echo "====================="
echo `date`

# list and dump the mesher/solver output summaries
ls -lrth $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_*.txt
cat $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_mesher.txt
cat $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_solver.txt
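
# ---------------------------------------------------------------------------
# Usage sketch, assuming this file is saved as job_vega-gpu_test_case_A.slurm
# (the name it cats above) and submitted from the directory containing it.
# With --nodes=24 and --ntasks-per-node=4, the script launches
# 24 * 4 = 96 MPI ranks, i.e. one rank per GPU (--gres=gpu:4 per node).
#
#   sbatch job_vega-gpu_test_case_A.slurm            # submit the job
#   squeue -u $USER                                  # check queue/run state
#   sacct -j <jobid> --format=JobID,Elapsed,State    # timing after completion
# ---------------------------------------------------------------------------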