# NOTE(review): removed a web-viewer pagination artifact ("Newer"/"Older" plus
# line counters 1-90) that was prepended to this script during extraction.
#!/bin/bash
# SLURM batch header: SPECFEM3D_GLOBE "Test Case A" CPU benchmark.
#SBATCH -J Test_case_A-cpu
# 24 nodes x 4 MPI ranks/node = 96 MPI processes, 8 OpenMP threads per rank
# (matches OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK set below).
#SBATCH --nodes=24
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=8
#SBATCH --time=00:30:00
# %j expands to the SLURM job id in the output file name.
#SBATCH --output specfem-cpu_TestCaseA-gcc-9-8-openMP-nomultithread-Ofast-znver2-OMP_PLACESCores-mpirun-withMCA-%j.output
#SBATCH -p cpu
# One task per physical core (no SMT); block-wise placement across nodes/sockets.
#SBATCH --hint=nomultithread
#SBATCH --distribution=block:block
#set -e
# Load the build/run environment; it is expected to define $install_dir.
source ../env/env_vega-cpu
# Fail fast if install_dir was not set: an empty value would make the cd
# below land in the wrong place and the mpirun steps run there silently.
: "${install_dir:?install_dir not set by ../env/env_vega-cpu}"
# Record the effective (non-comment) environment and this job script in the
# job log for reproducibility.
grep "^[^#;]" ../env/env_vega-cpu
cat job_vega-cpu_test_case_A.slurm
echo "=========="
echo "config.log"
echo "=========="
cat "$install_dir/TestCaseA/specfem3d_globe/config.log"
echo "========"
echo "make.log"
echo "========"
cat "$install_dir/TestCaseA/specfem3d_globe/make.log"
# Abort if the run directory is missing instead of continuing in the
# submission directory.
cd "$install_dir/TestCaseA/specfem3d_globe" || { echo "cannot cd to $install_dir/TestCaseA/specfem3d_globe" >&2; exit 1; }
#export SLURM_CPU_BIND=NONE
# Open MPI transport selection: force the UCX PML and disable the listed
# BTLs (with ^ucx and ^tcp an error occurred in MPI_Bcast on this system).
export OMPI_MCA_pml=ucx
export OMPI_MCA_btl="^uct,tcp,openib,vader" #self,vader,openib" # with ^ucx and ^tcp -> error occurred in MPI_Bcast
#Make sure that OMP_NUM_THREADS / KMP_HW_SUBSET = cpus-per-task
#export KMP_HW_SUBSET=2T
export OMP_PLACES=cores #sockets
#export OMP_SCHEDULE=DYNAMIC
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# SPECFEM's large static arrays need an unlimited stack.
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
printf '%s\n' "$LD_LIBRARY_PATH"
# Show the resolved shared libraries of the solver actually being run.
# (Was a hard-coded absolute path to another user's scratch dir; use the
# binary from the current install instead.)
ldd "$SOLVER_EXE"
# Preserve the exact simulation inputs alongside the results for provenance.
for input_file in Par_file STATIONS CMTSOLUTION; do
  cp "DATA/$input_file" OUTPUT_FILES/
done
##
## mesh generation
##
sleep 2
echo
echo "$(date)"
echo "starting MPI mesher"
echo
# Total MPI ranks = nodes x tasks-per-node. Plain shell integer arithmetic;
# no need to fork bc for an integer multiply.
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " "$SLURM_NTASKS_PER_NODE"
echo "SLURM_CPUS_PER_TASKS = " "$SLURM_CPUS_PER_TASK"
echo "SLURM_NNODES=" "$SLURM_NNODES"
echo "MPI_PROCESS $MPI_PROCESS"
#time mpirun --display-devel-map -n ${MPI_PROCESS} ${MESHER_EXE}
time mpirun -n "${MPI_PROCESS}" "${MESHER_EXE}"
#time srun --mpi=pmix_v3 --cpu-bind=core -n ${MPI_PROCESS} ${MESHER_EXE} #
echo " mesher done: $(date)"
echo
##
## forward simulation
##
sleep 2
echo
echo "$(date)"
echo "starting run in current directory $PWD"
echo
#unset FORT_BUFFERED
#time mpirun --display-devel-map -n ${MPI_PROCESS} ${SOLVER_EXE}
time mpirun -n "${MPI_PROCESS}" "${SOLVER_EXE}"
rv=$?
# Only claim success when the solver actually succeeded; previously the
# script printed "finished successfully" unconditionally.
if [ "$rv" -ne 0 ]; then
  echo "solver failed with exit code $rv" >&2
else
  echo "finished successfully"
fi
echo "$(date)"
ls -lrth "$install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_mesher.txt"
cat "$install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_mesher.txt"
ls -lrth "$install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_solver.txt"
cat "$install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_solver.txt"