From 38c0a9429d659682999a415fe0d9bac3f6e25447 Mon Sep 17 00:00:00 2001
From: Cedric Jourdain <jourdain@cines.fr>
Date: Thu, 25 Nov 2021 15:35:43 +0000
Subject: [PATCH] Add test case B for marconi100

---
 .../job_marconi100_test_case_B.slurm          | 92 +++++++++++++++++++++
 1 file changed, 92 insertions(+)
 create mode 100644 specfem3d/job_script/job_marconi100_test_case_B.slurm

diff --git a/specfem3d/job_script/job_marconi100_test_case_B.slurm b/specfem3d/job_script/job_marconi100_test_case_B.slurm
new file mode 100644
index 0000000..f27d168
--- /dev/null
+++ b/specfem3d/job_script/job_marconi100_test_case_B.slurm
@@ -0,0 +1,92 @@
+#!/bin/bash
+#SBATCH -J Test_case_B
+#SBATCH -A Ppp4x_5850
+#SBATCH -p m100_usr_prod
+#SBATCH --time 01:59:00
+#SBATCH --nodes=384
+#SBATCH --ntasks-per-node=4
+#SBATCH --cpus-per-task=8
+#SBATCH --output=specfem3D_%x_marconi100-xl-spectrummpi-GPU-%j.output
+#SBATCH --gres=gpu:4
+#SBATCH --gpus-per-node=4
+#SBATCH --hint=nomultithread
+#SBATCH --exclusive
+
+source ../env/env_marconi100
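+# env_marconi100 loads the toolchain and defines $install_dir used below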
+echo "Environment used:"
+echo "================="
+grep -E -v '^(#|$)' ../env/env_marconi100
+cat job_marconi100_test_case_B.slurm
+cd $install_dir/TestCaseB/specfem3d_globe || exit 1
+grep GPU DATA/Par_file
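+# expose all four GPUs on each node (Marconi100 nodes have 4 NVIDIA V100)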
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+
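+# SPECFEM keeps large arrays on the stack, so remove the stack size limit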
+ulimit -s unlimited
+MESHER_EXE=./bin/xmeshfem3D
+SOLVER_EXE=./bin/xspecfem3D
+
+echo "=========="
+echo "config.log"
+echo "=========="
+cat $install_dir/TestCaseB/specfem3d_globe/config.log
+echo "========"
+echo "make.log"
+echo "========"
+cat $install_dir/TestCaseB/specfem3d_globe/make.log
+
+# backup files used for this simulation
+cp DATA/Par_file OUTPUT_FILES/
+cp DATA/STATIONS OUTPUT_FILES/
+cp DATA/CMTSOLUTION OUTPUT_FILES/
+
+##
+## mesh generation
+##
+sleep 2
+
+echo
+echo `date`
+echo "starting MPI mesher"
+echo
+
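+# total MPI ranks = nodes x tasks-per-node (here 384 x 4 = 1536, one per GPU)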
+MPI_PROCESS=$((SLURM_NNODES*SLURM_NTASKS_PER_NODE))
+echo "SLURM_NTASKS= " $SLURM_NTASKS
+echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
+echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
+echo "SLURM_NNODES=" $SLURM_NNODES
+echo "MPI_PROCESS $MPI_PROCESS"
+
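+# -gpu enables IBM Spectrum MPI's CUDA-aware (GPU buffer) support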
+time mpirun -gpu -np ${MPI_PROCESS} ${MESHER_EXE}
+# Set --ntasks-per-node to the number of MPI processes to run per node, and
+# --cpus-per-task to OMP_NUM_THREADS (to exploit SMT with OpenMP threads) or
+# to 128/(ntasks-per-node) (to exploit SMT with MPI processes).
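+# Example: a Marconi100 node has 32 POWER9 cores with SMT4 (128 hardware
+# threads). This job uses 4 tasks x 8 cpus = the 32 physical cores per node
+# (--hint=nomultithread); with SMT, 128/4 would give --cpus-per-task=32.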
+echo "  mesher done: `date`"
+echo
+
+##
+## forward simulation
+##
+sleep 2
+
+echo
+echo `date`
+echo "starting run in current directory $PWD"
+echo
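+# run the solver with the same number of MPI ranks as the mesher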
+time mpirun -gpu -np ${MPI_PROCESS} ${SOLVER_EXE}
+
+ls -lrth $install_dir/TestCaseB/specfem3d_globe/OUTPUT_FILES/output_*.txt
+cat $install_dir/TestCaseB/specfem3d_globe/OUTPUT_FILES/output_solver.txt
+echo "========"
+cat $install_dir/TestCaseB/specfem3d_globe/OUTPUT_FILES/output_mesher.txt
+
+echo "finished successfully"
+echo `date`
-- 
GitLab