Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
UEABS
ueabs
Commits
980e1ed9
Commit
980e1ed9
authored
Nov 25, 2021
by
Cedric Jourdain
🐵
Browse files
Add test case A job script supermuc-NG
parent
9751a9a1
Changes
1
Hide whitespace changes
Inline
Side-by-side
specfem3d/job_script/job_supermuc-ng_test_case_A.slurm
0 → 100644
View file @
980e1ed9
#!/bin/bash
# SLURM batch header for SPECFEM3D_GLOBE UEABS Test Case A on SuperMUC-NG.
# 12 nodes x 8 MPI ranks/node = 96 ranks, 6 OpenMP threads per rank
# (8 ranks * 6 cpus = 48 cores/node, matching SuperMUC-NG thin nodes).
#SBATCH -J Test_case_A
#SBATCH --nodes=12
#SBATCH --ntasks-per-node=8
#SBATCH --cpus-per-task=6
#SBATCH --time=01:00:00
# Do not restart the job automatically after a node failure (keeps timings clean).
#SBATCH --no-requeue
#SBATCH --account=pn73ve
#SBATCH --partition=micro #general # insert test, micro, general, large or fat
# stdout file: jobname-12Nodes-6OMP-jobid.out (update if nodes/threads change).
#SBATCH -o ./%x-12Nodes-6OMP-%j.out
#set -e
# --- Environment & provenance ----------------------------------------------
# Load the machine environment, then echo the env file (full, then only its
# active non-comment lines) and this job script, so the run can be reproduced
# from the job's stdout alone.
source ../env/env_supermuc-ng
cat ../env/env_supermuc-ng
grep "^[^#;]" ../env/env_supermuc-ng
cat job_supermuc-ng_test_case_A.slurm

# Dump the build logs and the solver parameter file into the job log.
# NOTE(review): $install_dir is presumably set by env_supermuc-ng — confirm.
echo "=========="
echo "config.log"
echo "=========="
cat "$install_dir/TestCaseA/specfem3d_globe/config.log"
echo "========"
echo "make.log"
echo "========"
cat "$install_dir/TestCaseA/specfem3d_globe/make.log"
echo "========"
echo "Par_file"
echo "========"
cat "$install_dir/TestCaseA/specfem3d_globe/DATA/Par_file"

# SuperMUC-NG helper module for SLURM job setup.
module load slurm_setup

# Abort early if the install tree is missing rather than running the rest of
# the script from the wrong directory (the original did not check the cd).
cd "$install_dir/TestCaseA/specfem3d_globe" || exit 1
# Make sure that OMP_NUM_THREADS / KMP_HW_SUBSET = cpus-per-task
# 1T = one hyper-thread per physical core; each MPI rank then runs
# SLURM_CPUS_PER_TASK (=6) OpenMP threads on 6 cores.
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
#export KMP_AFFINITY=granularity=core,compact

# Intel Fortran buffered I/O with a 16 MiB block size to speed up the
# solver's sequential file writes.
export FORT_BUFFERED=true
export FORT_BLOCKSIZE=16777216

# Unlimited stack: the Fortran solver uses large automatic arrays.
ulimit -s unlimited

# Let the link-time search path follow the runtime one.
export LIBRARY_PATH=$LD_LIBRARY_PATH
echo "LD_LIBRARY_PATH = $LD_LIBRARY_PATH"

# Binaries are relative paths: we have already cd'd into
# $install_dir/TestCaseA/specfem3d_globe.
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file    OUTPUT_FILES/
cp DATA/STATIONS    OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/

##
## mesh generation
##
sleep 2
echo
echo "$(date)"
echo "starting MPI mesher"
echo

# Total MPI ranks = nodes * tasks-per-node. Plain shell integer arithmetic;
# the original piped through 'bc -l', which is unnecessary for integers and
# adds a dependency on bc being installed.
MPI_PROCESS=$((SLURM_NNODES * SLURM_NTASKS_PER_NODE))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS= $OMP_NUM_THREADS"

# Abort the job if the mesher fails: the solver cannot run without a mesh
# (the original continued to the solver regardless).
time mpiexec -np "${MPI_PROCESS}" "${MESHER_EXE}" || exit 1
echo " mesher done: $(date)"
echo
echo
##
## forward simulation
##
sleep
2
echo
echo
`
date
`
echo
starting run
in
current directory
$PWD
echo
time
mpiexec
-np
${
MPI_PROCESS
}
${
SOLVER_EXE
}
echo
"finished successfully"
echo
"====================="
echo
`
date
`
ls
-lrth
$install_dir
/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_
*
.txt
cat
$install_dir
/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_mesher.txt
cat
$install_dir
/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_solver.txt
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment