UEABS / ueabs · Commit c0ed75d9
Authored Oct 19, 2021 by Cedric Jourdain
Merge branch 'r2.2-dev' of https://repository.prace-ri.eu/git/UEABS/ueabs into r2.2-dev

Parents: 23b23375, 45190c63
Changes: 5 files
specfem3d/compile.sh
@@ -7,6 +7,7 @@ echo " - daint-gpu "
echo " - daint-cpu-only "
echo " - davide "
echo " - juwels"
echo " - juwels_2018"
echo " - irene-skl "
echo " - irene-knl "
echo " - dibona "
@@ -29,14 +30,10 @@ Untar(){
    if [[ $code != "0" ]]; then
        echo "Git clone failed, try a hard copy:"
        Copy
        break 1
    else
        cd specfem3d_globe/
-       # Checkout of the 31 October 2017 version
-       git checkout b1d6ba966496f269611eff8c2cf1f22bcdac2bd9
-       # Checkout v7.0.2, latest version: unstable, tested on different architectures and all simulations failed
-       #git checkout v7.0.2
+       git checkout b1d6ba966496f269611eff8c2cf1f22bcdac2bd9
+       # Checkout of the 31 October 2017 version
+       #git checkout v7.0.2 # Checkout v7.0.2, latest version: unstable, tested on different architectures and all simulations failed
    fi
    cd $ueabs_dir
}
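The usage lines in the first hunk suggest compile.sh takes the target machine as its argument. A minimal invocation sketch under that assumption (the argument form and working directory are assumptions, not part of this commit):

# Hypothetical build-script invocation; the machine name should be one of the
# targets echoed in the usage message above.
cd specfem3d
./compile.sh juwels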
specfem3d/env/env_juwels
#!/bin/bash
-module purge
-module load Intel/2019.0.117-GCC-7.3.0 ParaStationMPI/5.2.1-1
-#IntelMPI/2019.0.117
+module --force purge
+module use $OTHERSTAGES
+module load Stages/Devel-2019a
+module load Intel/2019.5.281-GCC-8.3.0 IntelMPI/2019.7.217
export machine=juwels
export software=specfem3d_globe
export version=31octobre
#export install_dir=$HOME/benchmarks/$machine/$software/$version/
-export install_dir=$SCRATCH_cprpb66/benchmarks/$machine/$software/$version/
-export CC="mpicc"
-export FC="mpifort"
-#export CC="mpiicc"
-#export FC="mpiifort"
+export install_dir=$SCRATCH_prpb85/benchmarks/$machine/$software/$version/
+export CC="mpiicc"
+export FC="mpiifort"
export MPIFC=$FC
#export FCFLAGS=" -g -O3 -qopenmp -xhost -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -traceback -mcmodel=large"
#export CFLAGS=" -g -O3 -xhost "
-export FCFLAGS=" -g -O3 -qopenmp -xCORE-AVX512 -mtune=skylake -ipo -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -traceback -mcmodel=large"
-export CFLAGS=" -g -O3 -xCORE-AVX512 -mtune=skylake -ipo"
+export FCFLAGS=" -O3 -qopenmp -xCORE-AVX512 -mtune=skylake -ipo -no-prec-div -no-prec-sqrt -fma -qopt-zmm-usage=high -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -traceback -mcmodel=large"
+export CFLAGS=" -O3 -qopenmp -xCORE-AVX512 -mtune=skylake -ipo -no-prec-div -no-prec-sqrt -fma -qopt-zmm-usage=high"
specfem3d/job_script/job_juwels_small_benchmark_run_to_test_more_complex_Earth.slurm
(new file, mode 100644)
#!/bin/bash
#SBATCH -J Validation_case_specfem-small_benchmark_run_to_test_more_complex_Earth
#SBATCH --account=prpb85
#SBATCH --partition=batch
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=24
#SBATCH --time=02:59:59
#SBATCH --output Validation_case_specfem_small_benchmark_run_to_test_more_complex_Earth-%j.out
##SBATCH --acctg-freq=task=1

#set -e
source ../env/env_juwels
echo "Environment used:"
echo "================="
cat ../env/env_juwels

cd $install_dir/specfem3d_globe/EXAMPLES/small_benchmark_run_to_test_more_complex_Earth

export SLURM_CPU_BIND=NONE
export I_MPI_PIN=1
#export I_MPI_PIN_PROCESSOR_LIST=0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23
alias mpirun='srun'

time ./run_this_example.sh

echo
echo "running seismogram comparisons:"
echo

cd $install_dir/specfem3d_globe/

# uncompress seismograms
if [ -e EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES_reference_OK/II.AAK.MXE.sem.ascii.bz2 ]; then
    echo
    echo "unzipping references..."
    echo
    mkdir OUTPUT_FILES_reference_OK/
    bunzip2 EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES_reference_OK/*.bz2
    echo
    echo
fi

module purge
export PATH=$PATH:/p/software/juwels/stages/2018a/software/Python/2.7.14-GCCcore-7.3.0/bin
pip install --user numpy
#module load intel/17.0 python/2.7.13

# compares seismograms by plotting correlations
./utils/compare_seismogram_correlations.py EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES/ EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES_reference_OK/

echo
echo "done"
echo
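Submission is a plain sbatch call; the script has to be launched from the job_script directory so that the relative ../env/env_juwels path resolves (the directory layout is inferred from the source line above):

# Hypothetical submission of the validation case.
cd specfem3d/job_script
sbatch job_juwels_small_benchmark_run_to_test_more_complex_Earth.slurm
squeue -u $USER   # check that the job is queued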
specfem3d/job_script/job_juwels_test_case_A.slurm
#!/bin/bash -x
#SBATCH -J Test_case_A
-#SBATCH --account=prpb66
+#SBATCH --account=prpb85
#SBATCH --nodes=24
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=12
-#SBATCH --time=00:30:00
+#SBATCH --time=01:59:59
#SBATCH --partition=batch
-#SBATCH --output=specfem_small_juwels_96MPI_12OMP_srun_AVX512_mtune_skl_ParaStationMPI-%j.output
-#SBATCH --acctg-freq=task=1
+#SBATCH --output=specfem_%x_juwels-%j.output
+##SBATCH --acctg-freq=task=1
set -e
source ../env/env_juwels
cat ../env/env_juwels
cat job_juwels_test_case_A.slurm

cd $install_dir/TestCaseA/specfem3d_globe

export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
export I_MPI_DEBUG=4
#export I_MPI_DEBUG=5
export I_MPI_PMI_VALUE_LENGTH_MAX=1800

#Make sure that OMP_NUM_THREADS / KMP_HW_SUBSET = cpus-per-task
export KMP_HW_SUBSET=1T
-export OMP_NUM_THREADS=12
#export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
+export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
export KMP_AFFINITY=granularity=core,compact
export FORT_BUFFERED=true
ulimit -s unlimited
@@ -49,7 +50,6 @@ echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS= $OMP_NUM_THREADS"
#time mpirun -n ${MPI_PROCESS} ${MESHER_EXE}
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
@@ -63,9 +63,7 @@ echo
echo `date`
echo starting run in current directory $PWD
echo
#time mpirun -n ${MPI_PROCESS} ${SOLVER_EXE}
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
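The collapsed middle of this script defines MPI_PROCESS and the executable paths. One plausible set of definitions, consistent with the Slurm header above, is sketched here; it is a hypothetical sketch, not the commit's own lines (the executable names match those visible in the Test Case B script below):

# Hypothetical definitions: 24 nodes x 4 tasks per node = 96 MPI ranks,
# each rank running OMP_NUM_THREADS OpenMP threads.
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D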
specfem3d/job_script/job_juwels_test_case_B.slurm
#!/bin/bash -x
#SBATCH -J Test_case_B
-#SBATCH --account=prpb66
+#SBATCH --account=prpb85
#SBATCH --nodes=384
#SBATCH --ntasks-per-node=4
-#SBATCH --cpus-per-task=6
+#SBATCH --cpus-per-task=12
#SBATCH --time=00:30:00
#SBATCH --partition=batch
-#SBATCH --output=specfem_TestCaseB_juwels_12OMP-HT-ParaStationMPI-%j.output
+#SBATCH --output=specfem_%x_juwels-HT-%j.output

set -e
cat job_juwels_test_case_B.slurm
source ../env/env_juwels

cd $install_dir/TestCaseB/specfem3d_globe

#export I_MPI_DOMAIN=auto
#export I_MPI_PIN_RESPECT_CPUSET=0
#export I_MPI_DEBUG=4
export I_MPI_PMI_VALUE_LENGTH_MAX=1800

#Make sure that OMP_NUM_THREADS / KMP_HW_SUBSET = cpus-per-task
export KMP_HW_SUBSET=2T
-export OMP_NUM_THREADS=12
#export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
+export OMP_NUM_THREADS=24
export KMP_AFFINITY=granularity=thread,compact
export FORT_BUFFERED=true
-#ulimit -s unlimited
+ulimit -s unlimited

MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
@@ -48,7 +47,6 @@ echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS= $OMP_NUM_THREADS"
#time mpirun -n ${MPI_PROCESS} ${MESHER_EXE}
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
@@ -62,10 +60,7 @@ echo
echo `date`
echo starting run in current directory $PWD
echo
#unset FORT_BUFFERED
#time mpirun -n ${MPI_PROCESS} ${SOLVER_EXE}
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
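For reference, the geometry implied by the updated header and environment: 384 nodes x 4 tasks = 1536 MPI ranks, each running 24 OpenMP threads on 12 cores with two hardware threads per core (KMP_HW_SUBSET=2T). A small, purely illustrative check that could be added before the solver launch (not part of the commit):

# Hypothetical sanity check of the run geometry.
echo "MPI ranks      : $(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))"   # 384 * 4 = 1536
echo "Threads / rank : ${OMP_NUM_THREADS}"                            # 12 cores * 2T = 24
echo "Total threads  : $(( SLURM_NNODES * SLURM_NTASKS_PER_NODE * OMP_NUM_THREADS ))"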