Commit 1a0555cc authored by Cedric Jourdain

add juwels booster system

parent c0ed75d9
env/env_juwels-booster:
#!/bin/bash
#module --force purge
#module use $OTHERSTAGES
#module load Stages/Devel-2019a
#module load Intel/2019.5.281-GCC-8.3.0 IntelMPI/2019.7.217
module load Intel/2021.2.0-GCC-10.3.0 ParaStationMPI/5.4.10-1 CUDA/11.3
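# target system: JUWELS Booster (2x 24-core AMD EPYC Rome per node, 4x NVIDIA A100 GPUs)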
export machine=juwels-booster
export software=specfem3d_globe
export version=31octobre
export NEX_XI=384 # also tested: 96, 128, 160, 192, 224, 256, 288, 320, 352, 416, 448
export install_dir=$SCRATCH_prpb85/benchmarks/$machine/$software/$version/$NEX_XI
export CC="mpicc"
export FC="mpifort"
export MPIFC=$FC
export FCFLAGS=" -O3 -qopenmp -march=core-avx2 -mtune=core-avx2 -ipo -no-prec-div -no-prec-sqrt -fma -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -mcmodel=large"
export CFLAGS=" -O3 -qopenmp -march=core-avx2 -mtune=core-avx2 -ipo -no-prec-div -no-prec-sqrt -fma "
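
# A sketch of how the configure step could consume the variables above (the job
# scripts below patch run_this_example.sh instead; treat the exact flags as an
# assumption that depends on the checked-out specfem3d_globe version):
#   ./configure --with-cuda=cuda11 CC="$CC" FC="$FC" MPIFC="$MPIFC" \
#       FCFLAGS="$FCFLAGS" CFLAGS="$CFLAGS"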
job_juwels-booster_small_benchmark_run_to_test_more_complex_Earth.slurm:
#!/bin/bash
#SBATCH -J specfem_gpu_small_benchmark_run_to_test_more_complex_Earth
#SBATCH --account=prpb85
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=24
#SBATCH --cpus-per-task=2
#SBATCH --time=01:59:59
#SBATCH --output specfem_gpu_small_benchmark_run_to_test_more_complex_Earth-%j.out
#SBATCH --exclusive
#SBATCH -p booster
#SBATCH --gres=gpu:4
#set -e
# load the build/run environment, print its active (non-comment) settings, and echo this job script
source ../env/env_juwels-booster
grep "^[^#;]" ../env/env_juwels-booster
cat job_juwels-booster_small_benchmark_run_to_test_more_complex_Earth.slurm
EXAMPLE_DIR=$install_dir/specfem3d_globe/EXAMPLES/small_benchmark_run_to_test_more_complex_Earth
cd $EXAMPLE_DIR
# Open MPI UCX transport settings; expose all four A100 GPUs to the job
export OMPI_MCA_pml=ucx
export OMPI_MCA_btl="^uct,tcp,openib,vader"
export CUDA_VISIBLE_DEVICES=0,1,2,3
#export OMP_NUM_THREADS=1
# switch the example to GPU mode and accept any CUDA device, not only Tesla
sed -i 's/GPU_MODE = .false./GPU_MODE = .true./g' $EXAMPLE_DIR/DATA/Par_file
sed -i 's/GPU_DEVICE = Tesla/GPU_DEVICE = */g' $EXAMPLE_DIR/DATA/Par_file
# build with CUDA support matching the loaded CUDA/11.3 module (JUWELS Booster is x86_64)
sed -i 's/configure --enable-openmp/configure --with-cuda=cuda11 /g' $EXAMPLE_DIR/run_this_example.sh
# launch through Slurm's srun instead of mpirun
sed -i 's/mpirun -np/srun -n/g' $EXAMPLE_DIR/run_mesher_solver.bash
#taskset -a -p $PPID
time ./run_this_example.sh
echo "=========="
echo "config.log"
echo "=========="
cat $install_dir/specfem3d_globe/config.log
echo "========"
echo "make.log"
echo "========"
cat $install_dir/specfem3d_globe/make.log
echo
echo "running seismogram comparisons:"
echo
cd $install_dir/specfem3d_globe/
# uncompress seismograms
if [ -e EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES_reference_OK/II.AAK.MXE.sem.ascii.bz2 ]; then
  echo
  echo "unzipping references..."
  echo
  bunzip2 EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES_reference_OK/*.bz2
  echo
  echo
fi
#wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.10.3-Linux-x86_64.sh
#sh /ceph/hpc/home/eucedricj/Miniconda3-py37_4.10.3-Linux-x86_64.sh
#source miniconda3/bin/activate
#conda create --name python2 python=2.7
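# (the commented Miniconda lines above are one way to provide the Python
#  interpreter that the comparison script below needs)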
# compares seismograms by plotting correlations
./utils/compare_seismogram_correlations.py EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES/ EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES_reference_OK/
echo
echo "done"
ls -lrth $install_dir/specfem3d_globe/EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES/output_*.txt
cat $install_dir/specfem3d_globe/EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES/output_mesher.txt
cat $install_dir/specfem3d_globe/EXAMPLES/small_benchmark_run_to_test_more_complex_Earth/OUTPUT_FILES/output_solver.txt
job_juwels-booster_test_case_A.slurm:
#!/bin/bash -x
#SBATCH -J Test_case_A-gpu
#SBATCH --account=prpb85
#SBATCH --nodes=24
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=12
#SBATCH --time=00:29:59
#SBATCH --partition=booster
#SBATCH --output=specfem_%x_juwels-booster-%j.output
#SBATCH --gres=gpu:4
##SBATCH --acctg-freq=task=1
set -e
source ../env/env_juwels-booster
grep "^[^#;]" ../env/env_juwels-booster
cat job_juwels-booster_test_case_A.slurm
echo "=========="
echo "config.log"
echo "=========="
cat $install_dir/TestCaseA/specfem3d_globe/config.log
echo "========"
echo "make.log"
echo "========"
cat $install_dir/TestCaseA/specfem3d_globe/make.log
cd $install_dir/TestCaseA/specfem3d_globe
export I_MPI_PMI_VALUE_LENGTH_MAX=1800
# keep OMP_NUM_THREADS consistent with --cpus-per-task; KMP_HW_SUBSET=1T uses one hardware thread per core
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
export KMP_AFFINITY=granularity=core,compact
export FORT_BUFFERED=true
ulimit -s unlimited
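# Each node runs 4 MPI ranks on 4 GPUs. If per-rank GPU binding were needed, a
# wrapper like the following sketch could be passed to srun (not used here;
# SLURM_LOCALID is the node-local rank id):
#   cat > ./bind_gpu.sh <<'EOS'
#   #!/bin/bash
#   export CUDA_VISIBLE_DEVICES=$((SLURM_LOCALID % 4))
#   exec "$@"
#   EOS
#   chmod +x ./bind_gpu.sh   # then: srun -n ${MPI_PROCESS} ./bind_gpu.sh ${SOLVER_EXE}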
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
# total MPI ranks = nodes x tasks per node
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASK = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "====================="
echo `date`
ls -lrth $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_*.txt
cat $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_mesher.txt
cat $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_solver.txt
job_juwels-booster_test_case_A_strong.slurm:
#!/bin/bash -x
#SBATCH -J Test_case_A-gpu
#SBATCH --account=prpb85
#SBATCH --nodes=48
#SBATCH --ntasks-per-node=2
#SBATCH --cpus-per-task=32
#SBATCH --time=00:29:59
#SBATCH --partition=booster
#SBATCH --output=specfem_%x_juwels-booster-strong-48Nodes-%j.output
#SBATCH --gres=gpu:4
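# strong-scaling layout: the same 96 ranks as the 24-node run (24x4), here spread
# over 48 nodes with 2 ranks per node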
##SBATCH --acctg-freq=task=1
set -e
source ../env/env_juwels-booster
grep "^[^#;]" ../env/env_juwels-booster
cat job_juwels-booster_test_case_A_strong.slurm
echo "=========="
echo "config.log"
echo "=========="
cat $install_dir/TestCaseA/specfem3d_globe/config.log
echo "========"
echo "make.log"
echo "========"
cat $install_dir/TestCaseA/specfem3d_globe/make.log
cd $install_dir/TestCaseA/specfem3d_globe
export I_MPI_PMI_VALUE_LENGTH_MAX=1800
# keep OMP_NUM_THREADS consistent with --cpus-per-task; KMP_HW_SUBSET=1T uses one hardware thread per core
export KMP_HW_SUBSET=1T
# OMP_NUM_THREADS is left unset in this strong-scaling variant
#export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
export KMP_AFFINITY=granularity=core,compact
export FORT_BUFFERED=true
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
# total MPI ranks = nodes x tasks per node
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASK = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "====================="
echo `date`
ls -lrth $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_*.txt
cat $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_mesher.txt
cat $install_dir/TestCaseA/specfem3d_globe/OUTPUT_FILES/output_solver.txt
job_juwels-booster_test_case_B.slurm:
#!/bin/bash -x
#SBATCH -J Test_case_B-gpu
#SBATCH --account=prpb85
#SBATCH --nodes=24
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=12
#SBATCH --time=00:29:59
#SBATCH --partition=booster
#SBATCH --output=specfem_%x_juwels-booster-%j.output
#SBATCH --gres=gpu:4
##SBATCH --acctg-freq=task=1
set -e
source ../env/env_juwels-booster
grep "^[^#;]" ../env/env_juwels-booster
cat job_juwels-booster_test_case_B.slurm
echo "=========="
echo "config.log"
echo "=========="
cat $install_dir/TestCaseB/specfem3d_globe/config.log
echo "========"
echo "make.log"
echo "========"
cat $install_dir/TestCaseB/specfem3d_globe/make.log
cd $install_dir/TestCaseB/specfem3d_globe
export I_MPI_PMI_VALUE_LENGTH_MAX=1800
# keep OMP_NUM_THREADS consistent with --cpus-per-task; KMP_HW_SUBSET=1T uses one hardware thread per core
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
export KMP_AFFINITY=granularity=core,compact
export FORT_BUFFERED=true
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
# total MPI ranks = nodes x tasks per node
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASK = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "====================="
echo `date`
ls -lrth $install_dir/TestCaseB/specfem3d_globe/OUTPUT_FILES/output_*.txt
cat $install_dir/TestCaseB/specfem3d_globe/OUTPUT_FILES/output_mesher.txt
cat $install_dir/TestCaseB/specfem3d_globe/OUTPUT_FILES/output_solver.txt