Commit 5eb28dd4 authored by Cedric Jourdain

Add script part for davide+merge

parents c79ab795 4f9ca4a2
@@ -5,7 +5,7 @@ echo " - occigen"
echo " - marenostrum "
echo " - marconi-knl "
echo " - daint-gpu "
echo " - daint-mc "
echo " - daint-cpu-only "
echo " - davide "
read machine
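The menu above feeds the read machine prompt; a minimal sketch of input validation that could follow it (the machine list is taken from the echo lines above, the check itself is not part of the commit):

case "$machine" in
    occigen|marenostrum|marconi-knl|daint-gpu|daint-cpu-only|davide) ;;  # known machines
    *) echo "Unknown machine: $machine" >&2; exit 1 ;;                   # fail fast on typos
esac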
@@ -16,8 +16,8 @@ Untar(){
#Clone the repository
cd $install_dir
git -c http.sslVerify=false clone https://github.com/geodynamics/specfem3d_globe.git
-# Checkout of 31 October 2017 version
cd specfem3d_globe/
+# Checkout of 31 October 2017 version
git checkout b1d6ba966496f269611eff8c2cf1f22bcdac2bd9
#Copy sources
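The clone is pinned to a fixed revision; a minimal sanity check that could follow the checkout (illustrative, not part of the commit):

# verify that the pinned revision was actually checked out
expected=b1d6ba966496f269611eff8c2cf1f22bcdac2bd9
actual=$(git rev-parse HEAD)
[ "$actual" = "$expected" ] || { echo "unexpected specfem3d_globe revision: $actual" >&2; exit 1; }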
@@ -94,7 +94,7 @@ Clean(){
Deploy(){
echo "install_dir ="$install_dir
-if [ $machine = "occigen" ] || [ $machine = "marenostrum" ] || [ $machine = "marconi-knl" ] || [ $machine = "daint-mc" ] || [ $machine = "daint-gpu" ] || [ $machine = "davide" ];then
+if [ $machine = "occigen" ] || [ $machine = "marenostrum" ] || [ $machine = "marconi-knl" ] || [ $machine = "daint-cpu-only" ] || [ $machine = "daint-gpu" ] || [ $machine = "davide" ];then
echo "==> Install on $machine :"
#Clean
mkdir -p $install_dir
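The || chain above has to be extended by hand for every new machine; an equivalent, more compact form using case would be (a sketch, not what the commit does):

case $machine in
    occigen|marenostrum|marconi-knl|daint-cpu-only|daint-gpu|davide)
        echo "==> Install on $machine :"
        mkdir -p $install_dir
        ;;
esac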
#!/bin/bash
#module purge
module load daint-gpu
module load PrgEnv-cray
export software=specfem3d_globe
export version=31octobre
export install_dir=$SCRATCH/benchmarks/daint-gpu/$software/$version/cpu-only
export CC="cc"
export FC="ftn"
export MPIFC=$FC
export FCFLAGS="-g -O3 -h omp -h thread3 -h vector3 -h fp3 -h autothread"
export CFLAGS="-g -O3 -h omp -h thread3 -h vector3 -h fp3 -h autothread"
#!/bin/bash
#module purge
module load daint-gpu
module load craype-accel-nvidia60
module load PrgEnv-cray
export machine=daint-gpu
export software=specfem3d_globe
export version=31octobre
export install_dir=$SCRATCH/benchmarks/$machine/$software/$version/gpu
export CC="cc"
export FC="ftn"
export MPIFC=$FC
export FCFLAGS=" -g -O3 -h omp -O ipa3 -h fp3 -O thread3 -h autothread" # -xhost -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -traceback -mcmodel=large"
export CFLAGS=" -g -O3 -h omp -h ipa3 -h fp3 -h thread3 -h autothread"
#!/bin/bash -l
#SBATCH --job-name=specfem3D_test_case_A
#SBATCH --time=01:30:00
#SBATCH --nodes=24
#SBATCH --ntasks-per-core=2
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=6
#SBATCH --partition=normal
#SBATCH --constraint=gpu
#SBATCH --output=specfem3D_test_case_A_daint-cpu-%j.output
set -e
source ../env/env_daint-cpu-only
cd $install_dir/TestCaseA/specfem3d_globe
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=` echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
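Since the job sources its environment via a path relative to the submission directory (source ../env/env_daint-cpu-only), it is presumably submitted from the directory holding the batch scripts; a hedged usage example (the file name is illustrative):

sbatch specfem3D_test_case_A_daint-cpu.slurm   # hypothetical script name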
#!/bin/bash -l
#SBATCH --job-name=specfem3D_test_case_B
#SBATCH --time=00:30:00
#SBATCH --nodes=384
#SBATCH --ntasks-per-core=2
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=6
#SBATCH --partition=normal
#SBATCH --constraint=gpu
#SBATCH --output=specfem3D_test_case_B_daint-cpu-%j.output
set -e
source ../env/env_daint-cpu-only
cd $install_dir/TestCaseB/specfem3d_globe
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=` echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
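The MPI_PROCESS computation shells out to bc; plain bash arithmetic gives the same result without the extra process (a sketch, not what the script does):

MPI_PROCESS=$((SLURM_NNODES * SLURM_NTASKS_PER_NODE))   # e.g. 384 * 4 = 1536 ranks for test case B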
#!/bin/bash -l
#SBATCH --job-name=specfem3D_test_case_A
#SBATCH --time=01:00:00
#SBATCH --nodes=24
#SBATCH --ntasks-per-core=2
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=6
#SBATCH --partition=normal
#SBATCH --constraint=gpu
#SBATCH --output=specfem3D_test_case_A_daint-gpu-%j.output
set -e
source ../env/env_daint-gpu
cd $install_dir/TestCaseA/specfem3d_globe
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export CRAY_CUDA_MPS=1
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=` echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
#unset FORT_BUFFERED
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
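CRAY_CUDA_MPS=1 enables the CUDA Multi-Process Service, which lets the four MPI ranks per node share the node's single GPU; without it, concurrent ranks contend for exclusive access to the device. A minimal guard that could precede the solver launch (illustrative only):

if [ "${CRAY_CUDA_MPS:-0}" != "1" ]; then
    echo "warning: CRAY_CUDA_MPS is not set; 4 ranks per node will contend for 1 GPU" >&2
fi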
#!/bin/bash -l
#SBATCH --job-name=specfem3D_test_case_B
#SBATCH --time=01:00:00
#SBATCH --nodes=384
#SBATCH --ntasks-per-core=2
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=6
#SBATCH --partition=normal
#SBATCH --constraint=gpu
#SBATCH --output=specfem3D_test_case_B_daint-gpu-%j.output
set -e
source ../env/env_daint-gpu
cd $install_dir/TestCaseB/specfem3d_globe
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export CRAY_CUDA_MPS=1
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=` echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
#unset FORT_BUFFERED
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`