Commit 9ff2bbcb authored by Cedric Jourdain's avatar Cedric Jourdain

add Davide env and submit script

parent 5eb28dd4
#!/bin/bash
module purge
module load profile/base
module load gnu/6.4.0 cuda/9.2.88 openmpi/3.1.0--gnu--6.4.0
export machine=davide
export software=specfem3d_globe
export version=31octobre
export install_dir=$CINECA_SCRATCH/benchmarks/$machine/$software/$version
#export install_dir=$HOME/benchmarks/$machine/$software/$version
# GNU
export CC="mpicc"
export FC="mpif90"
export MPIFC=mpif90
export FCFLAGS="-mcpu=power8 -O3 -funroll-loops -fopenmp -fpeel-loops -funroll-loops -fno-strict-aliasing" #-flto : generate error ; -Ofast
export CFLAGS="-mcpu=power8 -O3 -funroll-loops -fopenmp -fpeel-loops -funroll-loops -fno-strict-aliasing" #-Ofast
# Power8
#export CC="xlc"
#export FC="xlf"
#export MPIFC=$FC
#export FCFLAGS="-qarch=pwr8 -O3 -qipa -qsmp=omp -qhot"
#export CFLAGS="-mcpu=power8 -O3 -qipa -qsmp=omp -qhot"
# Intel
#export FCFLAGS=" -g -O3 -qopenmp -xCORE-AVX2 -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -traceback -mcmodel=large"
#export CFLAGS=" -g -O3 -xCORE-AVX2 "
#!/bin/bash
#SBATCH -J Test_case_A
#SBATCH --time=01:00:00
#SBATCH --nodes=24
#SBATCH --ntasks-per-node=4
#SBATCH --ntasks-per-core=1
#SBATCH --cpus-per-task=4
#SBATCH --partition=dvd_usr_prod
#SBATCH --mem=86000
#SBATCH --out=Test_case_A_davide_%j.out
#SBATCH --err=Test_case_A_davide_%j.err
#SBATCH --account=Dec00_5IPwp7
#SBATCH --gres=gpu:4 # GPUs per node (N=1..4)
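# Resource layout implied by the directives above: 24 nodes x 4 tasks/node
# = 96 MPI ranks, each with 4 cores (used as OpenMP threads below); the 4 GPUs
# requested per node give one GPU per rank (mapping assumed, not set explicitly here).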
set -e
source ../env/env_davide
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
export OMP_PLACES=threads
export OMP_PROC_BIND=true
cd $install_dir/TestCaseA/specfem3d_globe
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
time mpirun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo "starting run in current directory: $PWD"
echo
#unset FORT_BUFFERED
time mpirun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
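#
# Hypothetical submission sketch (assumption, not part of this commit):
#   sbatch <this_script>.slurm   # actual script name not shown on this page
# Progress can then be followed in Test_case_A_davide_<jobid>.out and .err,
# as set by the #SBATCH --out/--err directives above.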