Commit caa76453 authored by Cedric Jourdain

Add Marenostrum env and script

parent 0b450330
#!/bin/bash
module purge
module load intel/2017.4 impi/2017.4 mkl/2017.4 bsc/1.0
export machine=marenostrum
export software=specfem3d_globe
export version=31octobre
#export install_dir=/gpfs/scratch/pr1eih00/pr1eih01/benchmarks/$machine/$software/$version
export install_dir=/gpfs/projects/pr1eih00/pr1eih01/benchmarks/$machine/$software/$version
export CC="mpiicc"
export FC="mpiifort"
export MPIFC=$FC
export FCFLAGS=" -g -O3 -qopenmp -xCORE-AVX512 -mtune=skylake -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -traceback -mcmodel=large"
export CFLAGS=" -g -O3 -xCORE-AVX512 -mtune=skylake"
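# Build sketch (assumed workflow, not part of this file): with the variables
# above exported, a typical autoconf build of specfem3d_globe would be
# configured along these lines; adjust paths and options to your checkout.
#   source env/env_marenostrum
#   cd $install_dir/TestCaseA/specfem3d_globe
#   ./configure CC="$CC" FC="$FC" MPIFC="$MPIFC" FCFLAGS="$FCFLAGS" CFLAGS="$CFLAGS"
#   make clean
#   make all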
#!/bin/bash -l
#SBATCH --job-name=specfem3D_test_case_A
#SBATCH --time=01:00:00
#SBATCH --nodes=24
#SBATCH --ntasks-per-node=4
#SBATCH --ntasks-per-core=1
#SBATCH --cpus-per-task=12
#SBATCH --output=specfem3D_test_case_A_marenostrum-%j.output
#SBATCH --constraint=highmem
#SBATCH --acctg-freq=task=1
set -e
source ../env/env_marenostrum
cd $install_dir/TestCaseA/specfem3d_globe
export I_MPI_PIN_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
export I_MPI_DEBUG=4
# Make sure OMP_NUM_THREADS matches --cpus-per-task (KMP_HW_SUBSET=1T = one thread per physical core)
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=12
export KMP_AFFINITY=compact,verbose
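# Note: 4 ranks/node x 12 threads/rank = 48 cores, which fills a standard
# MareNostrum4 Skylake node (2 x 24-core Xeon Platinum 8160). KMP_HW_SUBSET=1T
# runs one thread per physical core; KMP_AFFINITY=compact keeps each rank's
# threads on adjacent cores.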
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# back up the input files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
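# Derive the rank count from the allocation: 24 nodes x 4 tasks/node = 96 MPI ranks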
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo "starting run in current directory $PWD"
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
#!/bin/bash -l
#SBATCH --job-name=specfem3D_test_case_B
#SBATCH --time=00:30:00
#SBATCH --nodes=384
#SBATCH --ntasks-per-node=4
#SBATCH --ntasks-per-core=1
#SBATCH --cpus-per-task=12
#SBATCH --output=specfem3D_test_case_B_marenostrum-1536MPI_12OMP-%j.output
##SBATCH --constraint=highmem
set -e
source ../env/env_marenostrum
cd $install_dir/TestCaseB/specfem3d_globe
export I_MPI_PIN_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
export I_MPI_DEBUG=4
# Make sure OMP_NUM_THREADS matches --cpus-per-task (KMP_HW_SUBSET=1T = one thread per physical core)
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=12
export KMP_AFFINITY=compact,verbose
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# back up the input files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
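# Derive the rank count from the allocation: 384 nodes x 4 tasks/node = 1536 MPI
# ranks, matching the 1536MPI_12OMP tag in this job's output filename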
MPI_PROCESS=$(( SLURM_NNODES * SLURM_NTASKS_PER_NODE ))
echo "SLURM_NTASKS_PER_NODE = " $SLURM_NTASKS_PER_NODE
echo "SLURM_CPUS_PER_TASKS = " $SLURM_CPUS_PER_TASK
echo "SLURM_NNODES=" $SLURM_NNODES
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time srun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo "starting run in current directory $PWD"
echo
time srun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
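# Submission sketch (hypothetical filenames; the commit does not name the job
# scripts): each case would be submitted with sbatch from the job directory,
# keeping the relative ../env layout the scripts expect, e.g.
#   sbatch job_specfem3d_test_case_A.slurm
#   sbatch job_specfem3d_test_case_B.slurm
#   squeue -u $USER   # monitor; output lands in specfem3D_test_case_*-<jobid>.output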