Commit 5d2c4b31 authored by Cedric Jourdain

Add env files and job scripts for Irene Skylake (irene-skl) and Irene KNL (irene-knl)

parent 70559bda
@@ -8,6 +8,8 @@ echo " - daint-gpu "
 echo " - daint-cpu-only "
 echo " - davide "
 echo " - juwels"
+echo " - irene-skl "
+echo " - irene-knl "
 read machine
 source ./env/env_${machine}
@@ -95,7 +97,7 @@ Clean(){
 Deploy(){
 echo "install_dir ="$install_dir
-if [ $machine = "occigen" ] || [ $machine = "marenostrum" ] || [ $machine = "marconi-knl" ] || [ $machine = "daint-cpu-only" ] || [ $machine = "daint-gpu" ] || [ $machine = "davide" ] || [ $machine = "juwels" ];then
+if [ $machine = "occigen" ] || [ $machine = "marenostrum" ] || [ $machine = "marconi-knl" ] || [ $machine = "daint-cpu-only" ] || [ $machine = "daint-gpu" ] || [ $machine = "davide" ] || [ $machine = "juwels" ] || [ $machine = "irene-skl" ] || [ $machine = "irene-knl" ];then
 echo "==> Install on $machine :"
 #Clean
 mkdir -p $install_dir
......
#!/bin/bash
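# Build environment for specfem3d_globe on the Irene KNL partition (irene-knl)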
module purge
module load ccc datadir/cines datadir/own dfldatadir/own flavor/buildtarget/x86_64 intel/19.0.0.117 hwloc/1.11.3 mpi/openmpi/2.0.4
export machine=irene-knl
export software=specfem3d_globe
export version=31octobre
export install_dir=$CCCSCRATCHDIR/benchmarks/$machine/$software/$version/
export CC="mpicc"
export FC="mpifort"
export MPIFC=$FC
export FCFLAGS=" -g -O3 -qopenmp -xMIC-AVX512 -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -traceback -mcmodel=large -fma -align array64byte -finline-functions -ipo"
export CFLAGS=" -g -O3 -xMIC-AVX512 -fma -align -finline-functions -ipo"
export FCFLAGS_f90='-mod ./obj -I./obj -I. -I. -I${SETUP} -xMIC-AVX512'
export CPPFLAGS='-I${SETUP} -DFORCE_VECTORIZATION' # -xMIC-AVX512'
#!/bin/bash
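# Build environment for specfem3d_globe on the Irene Skylake partition (irene-skl)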
#module purge
module load ccc datadir/cines datadir/own dfldatadir/own flavor/buildtarget/x86_64 intel/19.0.0.117 hwloc/1.11.3 mpi/openmpi/2.0.4
export machine=irene-skl
export software=specfem3d_globe
export version=31octobre
export install_dir=$CCCSCRATCHDIR/benchmarks/$machine/$software/$version/
export CC="mpicc"
export FC="mpifort"
export MPIFC=$FC
export FCFLAGS=" -g -O3 -qopenmp -xCORE-AVX512 -mtune=skylake -DUSE_FP32 -DOPT_STREAMS -fp-model fast=2 -traceback -mcmodel=large -ipo"
export CFLAGS=" -g -O3 -xCORE-AVX512 -mtune=skylake -ipo "
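For reference, a minimal manual-build sketch using the variables exported by these two env files, assuming SPECFEM3D_GLOBE's usual autotools configure (the repository's Deploy step may drive this differently; the path follows the job scripts below):
source ./env/env_irene-skl # or ./env/env_irene-knl
cd $install_dir/TestCaseA/specfem3d_globe
# CC, FC, MPIFC, FCFLAGS and CFLAGS come from the env file sourced above
./configure CC="$CC" FC="$FC" MPIFC="$MPIFC" FCFLAGS="$FCFLAGS" CFLAGS="$CFLAGS"
make all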
#!/bin/bash
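# Job script: SPECFEM3D_GLOBE test case A on irene-knl (24 nodes, 96 MPI tasks, 16 OpenMP threads per task)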
#MSUB -A pa4674
#MSUB -q knl
#MSUB -r specfem3D_test_case_A
#MSUB -N 24
#MSUB -n 96 # Number of tasks to use
#MSUB -c 16 # Number of threads per task to use
#MSUB -o specfem3D_test_case_A_irene-knl-%I.output
#MSUB -T 7400
#set -e
source ../env/env_irene-knl
cd $install_dir/TestCaseA/specfem3d_globe
# Make sure OMP_NUM_THREADS combined with KMP_HW_SUBSET matches the cpus-per-task (-c) value
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=16
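# With the MSUB settings above (-N 24, -n 96, -c 16): 96 / 24 = 4 MPI tasks per node,
# each running 16 OpenMP threads at 1 thread per core (KMP_HW_SUBSET=1T),
# i.e. 64 cores used per KNL node.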
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=96 #` echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time ccc_mprun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time ccc_mprun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
#!/bin/bash
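# Job script: SPECFEM3D_GLOBE test case B on irene-knl (384 nodes, 1536 MPI tasks = 4 per node, 16 OpenMP threads per task)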
#MSUB -A pa4674
#MSUB -q knl
#MSUB -r specfem3D_test_case_B
#MSUB -N 384
#MSUB -n 1536 # Number of tasks to use
#MSUB -c 16 # Number of threads per task to use
#MSUB -o specfem3D_test_case_B_irene-knl-%I.output
#MSUB -T 20200
source ../env/env_irene-knl
cd $install_dir/TestCaseB/specfem3d_globe
# Make sure OMP_NUM_THREADS combined with KMP_HW_SUBSET matches the cpus-per-task (-c) value
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=16
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=1536 #` echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time ccc_mprun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time ccc_mprun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
#!/bin/bash
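# Job script: SPECFEM3D_GLOBE test case A on irene-skl (24 nodes, 96 MPI tasks, 12 OpenMP threads per task)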
#MSUB -A pa4674
#MSUB -q skylake
#MSUB -r specfem3D_test_case_A
#MSUB -N 24
#MSUB -n 96 # Number of tasks to use
#MSUB -c 12 # Number of threads per task to use
#MSUB -o specfem3D_test_case_A_irene-skl-%I.output
#MSUB -T 5400
#set -e
source ../env/env_irene-skl
cd $install_dir/TestCaseA/specfem3d_globe
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
export I_MPI_DEBUG=4
# Make sure OMP_NUM_THREADS combined with KMP_HW_SUBSET matches the cpus-per-task (-c) value
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=12
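# With the MSUB settings above (-N 24, -n 96, -c 12): 96 / 24 = 4 MPI tasks per node,
# each running 12 OpenMP threads at 1 thread per core (KMP_HW_SUBSET=1T),
# i.e. 48 cores used per Skylake node.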
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=96 #` echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time ccc_mprun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time ccc_mprun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
#!/bin/bash
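# Job script: SPECFEM3D_GLOBE test case B on irene-skl (384 nodes, 1536 MPI tasks = 4 per node, 12 OpenMP threads per task)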
#MSUB -A pa4674
#MSUB -q skylake
#MSUB -r specfem3D_test_case_B
#MSUB -N 384
#MSUB -n 1536 # Number of tasks to use
#MSUB -c 12 # Number of threads per task to use
#MSUB -o specfem3D_test_case_B_irene-skl-%I.output
#MSUB -T 5400
#set -e
source ../env/env_irene-skl
cd $install_dir/TestCaseB/specfem3d_globe
export I_MPI_DOMAIN=auto
export I_MPI_PIN_RESPECT_CPUSET=0
export I_MPI_DEBUG=4
# Make sure OMP_NUM_THREADS combined with KMP_HW_SUBSET matches the cpus-per-task (-c) value
export KMP_HW_SUBSET=1T
export OMP_NUM_THREADS=12
ulimit -s unlimited
MESHER_EXE=./bin/xmeshfem3D
SOLVER_EXE=./bin/xspecfem3D
# backup files used for this simulation
cp DATA/Par_file OUTPUT_FILES/
cp DATA/STATIONS OUTPUT_FILES/
cp DATA/CMTSOLUTION OUTPUT_FILES/
##
## mesh generation
##
sleep 2
echo
echo `date`
echo "starting MPI mesher"
echo
MPI_PROCESS=1536 #` echo "$SLURM_NNODES*$SLURM_NTASKS_PER_NODE" | bc -l`
echo "MPI_PROCESS $MPI_PROCESS"
echo "OMP_NUM_THREADS=$OMP_NUM_THREADS"
time ccc_mprun -n ${MPI_PROCESS} ${MESHER_EXE}
echo " mesher done: `date`"
echo
##
## forward simulation
##
sleep 2
echo
echo `date`
echo starting run in current directory $PWD
echo
time ccc_mprun -n ${MPI_PROCESS} ${SOLVER_EXE}
echo "finished successfully"
echo `date`
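A hedged submission sketch for these new batch scripts, using TGCC's ccc_msub and ccc_mpp; the file names below are placeholders for whatever the scripts are called in this repository (each one sources ../env/env_irene-*, so submit it from the directory that contains it):
ccc_msub specfem3D_test_case_A_irene-knl.sh # placeholder file name
ccc_msub specfem3D_test_case_A_irene-skl.sh # placeholder file name
ccc_mpp -u $USER # check the state of the submitted jobs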