#!/bin/bash
##############################################
#         ARIS slurm script template         #
#                                            #
# Submit script: sbatch MPI.slurm n1 n2 ...  #
#                                            #
##############################################
#SBATCH --output=MPI.out
#SBATCH --error=MPI.err
#SBATCH --ntasks=16 # Total number of MPI tasks
#SBATCH --nodes=16 # Number of nodes requested
#SBATCH --ntasks-per-node=1 # Tasks per node
#SBATCH --cpus-per-task=20 # Threads per task
#SBATCH --time=00:10:00 # walltime
#SBATCH --mem=50G # memory per NODE
#SBATCH --partition=compute # Partition
#SBATCH --account=testproj # Accounting project
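# Totals requested above: 16 nodes x 1 MPI task/node x 20 cpus/task = 320 cores, 50G memory per node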
export I_MPI_FABRICS=shm:dapl # Intel MPI fabrics: shared memory within a node, DAPL over the interconnect between nodes
## LOAD MODULES ##
module purge # clean up loaded modules
# load necessary modules
module load gnu ##/7.2.0
module load intel ##/17.0.4
module load intelmpi ##/5.1.3.258
module load binutils
module load cuda
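# Bare "module load" picks each module's default version; append the commented
# versions above (e.g. intel/17.0.4) to pin a specific toolchain for reproducibility.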
## Change this to the path of your executable!
gpu_prog="./MPI.exe"
gpu_prog1="./MPI-OpenMP.exe"
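# Assumption (not stated in this excerpt): MPI.exe is the pure-MPI binary and
# MPI-OpenMP.exe is the hybrid MPI+OpenMP binary exercised by the thread loop below.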
export OMP_PROC_BIND=spread # OpenMP thread affinity variable
for tr in 1 2 5 10 20 # Run for different OpenMP thread counts (tr <= cpus-per-task)
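do
    # The loop body is cut off in this excerpt; below is a minimal assumed sketch.
    export OMP_NUM_THREADS=$tr            # OpenMP threads per MPI task for this run
    srun $gpu_prog1 > "hybrid.${tr}.out"  # hypothetical output name; srun launches the 16 MPI tasks
done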