#!/bin/bash

##############################################
#         ARIS slurm script template         #
#                                            #
# Submit script: sbatch MPI.slurm n1 n2 ...  #
#                                            #
##############################################
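# Example submission (hypothetical problem sizes; each argument n is passed
# on to the executables below as "n n"):
#   sbatch MPI.slurm 1000 2000 4000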


#SBATCH --job-name=run_mpi    # Job name
#SBATCH --output=MPI.out    # Stdout file
#SBATCH --error=MPI.err    # Stderr file
#SBATCH --ntasks=16    # Total number of MPI tasks
#SBATCH --nodes=16    # Number of nodes requested
#SBATCH --ntasks-per-node=1    # Tasks per node
#SBATCH --cpus-per-task=20    # Threads per task
#SBATCH --time=00:10:00   # walltime
#SBATCH --mem=50G   # memory per NODE
#SBATCH --partition=compute # Partition
#SBATCH --account=testproj  # Accounting project
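# The directives above request 16 nodes with 1 MPI task per node and 20 cores
# per task: 16 MPI ranks in total, each with up to 20 OpenMP threads.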


export I_MPI_FABRICS=shm:dapl    # Intel MPI fabrics: shared memory within a node, DAPL between nodes


## LOAD MODULES ##
module purge		# clean up loaded modules 

# load necessary modules
module load gnu ##/7.2.0
module load intel ##/17.0.4
module load intelmpi ##/5.1.3.258
module load binutils
module load cuda


## Change these to the paths of your executables!
gpu_prog="./MPI.exe"
gpu_prog1="./MPI-OpenMP.exe"
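# Assumed roles, judging from the names and how they are launched below:
# MPI.exe is the pure MPI binary, MPI-OpenMP.exe the hybrid MPI+OpenMP binary.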

export OMP_PROC_BIND=spread # OpenMP thread affinity variable
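# 'spread' places the OpenMP threads of each task as evenly as possible
# across the cores allocated to that task.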

# Loop over all problem sizes given on the command line
for n in "$@";
do
	# Pure MPI run
	srun $gpu_prog $n $n >> mpi.out
	for tr in 1 2 5 10 20 # Run for different OpenMP thread numbers ( tr <= cpus-per-task )
	do
		export OMP_NUM_THREADS=$tr
		# Hybrid MPI+OpenMP run with $tr threads per task
		srun $gpu_prog1 $n $n >> mpi.out
	done
done