#!/bin/bash

##############################################
#     ARIS slurm script template             #
#                                            #
# Submit script: sbatch GPU.slurm n1 n2 ...  #
#                                            #
##############################################
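
# Example (hypothetical problem sizes): sbatch GPU.slurm 1000 2000 4000
# Each argument n is forwarded to the selected executable as two arguments ($n $n),
# so one srun launch is performed per listed size.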


#SBATCH --job-name=run_GPU   # Job name
#SBATCH --output=GPU.out   # Stdout file
#SBATCH --error=GPU.err   # Stderr file
#SBATCH --ntasks=32   # Number of processor cores (i.e. tasks)
#SBATCH --nodes=16    # Number of nodes requested
#SBATCH --ntasks-per-node=2     # Tasks per node
#SBATCH --cpus-per-task=1     # Threads per task
#SBATCH --gres=gpu:2 # GPUs per node
#SBATCH --time=00:40:00   # walltime
#SBATCH --mem=32G   # memory per NODE
#SBATCH --partition=gpu  # Partition
#SBATCH --account=testproj  # Accounting project
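
# Resource layout implied by the directives above: 16 nodes x 2 tasks/node = 32 tasks,
# with 2 GPUs per node so that --gres matches --ntasks-per-node (see the note in the
# run loop below; presumably each task drives one GPU).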

## LOAD MODULES ##
module purge		# clean up loaded modules 

# load necessary modules
module load gnu
module load intel
module load intelmpi
module load binutils
module load cuda

export I_MPI_FABRICS=shm:dapl   # Intel MPI fabrics: shared memory intra-node, DAPL inter-node

## Change these to the paths of your executables!
gpu_prog="./cuda_SingleGPU.exe"
gpu_prog1="./cuBLAS.exe"
gpu_prog2="./cuBLAS_MultiGPU.exe"

for n in "$@"
do
	#srun $gpu_prog $n $n >> temp.out
	#srun $gpu_prog1 $n $n >> temp.out
	# Important note: in the MultiGPU version, --gres=gpu:N must equal --ntasks-per-node,
	# otherwise not all GPUs will be utilized!
	srun $gpu_prog2 $n $n >> temp.out
done
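
# Results from every size are appended to temp.out (note the ">>"); runtime errors go to GPU.err.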