#!/bin/bash
##############################################
#         ARIS SLURM script template         #
#                                            #
# Submit script: sbatch GPU.slurm n1 n2 ...  #
#                                            #
##############################################
#SBATCH --job-name=run_GPU       # Job name
#SBATCH --output=GPU.out         # Stdout file
#SBATCH --error=GPU.err          # Stderr file
#SBATCH --ntasks=32              # Total number of tasks (MPI ranks)
#SBATCH --nodes=16               # Number of nodes requested
#SBATCH --ntasks-per-node=2      # Tasks per node
#SBATCH --cpus-per-task=1        # Threads per task
#SBATCH --gres=gpu:2             # GPUs per node
#SBATCH --time=00:40:00          # Walltime limit (hh:mm:ss)
#SBATCH --mem=32G                # Memory per NODE
#SBATCH --partition=gpu          # Partition to submit to
#SBATCH --account=testproj       # Accounting project
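# Sanity check on the request above: 16 nodes x 2 tasks/node = 32 tasks in
# total, and gpu:2 per node pairs each task with its own GPU, which the
# MultiGPU run at the bottom of this script relies on.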
## LOAD MODULES ##
module purge # clean up loaded modules
# load necessary modules
module load gnu
module load intel
module load intelmpi
module load binutils
module load cuda
export I_MPI_FABRICS=shm:dapl    # Intel MPI fabrics: shared memory within a node, DAPL between nodes
## Change these paths to point at your executables!
gpu_prog="./cuda_SingleGPU.exe"
gpu_prog1="./cuBLAS.exe"
gpu_prog2="./cuBLAS_MultiGPU.exe"
# Iterate over all sizes passed on the command line (sbatch GPU.slurm n1 n2 ...)
for n in "$@"; do
    #srun $gpu_prog "$n" "$n" >> temp.out
    #srun $gpu_prog1 "$n" "$n" >> temp.out
    # Important: in the MultiGPU version, --gres=gpu:N must match
    # --ntasks-per-node, so that every task gets its own GPU and all GPUs
    # on each node are utilized!
    srun $gpu_prog2 "$n" "$n" >> temp.out
done
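
A minimal submission sketch (the sizes below are placeholders; the loop passes
each argument twice, i.e. as square problem dimensions):

    sbatch GPU.slurm 1000 2000 4000

This queues one job that runs cuBLAS_MultiGPU.exe once per argument, for sizes
1000, 2000 and 4000, appending each run's output to temp.out. To try the
single-GPU or plain cuBLAS versions instead, comment out the last srun line in
the loop and uncomment the corresponding line above it.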