#!/bin/bash

####################################
#   ARIS SLURM script template     #
#                                  #
#   Submit with: sbatch filename   #
#                                  #
####################################

#SBATCH --job-name=run_GPU       # Job name
#SBATCH --output=J.%j.out        # Stdout (%j expands to the job ID)
#SBATCH --error=J.%j.err         # Stderr (%j expands to the job ID)
#SBATCH --ntasks=16              # Total number of tasks (MPI ranks)
#SBATCH --nodes=8                # Number of nodes requested
#SBATCH --ntasks-per-node=2      # Tasks per node
#SBATCH --cpus-per-task=1        # Threads per task
#SBATCH --gres=gpu:2             # GPUs per node
#SBATCH --time=00:40:00          # Walltime limit
#SBATCH --mem=32G                # Memory per node
#SBATCH --partition=gpu          # Partition
#SBATCH --account=testproj       # Accounting project

## LOAD MODULES ##
module purge                     # clean up any previously loaded modules

# Load the modules needed by the executables
module load gnu
module load intel
module load intelmpi
module load binutils
module load cuda

export I_MPI_FABRICS=shm:dapl

output="/users/guest/petyros/Training/Outputs"   ##/Inputs
partition="gpu"

## Change these to the paths of your executables!
gpu_prog="/users/guest/petyros/Training/GPUs/cuBLAS"
gpu_prog1="/users/guest/petyros/Training/GPUs/cuBLAS_MultiGPU"

# Uncomment to clear previous results before appending new ones
#rm -f "$output/Multi_GPU.$partition" "$output/Single_GPU.$partition"

## Important: for full GPU utilization in the multi-GPU version,
## --gres=gpu:N must match --ntasks-per-node (here both are 2).

# Loop over the problem sizes passed as positional arguments to this script
for n in "$@"; do
    #srun "$gpu_prog"  "$n" "$n" >> "$output/Single_GPU.$partition"
    srun "$gpu_prog1" "$n" "$n" >> "$output/Multi_GPU.$partition"
done
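
## Example submission (a sketch, not part of the original template): the loop
## above iterates over the script's positional arguments, so the matrix sizes
## are given on the sbatch command line. The filename "run_gpu.slurm" and the
## sizes shown are illustrative assumptions, not values from the original.
#
#   sbatch run_gpu.slurm 1024 2048 4096
#
## Each size $n is forwarded to the executable as "$n $n", and the results are
## appended to "$output/Multi_GPU.$partition".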