#!/bin/bash

####################################
#     ARIS slurm script template   #
#                                  #
# Submit script: sbatch filename   #
#                                  #
####################################
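
# Usage sketch (the size values below are hypothetical examples): the script
# forwards its command-line arguments to the loop at the bottom, running the
# cuBLAS binary once per problem size, e.g.:
#   sbatch S_GPU_cuB.slurm 256 1024 4096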


#SBATCH --job-name=run_GPU   # Job name
#SBATCH --output=run_GPU.%j.out # Stdout (%j expands to jobId)
#SBATCH --error=run_GPU.%j.err # Stderr (%j expands to jobId)
#SBATCH --ntasks=1     # Number of processor cores (i.e. tasks)
#SBATCH --nodes=1    # Number of nodes requested
#SBATCH --ntasks-per-node=1     # Tasks per node
#SBATCH --cpus-per-task=1     # Threads per task
#SBATCH --gres=gpu:1 # GPUs per node
#SBATCH --time=00:40:00   # walltime
#SBATCH --mem=32G   # memory per NODE
#SBATCH --partition=gpu  # Partition
#SBATCH --account=testproj  # Accounting project

## LOAD MODULES ##
module purge		# clean up loaded modules 

# load necessary modules
module load gnu
module load intel
module load intelmpi
module load binutils
module load cuda
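
# Optional sanity check (a minimal sketch): 'module list' prints the modules
# loaded above, so a failed load is caught before the run starts.
# module list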

## Change this to your output directory!
output="/users/guest/petyros/Training/Outputs"

partition="gpu"
## Change this to the directory of your executable!
gpu_prog="/users/guest/petyros/Training/GPUs/cuBLAS"
rm -f "$output/Single_GPU.$partition"   # remove any previous results file

# Loop over the problem sizes passed as arguments to this script; each
# iteration runs the cuBLAS executable on an n x n problem and appends
# its output to the shared results file.
for n in "$@"
do
	srun "$gpu_prog" "$n" "$n" >> "$output/Single_GPU.$partition"
done
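
# After submission, the job can be monitored and its results inspected with
# standard SLURM/shell tools (illustrative commands, run from a login shell):
#   squeue -u $USER                                            # queue state
#   cat /users/guest/petyros/Training/Outputs/Single_GPU.gpu   # accumulated results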