---+ GPU Example

<pre>
#!/bin/bash
#
#SBATCH --job-name=test_job
#SBATCH --account=gpu_gres   # to access GPU resources
#SBATCH --partition=gpu
#SBATCH --nodes=1            # request to run the job on a single node
##SBATCH --ntasks=10         # request 10 CPUs (t3gpu01/02: balance between CPUs and GPUs: 5 CPUs / 1 GPU)
#SBATCH --gres=gpu:2         # request two GPUs on the node; this is the total number of GPUs for the job
##SBATCH --mem=4000M         # memory (per job)
#SBATCH --time=0-00:30       # time in format DD-HH:MM

# each node has local /scratch space to be used during the job run
mkdir -p /scratch/$USER/${SLURM_JOB_ID}
export TMPDIR=/scratch/$USER/${SLURM_JOB_ID}

# Slurm reserves two GPUs (per the request above); their IDs are recorded in the shell variable CUDA_VISIBLE_DEVICES
echo CUDA_VISIBLE_DEVICES : $CUDA_VISIBLE_DEVICES

# the Python program script.py should use the CUDA_VISIBLE_DEVICES variable (*NOT* hardcoded GPU numbers)
python script.py

# clean up the temporary working dir once the job has completed:
rm -rf /scratch/$USER/${SLURM_JOB_ID}
</pre>

-- Main.NinaLoktionova - 2019-09-24
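For illustration only, here is a minimal sketch of how =script.py= (a placeholder name from the batch script above) might honour the environment that Slurm and the batch script prepare. It uses only the Python standard library; the variable names and the exact handling shown are assumptions, not part of the original example:

<pre>
import os
import tempfile

# CUDA_VISIBLE_DEVICES is set by Slurm to the GPU IDs reserved for this job, e.g. "0,1".
# CUDA-aware libraries see only these GPUs, renumbered as devices 0..N-1,
# so GPU numbers must never be hardcoded in the program.
visible = os.environ.get("CUDA_VISIBLE_DEVICES", "")  # assumed to be set by Slurm
gpu_ids = [g for g in visible.split(",") if g]
print("Slurm granted %d GPU(s): %s" % (len(gpu_ids), visible or "none"))

# TMPDIR was exported by the batch script to the per-job /scratch directory;
# tempfile honours TMPDIR, so temporary files automatically land on local scratch.
with tempfile.NamedTemporaryFile() as tmp:
    print("temporary file created at: %s" % tmp.name)
</pre>

The batch script itself is submitted with =sbatch= in the usual way. Frameworks such as PyTorch or TensorFlow pick up CUDA_VISIBLE_DEVICES automatically and enumerate the granted GPUs as devices 0..N-1, so no GPU numbers need to be hardcoded there either.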