Gromacs Job Scripts

From UFRC

;Note: Use 'module spider gromacs' to find available gromacs versions. The module loads below may be outdated.
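
For example, to list the installed versions and then see which modules a specific version needs (the version below is only an illustration; substitute one reported by the first command):

<pre>
# List all GROMACS versions provided by the module system
module spider gromacs

# Show the modules that must be loaded together with a specific version
module spider gromacs/2019.2
</pre>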

[[Gromacs|back to the Gromacs page]]

==Parallel (MPI)==

The sample script below runs a hybrid MPI/OpenMP GROMACS job: 2 MPI tasks, each with 4 OpenMP threads (one thread per allocated CPU core).

<pre>
#!/bin/bash
#SBATCH --job-name=gromacs
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gmx-%j.out
#SBATCH --ntasks=2
#SBATCH --cpus-per-task=4
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=1gb

# Start from a clean environment and load the GROMACS toolchain
module purge
ml gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2

# Use one OpenMP thread per CPU core allocated to each MPI task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmix_v3 gmx mdrun -ntomp ${SLURM_CPUS_PER_TASK} -s topol.tpr
</pre>
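
After adjusting the #SBATCH and module lines, generate the run input file referenced by mdrun (topol.tpr) with gmx grompp and submit the script with sbatch. A minimal sketch, assuming illustrative input file names (md.mdp, conf.gro, topol.top) and an illustrative script name:

<pre>
# Build the portable run input file consumed by mdrun (file names are examples)
gmx grompp -f md.mdp -c conf.gro -p topol.top -o topol.tpr

# Submit the job script (example file name)
sbatch gromacs_mpi.sbatch
</pre>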

==GPU Acceleration (MPI + multiple GPUs)==

The sample script below runs the CUDA-enabled GROMACS build on a single GPU node: 2 MPI tasks, 7 OpenMP threads per task, and 2 GPUs requested with --gres=gpu:tesla:2.

<pre>
#!/bin/bash
#SBATCH --job-name=multi-gpu
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gromacs_%j.log
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --tasks-per-node=2
#SBATCH --cpus-per-task=7
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=2:00:00
#SBATCH --mem-per-cpu=1gb
#SBATCH --partition=hpg2-gpu
#SBATCH --gres=gpu:tesla:2

# Load the CUDA-enabled GROMACS build and its toolchain
module load gcc/5.2.0 openmpi/1.10.2 gromacs/2016.3-CUDA

# Use one OpenMP thread per CPU core allocated to each MPI task;
# --accel-bind=g binds each task to the GPU(s) closest to its allocated CPUs
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS gmx mdrun -v
</pre>
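
To check which GPUs a job received, optional diagnostic lines can be added before the srun call; this is a sketch using standard tools, not part of the original script:

<pre>
# Optional diagnostics: list the GPUs allocated to the job
echo "CUDA_VISIBLE_DEVICES = $CUDA_VISIBLE_DEVICES"
nvidia-smi -L
</pre>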