Gromacs Job Scripts

Note: Use 'module spider gromacs' to find the currently available GROMACS versions; the module loads in the examples below may be outdated.
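
For example, to see what is currently installed (the exact versions listed will vary over time):

module spider gromacs            # list all available GROMACS versions
module spider gromacs/2019.2     # show what is needed to load one specific version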

Back to the Gromacs page

Parallel (MPI)

#!/bin/bash
#SBATCH --job-name=gromacs
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gmx-%j.out
# Request 2 MPI tasks with 4 CPU cores each, one task per socket
#SBATCH --ntasks=2
#SBATCH --cpus-per-task=4
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=1gb

# Load the compiler, MPI, and GROMACS modules (check 'module spider gromacs' for current versions)
module purge
ml gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2

# One OpenMP thread per CPU core allocated to each MPI task
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmix_v3 gmx mdrun -ntomp ${SLURM_CPUS_PER_TASK} -s topol.tpr
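
The run input file topol.tpr referenced above is built ahead of time with gmx grompp; a minimal sketch, assuming the parameter, structure, and topology files are named md.mdp, conf.gro, and topol.top (placeholder names):

# Build the binary run input (.tpr) before submitting the job;
# md.mdp, conf.gro, and topol.top are example file names.
gmx grompp -f md.mdp -c conf.gro -p topol.top -o topol.tpr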

GPU Acceleration (MPI + multiple GPUs)

#!/bin/bash
#SBATCH --job-name=multi-gpu
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gromacs_%j.log
# One node: 2 MPI tasks (one per GPU, one per socket) with 7 CPU cores each
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --ntasks-per-node=2
#SBATCH --cpus-per-task=7
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=2:00:00
#SBATCH --mem-per-cpu=1gb
# Request two GPUs on the GPU partition
#SBATCH --partition=hpg2-gpu
#SBATCH --gres=gpu:tesla:2

# Load the compiler, MPI, and CUDA-enabled GROMACS modules (check 'module spider gromacs' for current versions)
module load gcc/5.2.0 openmpi/1.10.2 gromacs/2016.3-CUDA

# One OpenMP thread per allocated core; --accel-bind=g binds each task to its closest GPU.
# With no -s argument, mdrun reads topol.tpr by default.
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS gmx mdrun -v
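
Either script is submitted with sbatch and can be monitored with standard Slurm tools; a quick example, assuming the script was saved as gromacs.sbatch (an example name):

sbatch gromacs.sbatch        # submit the job script
squeue -u $USER              # check the state of your queued and running jobs
tail -f gmx-<jobid>.out      # follow the file named by --output (%j expands to the job ID)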