Gromacs Job Scripts


back to the Gromacs page

Parallel (MPI)

#!/bin/bash
#SBATCH --account=ufhpc
#SBATCH --qos=ufhpc
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=stdout
#SBATCH --ntasks=8
#SBATCH --nodes=2
#SBATCH --mem-per-cpu=200mb
#SBATCH --time=01:00:00

module load intel/2016.0.109 openmpi/1.10.2 gromacs/5.1.2

# mdrun_mpi_d is the double-precision, MPI-enabled mdrun binary provided by the gromacs/5.1.2 module
mpiexec mdrun_mpi_d < gromacs.in > gromacs.out 2>&1
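
To submit this script, save it to a file and hand it to SLURM with sbatch; the file name below is only an example, and squeue is shown as a quick way to confirm the job is queued or running.

sbatch gromacs_mpi.job
squeue -u $USER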

GPU Acceleration (MPI + multiple GPUs)

#!/bin/bash
#SBATCH --job-name=multi-gpu
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=stdout
#SBATCH --error=stderr
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --tasks-per-node=2
#SBATCH --cpus-per-task=7
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=2:00:00
#SBATCH --mem-per-cpu=1gb
#SBATCH --partition=hpg2-gpu
#SBATCH --gres=gpu:tesla:2

module load gcc/5.2.0  openmpi/1.10.2 gromacs/2016.3-CUDA

GMX=gmx

echo Host = `hostname`
echo Start = `date`
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# One MPI rank per GPU (--accel-bind=g binds each task to the GPU closest to its CPUs),
# with $SLURM_CPUS_PER_TASK OpenMP threads per rank
srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS $GMX mdrun -v
echo Finish = `date`
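
With no file arguments, mdrun looks for topol.tpr in the directory the job runs in. To point it at a differently named run input, the srun line can be extended with -deffnm (or -s); the prefix md below is only a placeholder, not a file name from this page:

srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS $GMX mdrun -v -deffnm md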