[[Category:Software]]
;Note: Use 'module spider gromacs' to find the available GROMACS versions; the module loads shown below may be outdated.
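For example (a usage sketch; the versions listed will depend on the cluster's current software stack):
<pre>
module spider gromacs           # list all installed GROMACS versions
module spider gromacs/2019.2    # show what is needed to load a specific version
</pre>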
 
[[Gromacs|back to the Gromacs page]]

==Parallel (MPI)==
<pre>
#!/bin/bash
# 2 MPI tasks x 4 OpenMP threads each = 8 cores total
#SBATCH --job-name=gromacs
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gmx-%j.out
#SBATCH --ntasks=2
#SBATCH --cpus-per-task=4
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=1gb

module purge
ml gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2

# Give each MPI task as many OpenMP threads as it has allocated cores
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmix_v3 gmx mdrun -ntomp ${SLURM_CPUS_PER_TASK} -s topol.tpr
</pre>
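To try this script, save it to a file and submit it with 'sbatch' (the filename below is only an illustration):
<pre>
sbatch gromacs_mpi.sh     # submit the job script
squeue -u $USER           # check the job's state in the queue
</pre>
Note that '--ntasks-per-socket=1' places each of the two MPI ranks on its own socket, so each rank's four OpenMP threads stay on cores of a single socket.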
 
 
==GPU Acceleration (MPI + multiple GPUs)==
<pre>
#!/bin/bash
#SBATCH --job-name=multi-gpu
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=stdout
#SBATCH --error=stderr
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --tasks-per-node=2
#SBATCH --cpus-per-task=7
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=2:00:00
#SBATCH --mem-per-cpu=1gb
#SBATCH --partition=hpg2-gpu
#SBATCH --gres=gpu:tesla:2

# Load a CUDA-enabled GROMACS build and its toolchain
module load gcc/5.2.0 openmpi/1.10.2 gromacs/2016.3-CUDA

GMX=gmx

echo Host = `hostname`
echo Start = `date`
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
# One MPI rank per GPU; --accel-bind=g binds each rank to the GPU nearest its cores
srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS $GMX mdrun -v
echo Finish = `date`
</pre>
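After the run, one way to confirm that mdrun actually detected and used the GPUs is to search its log file ('md.log' is mdrun's default log name):
<pre>
grep -i "gpu" md.log
</pre>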