Gromacs Job Scripts

[[Category:Software]]
;Note: Use 'module spider gromacs' to find available gromacs versions. The module loads below may be outdated.
[[Gromacs|back to the Gromacs page]]
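
For example, to see which GROMACS builds are currently installed and how to load one (the version shown here is only an example; the actual list will vary):

<pre>
# List all GROMACS versions available on the cluster
module spider gromacs

# Show the modules that must be loaded for a specific version
module spider gromacs/2019.2
</pre>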
 
==Parallel (MPI)==
<pre>
#!/bin/bash
#SBATCH --job-name=gromacs
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gmx-%j.out
#SBATCH --ntasks=2
#SBATCH --cpus-per-task=4
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=1gb

module purge
ml gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmix_v3 gmx mdrun -ntomp ${SLURM_CPUS_PER_TASK} -s topol.tpr
</pre>
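
The srun line above launches 2 MPI ranks with 4 OpenMP threads each (ntasks × cpus-per-task) and expects a run input file named topol.tpr in the submission directory. A minimal sketch of preparing that file and submitting the job, assuming the script is saved as gromacs_mpi.sh and using placeholder input file names:

<pre>
# Load the same GROMACS build used by the job script
module load gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2

# Build the run input file from placeholder .mdp, coordinate, and topology files
gmx grompp -f md.mdp -c conf.gro -p topol.top -o topol.tpr

# Submit the job script (placeholder file name)
sbatch gromacs_mpi.sh
</pre>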
  
==GPU Acceleration (MPI + multiple GPUs)==
<pre>
#!/bin/bash
#SBATCH --job-name=multi-gpu
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gromacs_%j.log
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --tasks-per-node=2
#SBATCH --cpus-per-task=7
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=2:00:00
#SBATCH --mem-per-cpu=1gb
#SBATCH --partition=hpg2-gpu
#SBATCH --gres=gpu:a100:2

module load gcc/5.2.0 openmpi/1.10.2 gromacs/2016.3-CUDA

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS gmx mdrun -v
</pre>
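
This script is set up so that each of the 2 MPI ranks is bound to one of the two requested A100 GPUs (--accel-bind=g) and runs 7 OpenMP threads. A minimal sketch of submitting and checking the run, assuming the script is saved as gromacs_gpu.sh (placeholder name):

<pre>
# Submit the GPU job script (placeholder file name)
sbatch gromacs_gpu.sh

# Confirm the job is running and which node it was assigned
squeue -u $USER

# mdrun logs its hardware detection, including the GPUs it uses, in md.log
less md.log
</pre>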
