Gromacs Job Scripts
Note: Use 'module spider gromacs' to find available GROMACS versions. The module loads in the scripts below may be outdated.
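For example, to see which GROMACS builds are installed and how to load one of them (the version and toolchain names below are only illustrative; use whatever 'module spider' reports on your cluster):

# List all available GROMACS versions
module spider gromacs

# Show how to load a specific version, including its prerequisites (example version string)
module spider gromacs/2019.2

# Load the toolchain and GROMACS modules reported by the previous command
module purge
module load gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2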
Parallel (MPI)
#!/bin/bash
#SBATCH --job-name=gromacs
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gmx-%j.out
#SBATCH --ntasks=2
#SBATCH --cpus-per-task=4
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=1gb

module purge
ml gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmix_v3 gmx mdrun -ntomp ${SLURM_CPUS_PER_TASK} -s topol.tpr
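To submit the script, assuming it is saved as gromacs_mpi.sbatch (the filename is arbitrary) and topol.tpr sits in the submission directory:

sbatch gromacs_mpi.sbatch     # submit the job
squeue -u $USER               # check its position and state in the queue
tail -f gmx-<jobid>.out       # follow the job output once it starts running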
GPU Acceleration (MPI + multiple GPUs)
#!/bin/bash
#SBATCH --job-name=multi-gpu
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=stdout
#SBATCH --error=stderr
#SBATCH --nodes=1
#SBATCH --ntasks=2
#SBATCH --tasks-per-node=2
#SBATCH --cpus-per-task=7
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=2:00:00
#SBATCH --mem-per-cpu=1gb
#SBATCH --partition=hpg2-gpu
#SBATCH --gres=gpu:tesla:2
module load gcc/5.2.0 openmpi/1.10.2 gromacs/2016.3-CUDA
GMX=gmx
echo Host = `hostname`
echo Start = `date`
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK
srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS $GMX mdrun -v
echo Finish = `date`
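To verify that a job can actually see both requested GPUs, a quick interactive check along these lines can help (the partition and gres values mirror the script above; adjust them for your site):

# Request the same GPU resources interactively and list the visible devices
srun --partition=hpg2-gpu --gres=gpu:tesla:2 --ntasks=1 --time=10:00 nvidia-smi -L

# After the job starts, the mdrun log (md.log by default) also reports the GPUs GROMACS detected
grep -i gpu md.log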