=Gromacs Job Scripts=
[[Category:Software]]
;Note: Use 'module spider gromacs' to find available GROMACS versions. The module loads below may be outdated.
[[Gromacs|back to the Gromacs page]]
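
A quick way to act on the note above: list the installed builds before copying a script, since module names change over time. A minimal sketch; the specific version queried in the second command is only an example:

<pre>
# List all GROMACS builds available on the cluster
module spider gromacs

# Show how to load one specific build, including any compiler/MPI
# modules it depends on (2019.2 here is just an illustrative version)
module spider gromacs/2019.2
</pre>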
==Parallel (MPI)==
<pre>
#!/bin/bash
#SBATCH --job-name=gromacs
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gmx-%j.out
#SBATCH --ntasks=2                  # number of MPI ranks
#SBATCH --cpus-per-task=4           # OpenMP threads per MPI rank
#SBATCH --ntasks-per-socket=1       # place one rank on each CPU socket
#SBATCH --distribution=cyclic:block # ranks cyclic across nodes, block within a node
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=1gb

module purge
ml gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2

# Match the OpenMP thread count to the cores Slurm allocated per rank
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

srun --mpi=pmix_v3 gmx mdrun -ntomp ${SLURM_CPUS_PER_TASK} -s topol.tpr
</pre>
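A short usage sketch, assuming the script above is saved as gromacs_mpi.sbatch (the filename is arbitrary): submit it and monitor it with the standard Slurm commands.

<pre>
sbatch gromacs_mpi.sbatch   # submit; Slurm prints the assigned job ID
squeue -u $USER             # check the job's state while queued or running
</pre>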
==GPU Acceleration (MPI + multiple GPUs)==
<pre>
#!/bin/bash
#SBATCH --job-name=multi-gpu
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gromacs_%j.log
#SBATCH --nodes=1
#SBATCH --ntasks=2                  # one MPI rank per GPU
#SBATCH --tasks-per-node=2
#SBATCH --cpus-per-task=7           # OpenMP threads per rank
#SBATCH --ntasks-per-socket=1       # one rank (and its GPU) per socket
#SBATCH --distribution=cyclic:block
#SBATCH --time=2:00:00
#SBATCH --mem-per-cpu=1gb
#SBATCH --partition=hpg2-gpu
#SBATCH --gres=gpu:tesla:2          # request two Tesla GPUs

module load gcc/5.2.0 openmpi/1.10.2 gromacs/2016.3-CUDA

export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

# --accel-bind=g binds each MPI rank to the GPU closest to its CPUs
srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS gmx mdrun -v
</pre>
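To confirm that both requested GPUs are actually visible to the job, a couple of optional diagnostic lines can be placed before the srun command. This is a sketch assuming a typical Slurm GPU setup, where CUDA_VISIBLE_DEVICES is set automatically for jobs that request GPUs:

<pre>
# GPUs Slurm assigned to this job
echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"

# List the GPUs the job can see
nvidia-smi --query-gpu=index,name --format=csv
</pre>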