Gromacs Job Scripts
Note: Use 'module spider gromacs' to find available GROMACS versions. The module loads shown below may be outdated.
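For example, locating and loading a build with the module system might look like the following sketch. The version shown is the one used in the MPI script below; substitute whatever 'module spider' reports on your system.

<pre>
# List all GROMACS builds known to the module system
module spider gromacs

# Show the dependencies required to load a specific build (version is an example)
module spider gromacs/2019.2

# Load the reported dependencies first, then GROMACS itself
module load gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2
</pre>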
Parallel (MPI)
<pre>
#!/bin/bash
#SBATCH --job-name=gromacs
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gmx-%j.out
#SBATCH --ntasks=2                  # 2 MPI ranks
#SBATCH --cpus-per-task=4           # 4 OpenMP threads per MPI rank
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=24:00:00
#SBATCH --mem-per-cpu=1gb

module purge
ml gcc/8.2.0 openmpi/4.0.1 gromacs/2019.2

# Match the OpenMP thread count to the cores allocated per MPI rank
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

srun --mpi=pmix_v3 gmx mdrun -ntomp ${SLURM_CPUS_PER_TASK} -s topol.tpr
</pre>
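The script expects a run input file named topol.tpr in the submission directory. A minimal sketch of preparing that file and submitting the job follows; the .mdp, .gro, and .top file names are placeholders for your own inputs, and gromacs_mpi.sh is an assumed name for the script above.

<pre>
# Build the portable run input from your parameters, coordinates, and topology
gmx grompp -f md.mdp -c conf.gro -p topol.top -o topol.tpr

# Submit the job script to Slurm
sbatch gromacs_mpi.sh
</pre>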
GPU Acceleration (MPI + multiple GPUs)
<pre>
#!/bin/bash
#SBATCH --job-name=multi-gpu
#SBATCH --mail-user=YOUR_MAIL_ADDRESS_HERE
#SBATCH --mail-type=FAIL,BEGIN,END
#SBATCH --output=gromacs_%j.log
#SBATCH --nodes=1
#SBATCH --ntasks=2                  # one MPI rank per GPU
#SBATCH --tasks-per-node=2
#SBATCH --cpus-per-task=7
#SBATCH --ntasks-per-socket=1
#SBATCH --distribution=cyclic:block
#SBATCH --time=2:00:00
#SBATCH --mem-per-cpu=1gb
#SBATCH --partition=hpg2-gpu
#SBATCH --gres=gpu:a100:2           # request two A100 GPUs

module load gcc/5.2.0 openmpi/1.10.2 gromacs/2016.3-CUDA

# Match the OpenMP thread count to the cores allocated per MPI rank
export OMP_NUM_THREADS=$SLURM_CPUS_PER_TASK

srun --mpi=pmi2 --accel-bind=g --ntasks=$SLURM_NTASKS gmx mdrun -v
</pre>
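As with the MPI example, submit the script with sbatch and follow the run through Slurm and the mdrun output. The commands below are a minimal sketch: gromacs_gpu.sh is an assumed name for the script above, and the log file name follows the --output pattern it sets.

<pre>
# Submit the GPU job
sbatch gromacs_gpu.sh

# Check the status of your jobs in the queue
squeue -u $USER

# Follow the mdrun output (replace <jobid> with the job ID reported by sbatch)
tail -f gromacs_<jobid>.log
</pre>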