Vasp Job Scripts
Sample Slurm script for a parallel (CPU-only) VASP calculation:
#!/bin/bash
#SBATCH --job-name=vasp_job
#SBATCH --output=stdout
#SBATCH --error=stderr
#SBATCH --ntasks=64                   # total number of MPI ranks
#SBATCH --cpus-per-task=1             # one core per MPI rank
#SBATCH --ntasks-per-socket=8         # spread ranks evenly across CPU sockets
#SBATCH --distribution=cyclic:cyclic  # cyclic rank placement across nodes and sockets
#SBATCH --time=72:00:00               # wall-time limit (hh:mm:ss)
#SBATCH --mem-per-cpu=3600mb          # memory per MPI rank
#SBATCH --exclusive                   # do not share nodes with other jobs
#SBATCH --mail-type=ALL               # e-mail on job start, end, and failure

module load intel/2020
module load openmpi/4.1.5
module load vasp/6.4.1

VASP=vasp_std                         # vasp_std, vasp_gam, or vasp_ncl
export OMP_NUM_THREADS=1              # pure MPI run: disable OpenMP threading

echo "VASP   = $VASP"
echo "BINARY = $(which $VASP)"
echo "Host   = $(hostname)"
echo "Start  = $(date)"

srun --mpi=pmix_v3 $VASP > vasp.log 2>&1
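Assuming the script above is saved as vasp_cpu.sh (the filename is arbitrary), a typical submit-and-monitor session looks like the following sketch:

sbatch vasp_cpu.sh    # submit the job script to Slurm
squeue -u $USER       # check the job's state in the queue
tail -f vasp.log      # follow VASP output once the job is running

For good parallel efficiency, the INCAR tag NCORE is often set close to the number of cores per socket (8 in this layout), but that setting belongs in the VASP input files, not in the job script.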
Sample Slurm script for a parallel VASP calculation with GPU support (CPU/GPU):
#!/bin/bash
#SBATCH --job-name=vasp_job
#SBATCH --output=stdout
#SBATCH --error=stderr
#SBATCH --partition=gpu
#SBATCH --gpus=a100:10                # 10 A100 GPUs in total (one per node)
#SBATCH --nodes=10
#SBATCH --ntasks=320                  # total number of MPI ranks
#SBATCH --cpus-per-task=1             # one core per MPI rank
#SBATCH --ntasks-per-socket=4
#SBATCH --ntasks-per-node=32          # 32 MPI ranks per node
#SBATCH --distribution=cyclic:cyclic  # cyclic rank placement across nodes and sockets
#SBATCH --time=72:00:00               # wall-time limit (hh:mm:ss)
#SBATCH --mem-per-cpu=50gb            # memory per MPI rank; adjust to the node's actual RAM
#SBATCH --exclusive                   # do not share nodes with other jobs
#SBATCH --mail-type=ALL               # e-mail on job start, end, and failure

module load nvhpc/23.7
module load openmpi/4.1.5
module load cuda/12.2.2
# load the site's GPU build of VASP here, or make sure vasp_std is on $PATH

VASP=vasp_std
export OMP_NUM_THREADS=1

echo "VASP   = $VASP"
echo "BINARY = $(which $VASP)"
echo "Host   = $(hostname)"
echo "Start  = $(date)"

srun --mpi=pmix_v3 $VASP > vasp.log 2>&1
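The layout above runs 32 MPI ranks per node while requesting only one A100 per node. The OpenACC GPU port of VASP is usually run with one MPI rank per GPU instead; a minimal sketch of such a resource request (node and core counts here are hypothetical) would be:

#SBATCH --partition=gpu
#SBATCH --nodes=10
#SBATCH --gpus-per-node=a100:1   # one GPU per node
#SBATCH --ntasks-per-node=1      # one MPI rank per GPU
#SBATCH --cpus-per-task=32       # leave the remaining cores to OpenMP threads
export OMP_NUM_THREADS=32        # only if the VASP build has OpenMP support

Which layout performs better depends on the VASP build and the hardware; check the site's documentation.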
Disclaimer: The above Slurm configurations are hypothetical; users must customize them based on the size of the calculation, the available resources, etc.
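Before customizing the requests above, the standard Slurm query commands show what a given cluster actually provides (output format varies by site):

sinfo -s                      # summary of partitions and node availability
sinfo -o "%P %D %G"           # partition, node count, and configured GPUs (GRES)
scontrol show partition gpu   # limits and defaults of the gpu partition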