Vasp Job Scripts
Sample Slurm script for a parallel (CPU-only) VASP calculation:
 #!/bin/bash
 #SBATCH --job-name=vasp_job
 #SBATCH --output=stdout
 #SBATCH --error=stderr
 #SBATCH --ntasks=64
 #SBATCH --cpus-per-task=1
 #SBATCH --ntasks-per-socket=8
 #SBATCH --distribution=cyclic:cyclic
 #SBATCH --time=72:00:00
 #SBATCH --mem-per-cpu=3600mb
 #SBATCH --exclusive
 #SBATCH --mail-type=ALL
 
 # Load the compiler, MPI, and VASP environment modules
 module load intel/2020
 module load openmpi/4.1.5
 module load vasp/6.4.1
 
 VASP=vasp_std
 
 # Pure MPI run: one OpenMP thread per MPI rank
 export OMP_NUM_THREADS=1
 echo "VASP = $VASP"
 echo "BINARY = $(which $VASP)"
 
 echo Host = `hostname`
 echo Start = `date`
 
 # Launch VASP through srun; stdout and stderr are redirected to vasp.log
 srun --mpi=pmix_v3 $VASP > vasp.log 2>&1
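Assuming the script above is saved as, for example, vasp_cpu.sh (the file name is arbitrary), it can be submitted and monitored with the standard Slurm commands:

 # Submit the job; Slurm prints the assigned job ID
 sbatch vasp_cpu.sh
 
 # List your pending and running jobs
 squeue -u $USER
 
 # Follow the VASP output that the script redirects to vasp.log
 tail -f vasp.log
 
 # After the job finishes, check elapsed time and memory usage for a given job ID
 sacct -j <jobid> --format=JobID,Elapsed,MaxRSS,State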
Sample Slurm script for a parallel VASP calculation with GPU support (CPU/GPU):
 #!/bin/bash
 #SBATCH --job-name=vasp_job
 #SBATCH --output=stdout
 #SBATCH --error=stderr
 #SBATCH --partition=gpu
 #SBATCH --gpus=a100:10
 #SBATCH --nodes=10
 #SBATCH --ntasks=320
 #SBATCH --cpus-per-task=1
 #SBATCH --ntasks-per-socket=4
 #SBATCH --ntasks-per-node=32
 #SBATCH --distribution=cyclic:cyclic
 #SBATCH --time=72:00:00
 #SBATCH --mem-per-cpu=50gb
 #SBATCH --exclusive
 #SBATCH --mail-type=ALL
 
 # Load the NVIDIA HPC SDK, MPI, and CUDA environment modules
 module load nvhpc/23.7
 module load openmpi/4.1.5
 module load cuda/12.2.2
 
 VASP=vasp_std
 
 export OMP_NUM_THREADS=1
 echo "VASP = $VASP"
 echo "BINARY = $(which $VASP)"
 
 echo Host = `hostname`
 echo Start = `date`
 
 # Launch VASP through srun; stdout and stderr are redirected to vasp.log
 srun --mpi=pmix_v3 $VASP > vasp.log 2>&1
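When scaling a job up or down, the directives have to stay consistent with one another (for instance, --ntasks should equal --nodes times --ntasks-per-node, and the requested GPU count should not exceed what the partition's nodes provide). As a minimal sketch only, assuming the same partition name, GPU type, and module versions as in the script above, a single-node, single-GPU variant might look like this:

 #!/bin/bash
 #SBATCH --job-name=vasp_gpu_small
 #SBATCH --output=stdout
 #SBATCH --error=stderr
 #SBATCH --partition=gpu      # assumed partition name, as above
 #SBATCH --gpus=a100:1        # one GPU in total
 #SBATCH --nodes=1
 #SBATCH --ntasks=1           # the VASP OpenACC GPU port is typically run with one MPI rank per GPU
 #SBATCH --cpus-per-task=1
 #SBATCH --time=24:00:00
 #SBATCH --mem-per-cpu=8gb    # assumed memory request; adjust to the system
 
 module load nvhpc/23.7
 module load openmpi/4.1.5
 module load cuda/12.2.2
 
 export OMP_NUM_THREADS=1
 srun --mpi=pmix_v3 vasp_std > vasp.log 2>&1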
Disclaimer: The above Slurm configurations are hypothetical. Users must customize them based on the size of the calculation, the available resources, and so on.