ORCA Job Scripts


#!/bin/bash
#SBATCH --job-name=parallel_job        # Job name
#SBATCH --mail-type=END,FAIL         # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@ufl.edu # Where to send mail
#SBATCH --nodes=1                    # Run all processes on a single node
#SBATCH --ntasks=2                   # Run on 2 processors
#SBATCH --ntasks-per-node=2          # Maximum number of tasks on each node
#SBATCH --mem-per-cpu=500mb          # Memory per processor
#SBATCH --time=00:05:00              # Time limit hrs:min:sec
#SBATCH --output=parallel_%j.log     # Standard output and error log

pwd; hostname; date

echo "Running orca test calculation on a with four CPU cores" echo "Date = $(date)" echo "Hostname = $(hostname -s)" echo "Working Directory = $(pwd)" echo "" echo "Number of Nodes Allocated = $SLURM_JOB_NUM_NODES" echo "Number of Tasks Allocated = $SLURM_NTASKS" echo "Number of Cores/Task Allocated = $SLURM_CPUS_PER_TASK" echo ""

# Load the compiler, MPI, and ORCA environment modules
module load gcc/12.2.0 openmpi/4.1.1 orca/5.0.1

# Print diagnostics: confirm which MPI launcher is found and inspect the environment
which mpirun; echo $PATH; echo $LD_LIBRARY_PATH

# Run the ORCA calculation, calling the executable by its full path and redirecting output to a file
srun --mpi=pmix_v3 /blue/ax/orca/504/orca ./Inputs/h2o-pal3.inp > h2o-pal12.out

date

Disclaimer: The above SLURM configuration is hypothetical. Users must customize it based on the size of the calculation, the available resources, etc.
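
The input file referenced above (./Inputs/h2o-pal3.inp) is not shown on this page. As a hypothetical illustration only (the method, basis set, and geometry below are placeholders, not taken from this page), an ORCA input for a job like this could request its parallel processes with a %pal block that matches the --ntasks value in the job script, for example written from the shell with a here-document:

cat > ./Inputs/h2o-pal3.inp << 'EOF'
! B3LYP def2-SVP Opt
%pal
  nprocs 2    # should match #SBATCH --ntasks in the job script
end
* xyz 0 1
O    0.000000    0.000000    0.000000
H    0.000000    0.757200    0.586500
H    0.000000   -0.757200    0.586500
*
EOF

Once the job script is saved to a file (for example orca_job.sh, a name chosen here for illustration), it can be submitted with "sbatch orca_job.sh" and monitored with "squeue -u $USER".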