ORCA Job Scripts


#!/bin/bash
#SBATCH --job-name=parallel_job      # Job name
#SBATCH --mail-type=END,FAIL         # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@ufl.edu  # Where to send mail
#SBATCH --nodes=1                    # Run all processes on a single node
#SBATCH --ntasks=2                   # Run on 2 processors
#SBATCH --ntasks-per-node=2          # Maximum number of tasks on each node
#SBATCH --mem-per-cpu=500mb          # Memory per processor
#SBATCH --time=00:05:00              # Time limit hrs:min:sec
#SBATCH --output=parallel_%j.log     # Standard output and error log
pwd; hostname; date
echo "Running orca test calculation on a with four CPU cores"
echo "Date              = $(date)"
echo "Hostname          = $(hostname -s)"
echo "Working Directory = $(pwd)"
echo ""
echo "Number of Nodes Allocated      = $SLURM_JOB_NUM_NODES"
echo "Number of Tasks Allocated      = $SLURM_NTASKS"
echo "Number of Cores/Task Allocated = $SLURM_CPUS_PER_TASK"
echo ""
module load gcc/12.2.0 openmpi/4.1.1 orca/5.0.1  # Load the compiler, MPI, and ORCA modules
which mpirun; echo $PATH; echo $LD_LIBRARY_PATH  # Print MPI and environment info for debugging
srun --mpi=pmix_v3 orca h2o.inp > h2o.out        # Run ORCA in parallel via srun using the PMIx interface
date
Disclaimer: The above Slurm configuration is hypothetical. Customize it based on the size of the calculation, the available resources, etc.
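
For reference, here is a minimal sketch of what the h2o.inp input file might contain. The method, basis set, and geometry are placeholders; the important point is that the nprocs value in the %pal block should match the Slurm --ntasks request (2 in the script above):

 ! B3LYP def2-SVP Opt
 %pal
   nprocs 2   # should match the --ntasks value in the job script
 end
 * xyz 0 1
 O   0.000000   0.000000   0.000000
 H   0.000000   0.757160   0.586260
 H   0.000000  -0.757160   0.586260
 *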
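
To submit the job, save the script to a file (orca_job.sh is a hypothetical name here) and submit it with sbatch; squeue can be used to monitor it:

 sbatch orca_job.sh   # submit the job script shown above
 squeue -u $USER      # check the status of your jobs in the queue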