ORCA Job Scripts

#!/bin/bash
#SBATCH --job-name=parallel_job      # Job name
#SBATCH --mail-type=END,FAIL         # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@ufl.edu  # Where to send mail
#SBATCH --nodes=1                    # Run all processes on a single node
#SBATCH --ntasks=2                   # Run on 2 processors
#SBATCH --ntasks-per-node=2          # Maximum number of tasks on each node
#SBATCH --mem-per-cpu=500mb          # Memory per processor
#SBATCH --time=00:05:00              # Time limit hrs:min:sec
#SBATCH --output=parallel_%j.log     # Standard output and error log
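# Note: 2 tasks x 500 MB per CPU = 1 GB of memory requested in total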
pwd; hostname; date
echo "Running orca test calculation on a with four CPU cores"
echo "Date              = $(date)"
echo "Hostname          = $(hostname -s)"
echo "Working Directory = $(pwd)"
echo ""
echo "Number of Nodes Allocated      = $SLURM_JOB_NUM_NODES"
echo "Number of Tasks Allocated      = $SLURM_NTASKS"
echo "Number of Cores/Task Allocated = $SLURM_CPUS_PER_TASK"
echo ""
module load gcc/12.2.0 openmpi/4.1.1 orca/5.0.4
which mpirun; echo $PATH; echo $LD_LIBRARY_PATH
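# For parallel runs, ORCA must be invoked by its full path so that it can
# locate and launch its MPI helper binaries itself; do not wrap the orca
# call in mpirun or srun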
export ORCA_DIR=/apps/gcc/12.2.0/openmpi/4.1.1/orca/5.0.4
$ORCA_DIR/orca job.inp > job.out
date
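ORCA reads the number of parallel processes from the input file itself, so the %pal block in job.inp must match the --ntasks value requested from SLURM. A minimal, hypothetical job.inp matching the two tasks above (the method, basis set, and geometry are placeholders, not a recommendation):

! B3LYP def2-SVP Opt
%pal
  nprocs 2      # must match #SBATCH --ntasks
end
%maxcore 400    # per-core memory in MB; keep below --mem-per-cpu
* xyz 0 1
  O   0.000000   0.000000   0.000000
  H   0.000000   0.757000   0.586000
  H   0.000000  -0.757000   0.586000
*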
Disclaimer: The above SLURM configuration is hypothetical. Users must customize it based on the size of the calculation, the available resources, etc.
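Once the script is saved (as, say, orca_job.sh; the filename is arbitrary), it is submitted and monitored with the standard SLURM commands:

sbatch orca_job.sh
squeue -u $USER
tail -f job.out

The calculation output is written to job.out, and the SLURM log for the job goes to parallel_<jobid>.log, as set by the --output directive.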