ORCA Job Scripts

The ORCA input file must contain the parallel configuration block:

 %pal nprocs n
 end

where n is the number of processors requested. The number of processors must match the total number of tasks requested in the Slurm configuration.
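For example, a minimal job.inp that matches the two tasks requested in the single-node script below could look like the following sketch (the method, basis set, and water geometry are purely illustrative; %maxcore sets the per-core memory in MB and should stay at or below the --mem-per-cpu request):

 ! HF def2-SVP TightSCF
 %pal nprocs 2
 end
 %maxcore 500
 * xyz 0 1
  O   0.000000   0.000000   0.000000
  H   0.000000   0.757200   0.586500
  H   0.000000  -0.757200   0.586500
 *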
ORCA on a single node, with multiple cores:
===========================================
#!/bin/bash
#SBATCH --job-name=parallel_job      # Job name
#SBATCH --mail-type=END,FAIL         # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@ufl.edu # Where to send mail
#SBATCH --nodes=1                    # Run all tasks on a single node
#SBATCH --ntasks=2                   # The total number of tasks
#SBATCH --cpus-per-task=1            # Number of CPU cores per task
#SBATCH --mem-per-cpu=500mb          # Memory per processor
#SBATCH --time=00:05:00              # Time limit hrs:min:sec
#SBATCH --output=parallel_%j.log     # Standard output and error log
pwd; hostname; date
echo "Running orca test calculation on a with four CPU cores"
echo "Date              = $(date)"
echo "Hostname          = $(hostname -s)"
echo "Working Directory = $(pwd)"
echo ""
echo "Number of Nodes Allocated      = $SLURM_JOB_NUM_NODES"
echo "Number of Tasks Allocated      = $SLURM_NTASKS"
echo "Number of Cores/Task Allocated = $SLURM_CPUS_PER_TASK"
echo ""
module load gcc/12.2.0 openmpi/4.1.1 orca/5.0.4
which mpirun; echo $PATH; echo $LD_LIBRARY_PATH
export ORCA_DIR=/apps/gcc/12.2.0/openmpi/4.1.1/orca/5.0.4
$ORCA_DIR/orca job.inp > job.out
date
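Assuming the script above is saved as, for example, orca_single.sh (the file name is arbitrary), it is submitted and monitored with the standard Slurm commands:

 sbatch orca_single.sh
 squeue -u $USER                 # check job status
 tail -f parallel_<jobid>.log    # follow the Slurm log (replace <jobid> with the actual job ID)

The log file name comes from the --output=parallel_%j.log directive, where %j expands to the job ID.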
ORCA on multiple nodes, with multiple cores:
===========================================
#!/bin/bash
#SBATCH --job-name=parallel_job      # Job name
#SBATCH --mail-type=END,FAIL         # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=username@ufl.edu # Where to send mail
#SBATCH --nodes=2                    # The number of nodes              
#SBATCH --ntasks-per-node=8          # Maximum number of tasks on each node
#SBATCH --cpus-per-task=1            # Number of CPU cores per task
#SBATCH --mem-per-cpu=500mb          # Memory per processor
#SBATCH --time=00:05:00              # Time limit hrs:min:sec
#SBATCH --output=parallel_%j.log     # Standard output and error log
pwd; hostname; date
echo "Running orca test calculation on a with four CPU cores"
echo "Date              = $(date)"
echo "Hostname          = $(hostname -s)"
echo "Working Directory = $(pwd)"
echo ""
echo "Number of Nodes Allocated      = $SLURM_JOB_NUM_NODES"
echo "Number of Tasks Allocated      = $SLURM_NTASKS"
echo "Number of Cores/Task Allocated = $SLURM_CPUS_PER_TASK"
echo ""
module load gcc/12.2.0 openmpi/4.1.1 orca/5.0.4
which mpirun; echo $PATH; echo $LD_LIBRARY_PATH
export ORCA_DIR=/apps/gcc/12.2.0/openmpi/4.1.1/orca/5.0.4
$ORCA_DIR/orca job.inp > job.out
date
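The script above requests 2 nodes with 8 tasks per node, i.e. 16 MPI processes in total, so the %pal block in the ORCA input must request the same number:

 %pal nprocs 16
 end

Note that ORCA is invoked through its full path ($ORCA_DIR/orca) and starts its own MPI processes across the allocated nodes; it should not be wrapped in srun or mpirun.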


Disclaimer: The above Slurm configurations are hypothetical; customize them based on the size of the calculation, the available resources, etc.