Difference between revisions of "Gkeyll Job Scripts"

From UFRC
Jump to navigation Jump to search
Line 1: Line 1:
#!/bin/bash
+
#!/bin/bash
#SBATCH --time                # wall time
+
#SBATCH --time                # wall time
#SBATCH --ntasks=              # number of mpi processes
+
#SBATCH --ntasks=              # number of mpi processes
#SBATCH --cpus-per-task=      # Number of OpenMP threads for each MPI process/rank
+
#SBATCH --cpus-per-task=      # Number of OpenMP threads for each MPI process/rank
#SBATCH --nodes=              # Number of nodes
+
#SBATCH --nodes=              # Number of nodes
#SBATCH --ntasks-per-node=    # How many tasks on each node
+
#SBATCH --ntasks-per-node=    # How many tasks on each node
#SBATCH --ntasks-per-socket=  # How many tasks on each CPU or socket
+
#SBATCH --ntasks-per-socket=  # How many tasks on each CPU or socket
#SBATCH --mem-per-cpu=100mb  # Memory per core
+
#SBATCH --mem-per-cpu=100mb  # Memory per core
  
# load modules
+
# load modules
module load openmpi/4.1.5
+
  module load openmpi/4.1.5
 +
  module load openblas/0.3.24
 +
  module load superlu_dist/6.0.1
 +
  module load metis/5.2.
  
module load openblas/0.3.24
+
# run the program
 
+
  srun --mpi=pmix_v3 gkyl innput.lua >& output.log
module load superlu_dist/6.0.1
 
 
 
module load metis/5.2.
 
 
 
# run the program
 
srun --mpi=pmix_v3 gkyl innput.lua >& output.log
 
  
 
*Note: The user must decide the number of tasks and how they are distributed. This depends on
 
*Note: The user must decide the number of tasks and how they are distributed. This depends on

Revision as of 14:30, 3 June 2024

#!/bin/bash
# Sample SLURM job script for running Gkeyll (gkyl) under MPI.
# Fill in the blank values below before submitting with sbatch.
#SBATCH --time=                # wall time
#SBATCH --ntasks=              # number of mpi processes
#SBATCH --cpus-per-task=      # Number of OpenMP threads for each MPI process/rank
#SBATCH --nodes=              # Number of nodes
#SBATCH --ntasks-per-node=    # How many tasks on each node
#SBATCH --ntasks-per-socket=  # How many tasks on each CPU or socket
#SBATCH --mem-per-cpu=100mb   # Memory per core
#SBATCH --account=             # Your group's SLURM account (required)
#SBATCH --qos=                 # Quality of Service (required)

# load modules required by Gkeyll
module load openmpi/4.1.5
module load openblas/0.3.24
module load superlu_dist/6.0.1
module load metis/5.2.   # NOTE(review): version string looks truncated ("5.2.") -- confirm full module version

# run the program (replace input.lua with your Gkeyll input file)
srun --mpi=pmix_v3 gkyl input.lua >& output.log
  • Note: The user must decide the number of tasks and how they are distributed. This depends on

the availability of resources and the extent of the parallelization of the software. Also, the user must specify the account (#SBATCH --account) and the Quality of Service (QOS) (#SBATCH --qos).