R MPI Example

An example of using the Rmpi library to run MPI jobs under SLURM.
<pre>
# Load the R MPI package if it is not already loaded.
if (!is.loaded("mpi_initialize")) {
    library("Rmpi")
}

# Spawn one fewer worker than the MPI universe size (rank 0 is the master).
ns <- mpi.universe.size() - 1
mpi.spawn.Rslaves(nslaves=ns)

# In case R exits unexpectedly, have it automatically clean up
# resources taken up by Rmpi (slaves, memory, etc...)
.Last <- function(){
    if (is.loaded("mpi_initialize")){
        if (mpi.comm.size(1) > 0){
            print("Please use mpi.close.Rslaves() to close slaves.")
            mpi.close.Rslaves()
        }
        print("Please use mpi.quit() to quit R")
        .Call("mpi_finalize")
    }
}

# Tell all slaves to return a message identifying themselves
mpi.bcast.cmd( id <- mpi.comm.rank() )
mpi.bcast.cmd( ns <- mpi.comm.size() )
mpi.bcast.cmd( host <- mpi.get.processor.name() )
mpi.remote.exec(paste("I am",mpi.comm.rank(),"of",mpi.comm.size()))

# Test computations
x <- 5
x <- mpi.remote.exec(rnorm, x)
length(x)
x

# Tell all slaves to close down, and exit the program
mpi.close.Rslaves(dellog = FALSE)
mpi.quit()
</pre>
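Beyond the identification test above, the spawned workers can also be used for an actual scatter/gather computation. The snippet below is a minimal, hypothetical sketch (not part of the site's rmpi_test.R) that uses Rmpi's mpi.parSapply() to distribute independent simulations across the workers; it assumes Rmpi is loaded and the workers have already been spawned as in the script above.

<pre>
# Hypothetical example: distribute a simple simulation over the spawned workers.
# Assumes library("Rmpi") and mpi.spawn.Rslaves() have already been run.

# Each element of 'seeds' becomes one task handled by some worker.
seeds <- 1:20

# mpi.parSapply() scatters the input vector across the workers,
# applies the function remotely, and gathers the results on the master.
means <- mpi.parSapply(seeds, function(s) {
    set.seed(s)
    mean(rnorm(1e5))   # a small independent simulation per task
})

summary(means)
</pre>

mpi.parSapply() behaves like sapply(), but evaluates the function on the Rmpi workers and returns the gathered results to the master in input order.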
Example SLURM job script that runs the rmpi_test.R script:
<pre>
#!/bin/sh
#SBATCH --job-name=mpi_job_test      # Job name
#SBATCH --mail-type=END,FAIL         # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=ENTER_YOUR_EMAIL_HERE   # Where to send mail
#SBATCH --cpus-per-task=1            # Number of cores per MPI rank
#SBATCH --nodes=2                    # Number of nodes
#SBATCH --ntasks=8                   # Number of MPI ranks
#SBATCH --ntasks-per-node=4          # How many tasks on each node
#SBATCH --ntasks-per-socket=2        # How many tasks on each CPU or socket
#SBATCH --distribution=cyclic:cyclic # Distribute tasks cyclically on nodes and sockets
#SBATCH --mem-per-cpu=1gb            # Memory per processor
#SBATCH --time=00:05:00              # Time limit hrs:min:sec
#SBATCH --output=mpi_test_%j.out     # Standard output and error log

pwd; hostname; date

echo "Running example Rmpi script. Using $SLURM_JOB_NUM_NODES nodes with $SLURM_NTASKS tasks, each with $SLURM_CPUS_PER_TASK cores."

module purge; module load gcc openmpi rmpi

srun --mpi=${HPC_PMIX} Rscript /data/training/SLURM/rmpi_test.R

date
</pre>
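With this layout, the MPI universe seen by the R script should correspond to the --ntasks value requested above. As a hedged illustration (a hypothetical snippet, not part of rmpi_test.R), the script can cross-check the SLURM allocation before spawning workers:

<pre>
# Hypothetical sanity check: compare the MPI universe with the SLURM allocation.
# Assumes library("Rmpi") is already loaded; SLURM_NTASKS is set by SLURM inside the job.
slurm_ntasks <- as.integer(Sys.getenv("SLURM_NTASKS", unset = "1"))
if (mpi.universe.size() != slurm_ntasks) {
    warning(sprintf("MPI universe (%d) does not match SLURM_NTASKS (%d)",
                    mpi.universe.size(), slurm_ntasks))
}
</pre>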
For the rmpi/4.0 module, the following command will work to launch the script:
<pre>
mpiexec -n ${SLURM_NTASKS} Rscript rmpi_test.R
</pre>
If the rmpi module does not add the symlink automatically, link /apps/rmpi/conf/Rprofile as .Rprofile in the working directory; this configuration file must be present in the directory the job runs from.
<pre>
ln -s /apps/rmpi/conf/Rprofile .Rprofile
</pre>
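As a small, hypothetical convenience (not part of the site configuration), a guard like the following can be placed at the top of an R script launched this way, so a missing .Rprofile link fails with a clear message instead of every rank running as an independent master:

<pre>
# Hypothetical guard: confirm the .Rprofile link is present in the working
# directory before relying on it to initialize Rmpi under mpiexec.
if (!file.exists(".Rprofile")) {
    stop("No .Rprofile found here; run: ln -s /apps/rmpi/conf/Rprofile .Rprofile")
}
</pre>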