R MPI Example
[[Category:Software]][[Category:Compiler]]
[[R|back to the main R page]]


Example of using the parallel module to run MPI jobs under SLURM with the Rmpi library.


{{#fileAnchor: rmpi_test.R}}
Download raw source of the [{{#fileLink: rmpi_test.R}} rmpi_test.R] file.
<source lang=r>
# Load the R MPI package if it is not already loaded.
if (!is.loaded("mpi_initialize")) {
    library("Rmpi")
}

# Spawn as many slaves as possible
ns <- mpi.universe.size() - 1
mpi.spawn.Rslaves(nslaves=ns)

# In case R exits unexpectedly, have it automatically clean up
# resources taken up by Rmpi (slaves, memory, etc...)
.Last <- function(){
    if (is.loaded("mpi_initialize")){
        if (mpi.comm.size(1) > 0){
            print("Please use mpi.close.Rslaves() to close slaves.")
            mpi.close.Rslaves()
        }
        print("Please use mpi.quit() to quit R")
        .Call("mpi_finalize")
    }
}

# Tell all slaves to return a message identifying themselves
mpi.bcast.cmd( id <- mpi.comm.rank() )
mpi.bcast.cmd( ns <- mpi.comm.size() )
mpi.bcast.cmd( host <- mpi.get.processor.name() )
mpi.remote.exec(paste("I am",mpi.comm.rank(),"of",mpi.comm.size()))

# Test computations
x <- 5
x <- mpi.remote.exec(rnorm, x)
length(x)
x

# Tell all slaves to close down, and exit the program
mpi.close.Rslaves(dellog = FALSE)
mpi.quit()
</source>
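
Rmpi also ships higher-level parallel apply helpers (for example mpi.parSapply()) that can take the place of the mpi.remote.exec() calls above for real workloads. The sketch below is not part of the original example; it only illustrates the idea, reusing the same spawn/cleanup pattern.

<source lang=r>
library("Rmpi")

# Spawn one slave per MPI rank, keeping one rank for the master.
ns <- mpi.universe.size() - 1
mpi.spawn.Rslaves(nslaves = ns)

# mpi.parSapply() splits the input across the spawned slaves,
# applies the function to each chunk, and gathers the results.
squares <- mpi.parSapply(1:100, function(i) i * i)
print(sum(squares))

# Shut the slaves down and exit cleanly.
mpi.close.Rslaves(dellog = FALSE)
mpi.quit()
</source>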
 


Example job script using the rmpi_test.R script.


<source lang=bash>
#!/bin/sh
#SBATCH --job-name=mpi_job_test       # Job name
#SBATCH --mail-type=END,FAIL          # Mail events (NONE, BEGIN, END, FAIL, ALL)
#SBATCH --mail-user=ENTER_YOUR_EMAIL_HERE  # Where to send mail
#SBATCH --cpus-per-task=1             # Number of cores per MPI rank
#SBATCH --nodes=2                     # Number of nodes
#SBATCH --ntasks=8                    # Number of MPI ranks
#SBATCH --ntasks-per-node=4           # How many tasks on each node
#SBATCH --ntasks-per-socket=2         # How many tasks on each CPU or socket
#SBATCH --distribution=cyclic:cyclic  # Distribute tasks cyclically on nodes and sockets
#SBATCH --mem-per-cpu=1gb             # Memory per processor
#SBATCH --time=00:05:00               # Time limit hrs:min:sec
#SBATCH --output=mpi_test_%j.out      # Standard output and error log

pwd; hostname; date

echo "Running example Rmpi script. Using $SLURM_JOB_NUM_NODES nodes with $SLURM_NTASKS tasks, each with $SLURM_CPUS_PER_TASK cores."

module purge; module load gcc openmpi rmpi

srun --mpi=${HPC_PMIX} Rscript /data/training/SLURM/rmpi_test.R

date
</source>
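
To submit the job script (saved here as rmpi_job.sh; the filename is only an assumption for this example), use sbatch:

<pre>
sbatch rmpi_job.sh
</pre>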
For the rmpi/4.0 module, the following command will work:
 
<pre>
mpiexec -n ${SLURM_NTASKS} Rscript rmpi_test.R
</pre>
 
 
If the rmpi module does not add the symlink automatically, link /apps/rmpi/conf/Rprofile as .Rprofile in the working directory; this configuration file must be present in the directory the job runs from.
 
<pre>
ln -s /apps/rmpi/conf/Rprofile .Rprofile
</pre>
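
A quick way to confirm that the link was created (just a sanity check, not part of the original instructions):

<pre>
ls -l .Rprofile
</pre>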
