Slurm scripts for MSU


MPI Job

#!/bin/bash
#----------------------------------------------------
# Example SLURM job script to run MPI applications on 
# MSU's system.
#----------------------------------------------------

#SBATCH -J mpiJob              # Job name
#SBATCH -o mpiJob.%j.out       # Name of stdout output file (%j expands to jobId)
#SBATCH -p defq                # Queue name
#SBATCH -N 2                  # Total number of nodes requested (16 cores/node)
#SBATCH -n 32                 # Total number of mpi tasks requested
#SBATCH -t 01:30:00           # Run time (hh:mm:ss) - 1.5 hours

# Launch the MPI executable named "whereamic"

mpirun ./whereamic
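
To use this script, save it to a file and submit it with sbatch. A minimal sketch of the workflow (the filename mpi_job.slurm is a hypothetical choice):

# Submit the batch script (mpi_job.slurm is a hypothetical filename)
sbatch mpi_job.slurm

# Check the job's state while it is queued or running
squeue -u $USER

# After the job finishes, stdout is in mpiJob.<jobid>.out (per the -o line above)
cat mpiJob.*.out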

Multiple MPI Jobs

#!/bin/bash
#----------------------------------------------------
# Example SLURM job script to run multiple MPI 
# applications within one batch job on MSU's 
# system.
#----------------------------------------------------
#SBATCH -J multiple_mpi_job     # Job name
#SBATCH -o multiple_mpi_job.o%j # Name of stdout output file (%j expands to jobId)
#SBATCH -e multiple_mpi_job.e%j # Name of stderr output file (%j expands to jobId)
#SBATCH -p defq                 # Queue name
#SBATCH -N 4                    # Total number of nodes requested (16 cores/node)
#SBATCH -n 64                   # Total number of mpi tasks requested
#SBATCH -t 01:30:00             # Run time (hh:mm:ss) - 1.5 hours
# The next line is required if the user has more than one project
# #SBATCH -A A-yourproject  # <-- Allocation name to charge job against

# This example will run 3 MPI applications using 32 tasks, 
# 16 tasks, and 16 tasks

# DO NOT use tacc_affinity with multiple MPI applications 
# within the same batch script!
# If running in hybrid mode, please contact the help desk 
# for support.

# Launch each MPI application in the background, using "-o" 
# to name its output file and "-n" to set its task count

# Application 1
srun -o app1 -n 32 ./whereamic &

# Application 2
srun -o app2 -n 16 ./hello_mpic &

# Application 3
srun -o app3 -n 16 ./hello_mpic &

# Wait for all the MPI applications to finish
wait
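
Because the three srun launches run concurrently as separate job steps, their progress can be watched while the batch job is running. A brief sketch (the filename multi_mpi.slurm is a hypothetical choice):

# Submit the batch script (multi_mpi.slurm is a hypothetical filename)
sbatch multi_mpi.slurm

# List the individual job steps (-s) for your running jobs
squeue -u $USER -s

# After the job completes, each application's output is in its own file
cat app1 app2 app3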

OpenMP Job

#!/bin/bash
#----------------------------------------------------
# Example SLURM job script to run OpenMP applications 
# on MSU's system.
#----------------------------------------------------
#SBATCH -J openmp_job     # Job name
#SBATCH -o openmp_job.o%j # Name of stdout output file (%j expands to jobId)
#SBATCH -e openmp_job.e%j # Name of stderr output file (%j expands to jobId)
#SBATCH -p defq           # Queue name
#SBATCH -N 1              # Total number of nodes requested (16 cores/node)
#SBATCH -n 8              # Total number of tasks requested
#SBATCH -t 01:30:00       # Run time (hh:mm:ss) - 1.5 hours
# The next line is required if the user has more than one project
# #SBATCH -A A-yourproject  # <-- Allocation name to charge job against

# This example will run an OpenMP application using 8 threads

# Set the number of threads per task (default is 1)
export OMP_NUM_THREADS=8

# Run the OpenMP application
./omp_helloc
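
The script assumes an OpenMP executable named omp_helloc already exists. As a sketch of how such a program might be built (the source filename omp_helloc.c is an assumption), compile with OpenMP support enabled:

# Compile with -fopenmp to enable OpenMP pragmas and link the runtime
# (omp_helloc.c is a hypothetical source file)
gcc -fopenmp -o omp_helloc omp_helloc.c

# Quick interactive test outside the batch system
OMP_NUM_THREADS=8 ./omp_helloc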

Hybrid Job

#!/bin/bash
#----------------------------------------------------
# Example SLURM job script to run hybrid applications 
# (MPI/OpenMP or MPI/pthreads) on MSU's system.
#----------------------------------------------------
#SBATCH -J hybrid_job     # Job name
#SBATCH -o hybrid_job.o%j # Name of stdout output file (%j expands to jobId)
#SBATCH -e hybrid_job.e%j # Name of stderr output file (%j expands to jobId)
#SBATCH -p defq           # Queue name
#SBATCH -N 2              # Total number of nodes requested (16 cores/node)
#SBATCH -n 4              # Total number of mpi tasks requested
#SBATCH -t 01:30:00       # Run time (hh:mm:ss) - 1.5 hours
# The next line is required if the user has more than one project
# #SBATCH -A A-yourproject  # <-- Allocation name to charge job against

# This example will run 4 MPI tasks on 2 nodes with each task 
# using 8 threads

# Set the number of threads per task (default is 1)
export OMP_NUM_THREADS=8

# Launch the MPI application; each task spawns 
# OMP_NUM_THREADS threads
srun ./hybrid_hello
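
Since tacc_affinity is not used here, thread placement can instead be requested from Slurm and OpenMP directly. A minimal sketch under that assumption (both options below are standard Slurm/OpenMP features, not MSU-specific):

# Ask the OpenMP runtime to bind threads to cores
export OMP_PROC_BIND=true

# Reserve one CPU per thread for each MPI task so the 
# threads get dedicated cores
srun --cpus-per-task=$OMP_NUM_THREADS ./hybrid_hello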