<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>http://wiki.define-technology.com/mediawiki-1.35.0/index.php?action=history&amp;feed=atom&amp;title=Slurm_scripts_for_MSU_university</id>
	<title>Slurm scripts for MSU university - Revision history</title>
	<link rel="self" type="application/atom+xml" href="http://wiki.define-technology.com/mediawiki-1.35.0/index.php?action=history&amp;feed=atom&amp;title=Slurm_scripts_for_MSU_university"/>
	<link rel="alternate" type="text/html" href="http://wiki.define-technology.com/mediawiki-1.35.0/index.php?title=Slurm_scripts_for_MSU_university&amp;action=history"/>
	<updated>2026-05-04T20:15:48Z</updated>
	<subtitle>Revision history for this page on the wiki</subtitle>
	<generator>MediaWiki 1.35.0</generator>
	<entry>
		<id>http://wiki.define-technology.com/mediawiki-1.35.0/index.php?title=Slurm_scripts_for_MSU_university&amp;diff=5754&amp;oldid=prev</id>
		<title>Chenhui: Created page with &quot;=== MPI Job === &lt;syntaxhighlight&gt; #!/bin/bash #---------------------------------------------------- # Example SLURM job script to run MPI applications on  # MSU&#039;s system. # # $Id: job.mp...&quot;</title>
		<link rel="alternate" type="text/html" href="http://wiki.define-technology.com/mediawiki-1.35.0/index.php?title=Slurm_scripts_for_MSU_university&amp;diff=5754&amp;oldid=prev"/>
		<updated>2015-01-14T11:44:26Z</updated>

		<summary type="html">&lt;p&gt;Created page with &amp;quot;=== MPI Job === &amp;lt;syntaxhighlight&amp;gt; #!/bin/bash #---------------------------------------------------- # Example SLURM job script to run MPI applications on  # MSU&amp;#039;s system. # # $Id: job.mp...&amp;quot;&lt;/p&gt;
&lt;p&gt;&lt;b&gt;New page&lt;/b&gt;&lt;/p&gt;&lt;div&gt;=== MPI Job ===&lt;br /&gt;
&amp;lt;syntaxhighlight&amp;gt;&lt;br /&gt;
#!/bin/bash&lt;br /&gt;
#----------------------------------------------------&lt;br /&gt;
# Example SLURM job script to run MPI applications on &lt;br /&gt;
# MSU&amp;#039;s system.&lt;br /&gt;
#&lt;br /&gt;
# $Id: job.mpi 1580 2013-01-08 04:10:50Z karl $&lt;br /&gt;
#----------------------------------------------------&lt;br /&gt;
&lt;br /&gt;
#SBATCH -J mpiJob              # Job name&lt;br /&gt;
#SBATCH -o mpiJob.%j.out       # Name of stdout output file (%j expands to jobId)&lt;br /&gt;
#SBATCH -p defq        # Queue name&lt;br /&gt;
#SBATCH -N 2                  # Total number of nodes requested (16 cores/node)&lt;br /&gt;
#SBATCH -n 32                 # Total number of mpi tasks requested&lt;br /&gt;
#SBATCH -t 01:30:00           # Run time (hh:mm:ss) - 1.5 hours&lt;br /&gt;
&lt;br /&gt;
# Launch the MPI executable named &amp;quot;a.out&amp;quot;&lt;br /&gt;
&lt;br /&gt;
mpirun ./whereamic&lt;br /&gt;
&amp;lt;/syntaxhighlight&amp;gt;&lt;br /&gt;
&lt;br /&gt;
=== Multiple MPI jobs ===&lt;br /&gt;
&amp;lt;syntaxhighlight&amp;gt;&lt;br /&gt;
#!/bin/bash&lt;br /&gt;
#----------------------------------------------------&lt;br /&gt;
# Example SLURM job script to run multiple mpi &lt;br /&gt;
# applications within one batch job on MSU&amp;#039;s &lt;br /&gt;
# system.&lt;br /&gt;
#----------------------------------------------------&lt;br /&gt;
#SBATCH -J multiple_mpi_job     # Job name&lt;br /&gt;
#SBATCH -o multiple_mpi_job.o%j # Name of stdout output file(%j expands to jobId) &lt;br /&gt;
#SBATCH -e multiple_mpi_job.o%j # Name of stderr output file(%j expands to jobId)&lt;br /&gt;
#SBATCH -p defq          # Queue name&lt;br /&gt;
#SBATCH -N 4                    # Total number of nodes requested (16 cores/node)&lt;br /&gt;
#SBATCH -n 64                   # Total number of mpi tasks requested&lt;br /&gt;
#SBATCH -t 01:30:00             # Run time (hh:mm:ss) - 1.5 hours&lt;br /&gt;
# The next line is required if the user has more than one project&lt;br /&gt;
# #SBATCH -A A-yourproject  # &amp;lt;-- Allocation name to charge job against&lt;br /&gt;
&lt;br /&gt;
# This example will run 3 MPI applications using 32 tasks, &lt;br /&gt;
# 16 tasks, and 16 tasks&lt;br /&gt;
&lt;br /&gt;
#DO NOT use tacc_affinity with multiple MPI applications &lt;br /&gt;
# within the same batch script!&lt;br /&gt;
# If running in a hybrid mode, please contact the help desk &lt;br /&gt;
# for support.&lt;br /&gt;
&lt;br /&gt;
# Launch each MPI application using the &amp;quot;-o&amp;quot; and &amp;quot;-n&amp;quot; flags &lt;br /&gt;
# in the background&lt;br /&gt;
#Application 1&lt;br /&gt;
srun -o app1 -n 32 ./whereamic &amp;amp;&lt;br /&gt;
&lt;br /&gt;
#Application 2&lt;br /&gt;
srun -o app2 -n 16 ./hello_mpic &amp;amp;&lt;br /&gt;
&lt;br /&gt;
#Application 3&lt;br /&gt;
srun -o app3 -n 16 ./hello_mpic &amp;amp;&lt;br /&gt;
&lt;br /&gt;
#Wait for all the MPI applications to finish&lt;br /&gt;
wait&lt;br /&gt;
&lt;br /&gt;
&amp;lt;/syntaxhighlight&amp;gt;&lt;br /&gt;
&lt;br /&gt;
=== OpenMP Job ===&lt;br /&gt;
&amp;lt;syntaxhighlight&amp;gt;&lt;br /&gt;
#!/bin/bash&lt;br /&gt;
#----------------------------------------------------&lt;br /&gt;
# Example SLURM job script to run openmp applications &lt;br /&gt;
# on MSU&amp;#039;s system.&lt;br /&gt;
#----------------------------------------------------&lt;br /&gt;
#SBATCH -J openmp_job     # Job name&lt;br /&gt;
#SBATCH -o openmp_job.o%j # Name of stdout output file(%j expands to jobId) &lt;br /&gt;
#SBATCH -e openmp_job.o%j # Name of stderr output file(%j expands to jobId)&lt;br /&gt;
#SBATCH -p defq         # Serial queue for serial and OpenMP jobs&lt;br /&gt;
#SBATCH -N 1              # Total number of nodes requested (16 cores/node)&lt;br /&gt;
#SBATCH -n 8              # Total number of tasks requested (one per OpenMP thread)&lt;br /&gt;
#SBATCH -t 01:30:00       # Run time (hh:mm:ss) - 1.5 hours&lt;br /&gt;
# The next line is required if the user has more than one project&lt;br /&gt;
# #SBATCH -A A-yourproject  # &amp;lt;-- Allocation name to charge job against&lt;br /&gt;
&lt;br /&gt;
# This example will run an OpenMP application using 8 threads&lt;br /&gt;
&lt;br /&gt;
# Set the number of threads per task(Default=1)&lt;br /&gt;
export OMP_NUM_THREADS=8&lt;br /&gt;
&lt;br /&gt;
# Run the OpenMP application&lt;br /&gt;
./omp_helloc&lt;br /&gt;
&lt;br /&gt;
&amp;lt;/syntaxhighlight&amp;gt;&lt;br /&gt;
=== Hybrid Job ===&lt;br /&gt;
&amp;lt;syntaxhighlight&amp;gt;&lt;br /&gt;
#!/bin/bash&lt;br /&gt;
#----------------------------------------------------&lt;br /&gt;
# Example SLURM job script to run hybrid applications &lt;br /&gt;
# (MPI/OpenMP or MPI/pthreads) on MSU&amp;#039;s system.&lt;br /&gt;
#----------------------------------------------------&lt;br /&gt;
#SBATCH -J hybrid_job     # Job name&lt;br /&gt;
#SBATCH -o hybrid_job.o%j # Name of stdout output file(%j expands to jobId) &lt;br /&gt;
#SBATCH -e hybrid_job.o%j # Name of stderr output file(%j expands to jobId)&lt;br /&gt;
#SBATCH -p defq    # Queue name&lt;br /&gt;
#SBATCH -N 2              # Total number of nodes requested (16 cores/node)&lt;br /&gt;
#SBATCH -n 4              # Total number of mpi tasks requested&lt;br /&gt;
#SBATCH -t 01:30:00       # Run time (hh:mm:ss) - 1.5 hours&lt;br /&gt;
# The next line is required if the user has more than one project&lt;br /&gt;
# #SBATCH -A A-yourproject  # &amp;lt;-- Allocation name to charge job against&lt;br /&gt;
&lt;br /&gt;
# This example will run 4 MPI tasks on 2 nodes with each task &lt;br /&gt;
# using 8 threads&lt;br /&gt;
&lt;br /&gt;
# Set the number of threads per task(Default=1)&lt;br /&gt;
export OMP_NUM_THREADS=8&lt;br /&gt;
&lt;br /&gt;
# Launch the hybrid MPI/OpenMP application; the thread count per &lt;br /&gt;
# task is taken from OMP_NUM_THREADS&lt;br /&gt;
srun ./hybrid_hello&lt;br /&gt;
&lt;br /&gt;
&amp;lt;/syntaxhighlight&amp;gt;&lt;/div&gt;</summary>
		<author><name>Chenhui</name></author>
	</entry>
</feed>