[abe] # Machine Database # Machine description nickname = abe name = Abe location = NCSA description = The new large Linux cluster at NCSA webpage = http://www.ncsa.uiuc.edu/UserInfo/Resources/Hardware/Intel64Cluster/ status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = honest3.ncsa.uiuc.edu # iomachine # trampoline rsynccmd = /u/ac/eschnett/rsync-3.0.5/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = export LM_LICENSE_FILE=1702@barbossa:1702@nani:1702@lilo:1702@stitch:1708@barbossa:1704@barbossa.ncsa.uiuc.edu aliaspattern = ^honest[34](\.ncsa\.uiuc\.edu)?$ # Source tree management sourcebasedir = /u/ac/@USER@ optionlist = abe-mvapich2.cfg thornlist = wavetoy-generic.th submitscript = abe-mvapich2.sh make = make -j4 # Simulation management basedir = /u/ac/@USER@/scratch-global/simulations # quota cpu = Intel 64 (Clovertown) cpufreq = 2.33 flop/cycle = 4 ppn = 8 spn = 2 mpn = 1 max-num-threads = 8 num-threads = 8 memory = 8192 nodes = 1200 min-ppn = 8 allocation = loni_cactus05 queue = normal maxwalltime = 48:00:00 # maxqueueslots submit = /usr/local/bin/qsub @SCRIPTFILE@ # run # run2 getstatus = /usr/local/torque/bin/qstat @JOB_ID@ stop = /usr/local/torque/bin/qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /cfs/scratch/users/${USER}/${PBS_JOBID} exechost = /usr/local/torque/bin/qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = cat /u/ac/@USER@/.pbs_spool/@JOB_ID@.abem5.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun -machinefile ${MPI_NODEFILE} -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat /u/ac/@USER@/.pbs_spool/@JOB_ID@.abem5.ER precmd = : postcmd = : stdout-follow = tail -n 100 -f /u/ac/@USER@/.pbs_spool/@JOB_ID@.abem5.OU /u/ac/@USER@/.pbs_spool/@JOB_ID@.abem5.ER [ac] # Machine description nickname = ac name = AC location = NCSA description = NCSA's Accelerator Cluster webpage = http://www.ncsa.uiuc.edu/Projects/GPUcluster/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = ac.ncsa.uiuc.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^ac(\.ncsa\.uiuc\.edu)?$ # Source tree management sourcebasedir = /home/ac/@USER@ optionlist = ac-mvapich2.cfg thornlist = wavetoy-generic.th submitscript = ac-mvapich2.sh make = make -j2 # Simulation management basedir = /home/ac/@USER@/simulations # quota cpu = Dual-Core AMD Opteron(tm) Processor 2216 cpufreq = 2.4 flop/cycle = 4 ppn = 4 spn = 1 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 32 min-ppn = 4 # allocation queue = batch # maxwalltime # maxqueueslots submit = /usr/local/bin/qsub @SCRIPTFILE@ # run # run2 getstatus = /usr/local/bin/qstat @JOB_ID@ stop = /usr/local/bin/qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /home/ac/@USER@/scratch exechost = /usr/local/bin/qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = cat /var/torque/spool/@JOB_ID@.acm.OU mpirun = stderr = cat /var/torque/spool/@JOB_ID@.acm.ER precmd = : postcmd = : stdout-follow = tail -n 100 -f /var/torque/spool/@JOB_ID@.acm.OU /var/torque/spool/@JOB_ID@.acm.ER [athena] # Machine description nickname = athena name = Athena location = NICS description = A Cray XT4 at NICS webpage = http://www.nics.tennessee.edu/computing-resources/athena status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = athena.nics.utk.edu # iomachine # trampoline rsynccmd = /nics/b/home/eschnett/rsync-3.0.3/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^athena-pwd[12](\.nics\.utk\.edu)?$ # Source tree management sourcebasedir = /nics/a/proj/cactus/@USER@/xt4 optionlist = athena.cfg thornlist = wavetoy-generic.th submitscript = athena.sh make = make -j2 # Simulation management basedir = /lustre/scratch/@USER@/simulations # quota cpu = "quad-core AMD Opteron " cpufreq = 2.3 # flop/cycle ppn = 4 spn = 1 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 4096 nodes = 4510 min-ppn = 4 # allocation queue = batch maxwalltime = 48:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]nid[0-9]* statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /lustre/scratch/@USER@/scratch/@JOB_ID@ exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = cat /var/spool/torque/mom_priv/jobs/@JOB_ID@.nid0.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && #aprun -n @NUM_PROCS@ -d @NUM_THREADS@ -ss gdb -batch -x gdb.cmds --args ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat /var/spool/torque/mom_priv/jobs/@JOB_ID@.nid0.ER precmd = : postcmd = : stdout-follow = tail -n 100 -f /var/spool/torque/mom_priv/jobs/@JOB_ID@.nid0.OU /var/spool/torque/mom_priv/jobs/@JOB_ID@.nid0.ER [bassi] # Machine description nickname = bassi name = Bassi location = NERSC description = A P5 at NERSC webpage = http://www.nersc.gov/nusers/systems/bassi/ status = outdated # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = bassi.nersc.gov # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^b0[23]01(\.nersc\.gov)?$ # Source tree management sourcebasedir = /project/projectdirs/m152/@USER@/bassi optionlist = bassi.cfg thornlist = wavetoy-generic.th submitscript = bassi.sh make = gmake -j8 # Simulation management basedir = /scratch/scratchdirs/@USER@/simulations # quota cpu = Power5+ cpufreq = 1.9 flop/cycle = 4 ppn = 8 # spn # mpn max-num-threads = 8 num-threads = 8 memory = 32768 nodes = 48 min-ppn = 8 allocation = m152 queue = regular maxwalltime = 36:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq @JOB_ID@ stop = llcancel @JOB_ID@ submitpattern = "\"(.*)\"" statuspattern = ^ *@JOB_ID@ queuedpattern = " I " runningpattern = " R " scratchdir = /scratch/scratchdirs/${USER}/${PBS_JOBID} exechost = false exechostpattern = $^ stdout = false mpirun = /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = false precmd = : postcmd = : stdout-follow = false [beast] # Machine description nickname = beast name = Beast location = LITE description = The large SGI Altix of LITE webpage = http://www.lite3d.com/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = beast iomachine = prism trampoline = prism rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^beast(\.LITE|\.louisiana\.edu)?$ # Source tree management sourcebasedir = /store/home/@USER@ optionlist = beast.cfg thornlist = wavetoy-generic.th submitscript = beast.sh make = gmake -j8 # Simulation management basedir = /store/home/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 160 # spn # mpn max-num-threads = 160 num-threads = 1 memory = 4194304 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = /store/home/schnetter/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [belladonna] # Machine description nickname = belladonna name = Belladonna location = AEI description = The old new AEI numrel cluster webpage = http://supercomputers.aei.mpg.de/belladonna status = outdated # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = belladonna.aei.mpg.de # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^belladonna(\.aei\.mpg\.de|\.belladonna\.admin)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = belladonna.cfg thornlist = wavetoy-generic.th submitscript = belladonna.sh make = make -j2 # Simulation management basedir = /data20/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 4 # spn # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 52 min-ppn = 4 # allocation queue = default maxwalltime = 168:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/PBS/spool/@JOB_ID@.maste.OU mpirun = time mpiexec -n @PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/PBS/spool/@JOB_ID@.maste.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/PBS/spool/@JOB_ID@.maste.OU /var/spool/PBS/spool/@JOB_ID@.maste.ER [bethe] # Machine description nickname = bethe name = Bethe location = Caltech, Christian Ott's office description = Christian Ott's workstation at Caltech # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = bethe.tapir.caltech.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^bethe\.tapir\.caltech\.edu$ # Source tree management sourcebasedir = /home/@USER@ optionlist = bethe.cfg thornlist = wavetoy-generic.th submitscript = bethe.sh make = make -j8 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Intel(R) Xeon(R) CPU (Nehalem) cpufreq = 2.67 # flop/cycle ppn = 16 spn = 2 # mpn max-num-threads = 16 num-threads = 16 memory = 16384 nodes = 1 min-ppn = 1 # allocation queue = n/a # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [bluedawg] # Machine description nickname = bluedawg name = Bluedawg location = LONI, LA Tech description = The LONI IBM P5 at LA Tech webpage = http://www.loni.org/systems/system.php?system=Bluedawg status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = bluedawg.loni.org # iomachine # trampoline rsynccmd = /work/default/eschnett/rsync-3.0.4/bin/rsync rsyncopts = -c sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^(bluedawg(\.loni\.org)?|l1f1n\d\d(\.sys\.loni\.org)?)$ # Source tree management sourcebasedir = /work/default/@USER@ optionlist = ducky.cfg thornlist = wavetoy-generic.th submitscript = ducky.sh make = mkdir -p /work/default/@USER@/tmp && env TMPDIR=/work/default/@USER@/tmp gmake -j4 # Simulation management basedir = /mnt/lpfs.nfs102/@USER@/simulations quota = 20 cpu = Power5+ cpufreq = 1.9 flop/cycle = 4 ppn = 8 # spn # mpn max-num-threads = 8 num-threads = 8 memory = 16384 nodes = 13 min-ppn = 8 allocation = loni_numrel04 queue = checkpt maxwalltime = 120:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq @JOB_ID@ stop = llcancel @JOB_ID@ submitpattern = "\"(.*)\"" statuspattern = ^ *@JOB_ID@ queuedpattern = " I " runningpattern = " R " scratchdir = scratchdir exechost = llq -f '%h' @JOB_ID@b | tail +3 | head -1 exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = /opt/freeware/bin/tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [bp] # Machine description nickname = bp name = BluePrint location = NCSA description = Blue Waters Power5+ test system webpage = https://bw-wiki.ncsa.uiuc.edu/display/HWandSW/Preliminary+Test+Systems status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = bp-login1.ncsa.uiuc.edu # iomachine trampoline = abe rsynccmd = /u/home/ac/eschnett/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^bp-login[12]\.ncsa\.uiuc\.edu$|^f10n12(\.local)?$ # Source tree management sourcebasedir = /u/home/ac/@USER@ optionlist = bp.cfg thornlist = wavetoy-generic.th submitscript = bp.sh make = mkdir -p /u/home/ac/@USER@/tmp && env TMPDIR=/u/home/ac/@USER@/tmp gmake -j4 # Simulation management basedir = /scr/@USER@/simulations # quota cpu = Power5+ cpufreq = 1.9 flop/cycle = 4 ppn = 16 # spn # mpn max-num-threads = 16 num-threads = 16 memory = 65536 nodes = 114 min-ppn = 8 allocation = NoAllocation queue = No_Class maxwalltime = 72:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq @JOB_ID@.0 stop = llcancel @JOB_ID@.0 submitpattern = "\"bp-login1\.local\.(.*)\"" statuspattern = ^bp-login1\.@JOB_ID@\.0 queuedpattern = " I " runningpattern = " (R|ST|H) " scratchdir = scratchdir exechost = llq -f '%h' @JOB_ID@.0 | tail +3 | head -1 exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = /opt/freeware/bin/tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [carver] # Machine description nickname = carver name = Carver location = NERSC description = IBM iDataPlex at NERSC webpage = http://www.nersc.gov/nusers/systems/carver/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = carver.nersc.gov # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = export LM_LICENSE_FILE=/usr/common/usg/pgi/10.0-0/../license.dat aliaspattern = ^cvrsvc\d\d(\.nersc\.gov)?$ # Source tree management sourcebasedir = /project/projectdirs/m152/@USER@/carver optionlist = carver.cfg thornlist = wavetoy-generic.th submitscript = carver.sh make = make -j4 # Simulation management basedir = /global/scratch/sd/@USER@/simulations # quota cpu = Intel Nehalem cpufreq = 2.67 flop/cycle = 4 ppn = 8 spn = 2 mpn = 1 max-num-threads = 8 num-threads = 4 memory = 20480 nodes = 64 min-ppn = 8 allocation = m152 queue = regular maxwalltime = 24:00:00 # maxqueueslots submit = /usr/syscom/opt/torque/default/bin/qsub @SCRIPTFILE@ # run # run2 getstatus = /usr/syscom/opt/torque/default/bin/qstat -u @USER@ | awk '$1 == @ #?JOB_ID@ {print $1,$5}' stop = /usr/syscom/opt/torque/default/bin/qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[ \t] queuedpattern = ^@JOB_ID@[ \t]+(qw|hqw) runningpattern = ^@JOB_ID@[ \t]+r scratchdir = /global/scratch/sd/@USER@/scratch/${PBS_JOBID} exechost = /usr/syscom/opt/torque/default/bin/qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = cat @RUNDIR@/@SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${OPENMPI_DIR}/bin/mpirun -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @RUNDIR@/@SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @RUNDIR@/@SIMULATION_NAME@.out @RUNDIR@/@SIMULATION_NAME@.err [catbert] # Machine description nickname = catbert name = Catbert location = LSU, CCT description = One of the numrel workstations # webpage status = outdated # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = catbert.phys.lsu.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^catbert(\.phys\.lsu\.edu|\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = catbert-intel.cfg thornlist = wavetoy-generic.th submitscript = catbert-intel.sh make = make -j2 # Simulation management basedir = /home/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 2 # spn # mpn max-num-threads = 2 num-threads = 2 memory = 8192 nodes = 1 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [celeritas] # Machine description nickname = celeritas name = Celeritas location = LSU, CCT description = Thomas Sterling's research cluster # webpage status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = celeritas.cct.lsu.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^celeritas(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = celeritas.cfg thornlist = wavetoy-generic.th submitscript = celeritas.sh make = make -j2 # Simulation management basedir = /home/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 2 # spn # mpn max-num-threads = 2 num-threads = 2 memory = 8192 nodes = 1 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh $(echo @EXECHOST@ | sed -e "s/ic/kali/") cat /var/spool/pbs/spool/@JOB_ID@.peyot.OU mpirun = MPICHDIR=/home/packages/mpich1-eth-ch_p4 && time ${MPICHDIR}/bin/mpirun -np @PROCS@ -machinefile ${PBS_NODEFILE} ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh $(echo @EXECHOST@ | sed -e "s/ic/kali/") cat /var/spool/pbs/spool/@JOB_ID@.peyot.ER precmd = : postcmd = : stdout-follow = ssh $(echo @EXECHOST@ | sed -e "s/ic/kali/") tail -n 100 -f /var/spool/pbs/spool/@JOB_ID@.peyot.OU /var/spool/pbs/spool/@JOB_ID@.peyot.ER [cobalt] # Machine description nickname = cobalt name = Cobalt location = NCSA description = A large SGI Altix at NCSA webpage = http://www.ncsa.uiuc.edu/UserInfo/Resources/Hardware/SGIAltix/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = cobalt.ncsa.uiuc.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^co-login1(\.ncsa\.uiuc\.edu)?$ # Source tree management sourcebasedir = /u/ac/@USER@ optionlist = cobalt.cfg thornlist = wavetoy-generic.th submitscript = cobalt.sh make = make -j4 # Simulation management basedir = /u/ac/@USER@/scratch-global/simulations # quota cpu = Itanium 2 cpufreq = 1.6 # flop/cycle ppn = 512 spn = 256 # mpn max-num-threads = 512 num-threads = 1 memory = 4194304 nodes = 1 min-ppn = 1 allocation = out # queue maxwalltime = 18:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /scratch/batch/${PBS_JOBID} exechost = false exechostpattern = $^ stdout = false mpirun = time mpirun -np @PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = false precmd = : postcmd = : stdout-follow = false [damiana] # Machine description nickname = damiana name = Damiana location = AEI description = The new AEI numrel cluster webpage = http://supercomputers.aei.mpg.de/damiana status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = login-damiana.aei.mpg.de # iomachine # trampoline rsynccmd = /home/eschnett/rsync-3.0.4/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^(((login-)?damiana)|(sl-\d\d))(\.aei\.mpg\.de|\.damiana\.admin)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = damiana.cfg thornlist = wavetoy-generic.th submitscript = damiana.sh make = make -j4 # Simulation management basedir = /lustre/AEI/@USER@/simulations # quota cpu = Intel Xeon 5160 Woodcrest 4MB shared L2-Cache # cpufreq # flop/cycle ppn = 4 # spn # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 170 min-ppn = 4 # allocation queue = intel.q maxwalltime = 24:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat -u @USER@ | awk '$1 == @JOB_ID@ {print $1,$5}' stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[ \t] queuedpattern = ^@JOB_ID@[ \t]+(qw|hqw) runningpattern = ^@JOB_ID@[ \t]+r scratchdir = /scratch/${USER}/${JOB_ID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/pbs/spool/@JOB_ID@.damia.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPIDIR}/bin/mpirun -v --mca btl openib,self --mca mpi_leave_pinned 0 -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/pbs/spool/@JOB_ID@.damia.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/pbs/spool/@JOB_ID@.damia.OU /var/spool/pbs/spool/@JOB_ID@.damia.ER [damiana2] # Machine description nickname = damiana2 name = Damiana location = AEI description = The new AEI numrel cluster webpage = http://supercomputers.aei.mpg.de/damiana status = trampoline # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = damiana2.aei.mpg.de # iomachine # trampoline rsynccmd = /home/eschnett/rsync-3.0.4/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^damiana2(\.aei\.mpg\.de|\.damiana\.admin)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = damiana.cfg thornlist = wavetoy-generic.th submitscript = damiana.sh make = make -j4 # Simulation management basedir = /lustre/AEI/@USER@/simulations # quota cpu = Intel Xeon 5160 Woodcrest 4MB shared L2-Cache # cpufreq # flop/cycle ppn = 4 # spn # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 170 # min-ppn # allocation queue = intel.q maxwalltime = 48:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat -u @USER@ | awk '$1 == @JOB_ID@ {print $1,$5}' stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[ \t] queuedpattern = ^@JOB_ID@[ \t]+(qw|hqw) runningpattern = ^@JOB_ID@[ \t]+r scratchdir = /scratch/${USER}/${JOB_ID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/pbs/spool/@JOB_ID@.damia.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPIDIR}/bin/mpirun -v --mca btl openib,self --mca mpi_leave_pinned 0 -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/pbs/spool/@JOB_ID@.damia.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/pbs/spool/@JOB_ID@.damia.OU /var/spool/pbs/spool/@JOB_ID@.damia.ER [ducky] # Machine description nickname = ducky name = Ducky location = LONI, Tulane, New Orleans description = The LONI IBM P5 machine at Tulane, New Orleans webpage = http://www.loni.org/systems/system.php?system=Ducky status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = ducky.loni.org # iomachine trampoline = is rsynccmd = /work/default/eschnett/rsync-3.0.4/bin/rsync rsyncopts = -c sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^(ducky(\.loni\.org)?|l2f1n\d\d(\.sys\.loni\.org)?)$ # Source tree management sourcebasedir = /work/default/@USER@ optionlist = ducky.cfg thornlist = wavetoy-generic.th submitscript = ducky.sh make = mkdir -p /work/default/@USER@/tmp && env TMPDIR=/work/default/@USER@/tmp gmake -j4 # Simulation management basedir = /work/nfs203/@USER@/simulations quota = 20 cpu = Power5+ cpufreq = 1.9 flop/cycle = 4 ppn = 8 # spn # mpn max-num-threads = 8 num-threads = 8 memory = 16384 nodes = 13 min-ppn = 8 allocation = loni_numrel04 queue = checkpt maxwalltime = 120:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq @JOB_ID@ stop = llcancel @JOB_ID@ submitpattern = "\"(.*)\"" statuspattern = ^ *@JOB_ID@ queuedpattern = " I " runningpattern = " R " scratchdir = scratchdir exechost = llq -f '%h' @JOB_ID@b | tail +3 | head -1 exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = /opt/freeware/bin/tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [ducky-globus] # Machine description nickname = ducky-globus name = Ducky location = LONI description = The LONI network webpage = http://www.loni.org/systems/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = ducky.loni.org # iomachine trampoline = is rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = $^ # Source tree management sourcebasedir = /work/default/@USER@ optionlist = ducky-globus.cfg thornlist = wavetoy-generic.th submitscript = ducky-globus.sh make = mkdir -p /work/default/@USER@/tmp && env TMPDIR=/work/default/@USER@/tmp gmake -j4 # Simulation management basedir = /work/default/@USER@/simulations # quota cpu = Power5+ cpufreq = 1.9 flop/cycle = 4 ppn = 8 # spn # mpn max-num-threads = 8 num-threads = 8 memory = 16384 nodes = 13 min-ppn = 8 allocation = loni_numrel04 queue = checkpt maxwalltime = 120:00:00 # maxqueueslots submit = globusrun -f @SCRIPTFILE@ < /dev/null > globusrun.$$.out 2> globusrun.$$.err & echo $! # run # run2 getstatus = false stop = false submitpattern = $^ statuspattern = $^ queuedpattern = $^ runningpattern = $^ scratchdir = scratchdir # exechost # exechostpattern # stdout mpirun = export CACTUS_STARTTIME=$(date +%s) && /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ # stderr precmd = : postcmd = : # # stdout-follow [eric] # Machine description nickname = eric name = Eric location = LONI, LSU description = The LONI Linux cluster at LSU webpage = http://www.loni.org/systems/system.php?system=Eric status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = eric.loni.org # iomachine trampoline = is rsynccmd = /home/eschnett/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^eric2(\.loni\.org)?$ # Source tree management sourcebasedir = /work/@USER@ optionlist = eric-mvapich2-new.cfg thornlist = wavetoy-generic.th submitscript = eric-mvapich2-new.sh make = make -j4 # Simulation management basedir = /work/@USER@/simulations # quota cpu = Dual Core Xeon 64-bit Processors cpufreq = 2.33 flop/cycle = 4 ppn = 4 spn = 2 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 4096 nodes = 48 min-ppn = 4 allocation = loni_numrel04 queue = checkpt maxwalltime = 72:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]eric2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.eric.OU mpirun = export MPICHDIR=/usr/local/packages/mvapich2/1.4/intel-11.1 && export MPI_NODEFILE=@MPI_NODEFILE@ && export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun_rsh -np @NUM_PROCS@ -hostfile ${MPI_NODEFILE} /bin/env MV2_ENABLE_AFFINITY=0 OMP_NUM_THREADS=@NUM_THREADS@ @EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.eric.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.eric.OU /var/spool/torque/spool/@JOB_ID@.eric.ER [franklin] # Machine description nickname = franklin name = Franklin location = NERSC description = A Cray XT4 at NERSC webpage = http://www.nersc.gov/nusers/systems/franklin/ status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = franklin.nersc.gov # iomachine # trampoline rsynccmd = /global/homes/s/schnette/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^nid[0-9][0-9][0-9][0-9][0-9](\.nersc\.gov)?$ # Source tree management sourcebasedir = /project/projectdirs/m152/@USER@/franklin optionlist = franklin.cfg thornlist = wavetoy-generic.th submitscript = franklin.sh make = make -j2 # Simulation management basedir = /scratch/scratchdirs/@USER@/simulations # quota cpu = quad-core AMD Opteron processor (Budapest) cpufreq = 2.3 flop/cycle = 4 ppn = 4 spn = 1 mpn = 1 max-num-threads = 4 num-threads = 2 memory = 7566 nodes = 8502 min-ppn = 4 allocation = m152 queue = regular maxwalltime = 24:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]nid[0-9]* statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /scratch/scratchdirs/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = cat /var/spool/torque/mom_priv/jobs/@JOB_ID@.nid0.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && aprun -n @NUM_PROCS@ -N @(@PPN_USED@/@NUM_THREADS@)@ -d @NUM_THREADS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat /var/spool/torque/mom_priv/jobs/@JOB_ID@.nid0.ER precmd = : postcmd = : stdout-follow = tail -n 100 -f /var/spool/torque/mom_priv/jobs/@JOB_ID@.nid0.OU /var/spool/torque/mom_priv/jobs/@JOB_ID@.nid0.ER [gauss] # Machine description nickname = gauss name = Gauss location = Texas A&M University, SURAgrid description = Linux webpage = https://gridportal.sura.org/gridsphere/dist/resources/calclab.html status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = gauss.math.tamu.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = gsissh sshopts = localsshsetup = test $(grid-proxy-info -timeleft 2> /dev/null) -gt 0 2> /dev/null || grid-proxy-init environcmd = : sshsetup = : aliaspattern = ^gauss\.math\.tamu\.edu$ # Source tree management sourcebasedir = /u/sura/@USER@ optionlist = gauss.cfg thornlist = wavetoy-generic.th submitscript = gauss.sh make = make -j2 # Simulation management basedir = /u/sura/@USER@/simulations # quota cpu = AMD Opteron(tm) Processor 246 cpufreq = 1.99 # flop/cycle ppn = 2 # spn # mpn # max-num-threads # num-threads # memory nodes = 390 # min-ppn # allocation # queue maxwalltime = 12:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = scratchdir exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ # stdout mpirun = export CACTUS_STARTTIME=$(date +%s) && time openmpirun -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ # stderr precmd = : postcmd = : # # stdout-follow [genius] # Machine description nickname = genius name = Genius location = RZG description = Blue Gene/P webpage = https://www.rzg.mpg.de/computing/hardware/BGP/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = genius.rzg.mpg.de # iomachine trampoline = vip rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^genius\d*$ # Source tree management sourcebasedir = /u/@USER@/BlueGene optionlist = genius.cfg thornlist = wavetoy-generic.th submitscript = genius.sh make = make -j4 # Simulation management basedir = /ptmp/@USER@/BlueGene/simulations # quota cpu = PowerPC 450 cpufreq = 0.85 flop/cycle = 2 ppn = 4 # spn # mpn max-num-threads = 4 num-threads = 4 memory = 2048 nodes = 1024 min-ppn = 4 allocation = petascaling queue = default maxwalltime = 1:00:00 maxqueueslots = 20 submit = chmod a+x @SCRIPTFILE@ && qsub -A @ALLOCATION@ -q @QUEUE@ -t @WALLTIME@ --mode @(@NUM_THREADS@==4 ? "smp" : @NUM_THREADS@==2 ? "dual" : "vn")@ -n @NODES@ -M @USER@@alcf.anl.gov -O @SIMULATION_NAME@ -o @RUNDIR@/@SIMULATION_NAME@.out -e @RUNDIR@/@SIMULATION_NAME@.err --cwd @RUNDIR@-active --env=OMP_NUM_THREADS=@NUM_THREADS@ --env=BG_MAPPING=TXYZ @RUNDIR@-active/@EXECUTABLE@ -L 3 @PARFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " queued " runningpattern = " running " scratchdir = scratchdir exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.qb2.OU mpirun = stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.qb2.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.qb2.OU /var/spool/torque/spool/@JOB_ID@.qb2.ERR [hlrb2] # Machine description nickname = hlrb2 name = HLRB II location = LRZ description = An SGI Altix at the LRZ webpage = http://www.lrz-muenchen.de/services/compute/hlrb/ status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = hlrb2.lrz-muenchen.de # iomachine trampoline = numrel02 rsynccmd = /home/hlrb2/h0152/lu78kog/rsync-3.0.4/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^a01\.hlrb2\.lrz-muenchen\.de$ # Source tree management sourcebasedir = /home/hlrb2/h0152/@USER@ optionlist = hlrb2.cfg thornlist = wavetoy-generic.th submitscript = hlrb2.sh make = make -j16 # Simulation management basedir = /ptmp2/h0152/@USER@/simulations # quota cpu = Intel Itanium2 Montecito Dual Core cpufreq = 1.6 flop/cycle = 4 ppn = 9728 spn = 4864 mpn = 4864 max-num-threads = 2 num-threads = 2 memory = 39845888 nodes = 1 min-ppn = 1 allocation = h0152 # queue maxwalltime = 48:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /ptmp1/h0152/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.santak.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time mpiexec omplace ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.santak.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.santak.OU /var/spool/torque/spool/@JOB_ID@.santak.ER [hopper] # Machine description nickname = hopper name = Hopper location = NERSC description = A Cray XT5 at NERSC webpage = http://www.nersc.gov/nusers/systems/hopper/ status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = hopper.nersc.gov # iomachine # trampoline rsynccmd = /global/homes/s/schnette/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^hopper[0-9][0-9](\.nersc\.gov)?$ # Source tree management sourcebasedir = /project/projectdirs/m152/@USER@/hopper optionlist = hopper.cfg thornlist = wavetoy-generic.th submitscript = hopper.sh make = make -j8 # Simulation management basedir = /scratch/scratchdirs/@USER@/simulations # quota cpu = Quad-Core AMD Opteron(tm) Processor 8378 (Shanghai) cpufreq = 2.4 flop/cycle = 4 ppn = 8 spn = 2 # mpn max-num-threads = 8 num-threads = 4 memory = 16384 nodes = 664 min-ppn = 8 allocation = m152 queue = regular maxwalltime = 24:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]nid[0-9]* statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /scratch/scratchdirs/${USER}/scratch/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = cat /var/spool/torque/mom_priv/jobs/@JOB_ID@.hopper0.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && aprun -n @NUM_PROCS@ -N @(@PPN_USED@/@NUM_THREADS@)@ -d @NUM_THREADS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat /var/spool/torque/mom_priv/jobs/@JOB_ID@.hopper0.ER precmd = : postcmd = : stdout-follow = tail -n 100 -f /var/spool/torque/mom_priv/jobs/@JOB_ID@.hopper0.OU /var/spool/torque/mom_priv/jobs/@JOB_ID@.hopper0.ER [hyperion] # Machine description nickname = hyperion name = Hyperion location = LLNL description = A large Linux cluster at LLNL webpage = http://computing.llnl.gov/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = hyperion0-pub.llnl.gov # iomachine trampoline = is rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^hyperion0$ # Source tree management sourcebasedir = /home/@USER@ optionlist = hyperion.cfg thornlist = wavetoy-generic.th submitscript = hyperion.sh make = make -j4 # Simulation management basedir = /work/@USER@/simulations # quota cpu = Dual Core Xeon 64-bit Processors cpufreq = 2.33 flop/cycle = 4 ppn = 4 spn = 2 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 4096 nodes = 48 min-ppn = 4 allocation = loni_numrel04 queue = checkpt maxwalltime = 72:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]eric2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.eric.OU mpirun = stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.eric.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.eric.OU /var/spool/torque/spool/@JOB_ID@.eric.ER [intrepid] # Machine description nickname = intrepid name = Intrepid location = ALCF description = Blue Gene/P webpage = http://www.alcf.anl.gov/resources/storage.php status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = intrepid.alcf.anl.gov # iomachine # trampoline rsynccmd = /home/knarf/utils/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^login[0-9]\.intrepid # Source tree management sourcebasedir = /home/@USER@ optionlist = intrepid-xlc.cfg thornlist = wavetoy-generic.th submitscript = intrepid-xlc.sh make = make -j2 # Simulation management basedir = /gpfs1/@USER@/simulations # quota cpu = PowerPC 450 cpufreq = 0.85 flop/cycle = 4 ppn = 4 # spn # mpn max-num-threads = 4 num-threads = 4 memory = 2048 nodes = 40960 min-ppn = 4 allocation = McLachlanGB2009 queue = prod maxwalltime = 6:00:00 maxqueueslots = 20 submit = chmod a+x @SCRIPTFILE@ && qsub --mode vn --cwd @RUNDIR@-active -A '@ALLOCATION@' -q '@QUEUE@' -t '@WALLTIME@' -n '@NODES@' -M '@USER@@alcf.anl.gov' -O '@SIMULATION_NAME@' -o '@RUNDIR@/@SIMULATION_NAME@.out' -e '@RUNDIR@/@SIMULATION_NAME@.err' @RUNDIR@-active/@EXECUTABLE@ -L 3 @PARFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " queued " runningpattern = " running " scratchdir = scratchdir exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.qb2.OU mpirun = cobalt-mpirun -verbose 2 -np @NUM_PROCS@ -mode "$mode" -env OMP_NUM_THREADS=${OMP_NUM_THREADS}:CACTUS_STARTTIME=${CACTUS_STARTTIME}:BG_MAPPING=TXYZ ./@EXECUTABLE@ -L 3 @PARFILE@ && cobalt-mpirun -verbose 2 -np @NUM_PROCS@ -mode "$mode" -env OMP_NUM_THREADS=${OMP_NUM_THREADS}:CACTUS_STARTTIME=${CACTUS_STARTTIME}:BG_MAPPING=TXYZ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.qb2.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.qb2.OU /var/spool/torque/spool/@JOB_ID@.qb2.ER [is] # Machine description nickname = is name = IS location = LSU, CCT description = The main file server at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = is.cct.lsu.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^is(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 2 # spn # mpn max-num-threads = 2 num-threads = 2 memory = 4096 nodes = 1 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [kraken] # Machine description nickname = kraken name = Kraken location = NICS description = A Cray XT5 at NICS webpage = http://www.nics.tennessee.edu/computing-resources/kraken status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = kraken-gsi2.nics.teragrid.org # iomachine # trampoline rsynccmd = /nics/b/home/eschnett/rsync-3.0.3/bin/rsync rsyncopts = sshcmd = gsissh sshopts = localsshsetup = test $(grid-proxy-info -timeleft 2> /dev/null) -gt 0 2> /dev/null || myproxy-logon -p 7514 -s myproxy.teragrid.org -T -l @USER@ environcmd = : sshsetup = source /etc/profile aliaspattern = ^kraken(-pwd)?[1234](\.nics\.utk\.edu)?$ # Source tree management sourcebasedir = /nics/a/proj/cactus/@USER@/xt5 optionlist = kraken.cfg thornlist = wavetoy-generic.th submitscript = kraken.sh make = make -j2 # Simulation management basedir = /lustre/scratch/@USER@/simulations # quota cpu = six-core AMD Opteron (Istanbul) cpufreq = 2.6 flop/cycle = 4 ppn = 12 spn = 2 # mpn max-num-threads = 12 num-threads = 6 memory = 16384 nodes = 8256 min-ppn = 12 allocation = loni_cactus05 queue = batch maxwalltime = 24:00:00 # maxqueueslots submit = /opt/torque/2.3.5/bin/qsub @SCRIPTFILE@ # run # run2 getstatus = /opt/torque/2.3.5/bin/qstat @JOB_ID@ stop = /opt/torque/2.3.5/bin/qdel @JOB_ID@ submitpattern = ([[:digit:]]+[.]nid[0-9]*) statuspattern = "^@JOB_ID@[. ].* [QRH] " queuedpattern = "^@JOB_ID@[. ].* Q " runningpattern = "^@JOB_ID@[. ].* R " scratchdir = /lustre/scratch/@USER@/scratch/@JOB_ID@ exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = cat @JOB_ID@.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && #aprun -n @NUM_PROCS@ -d @NUM_THREADS@ gdb -batch -x gdb.cmds --args ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @JOB_ID@.ER precmd = : postcmd = : stdout-follow = tail -n 100 -f @JOB_ID@.ER @JOB_ID@.OU [lacumba] # Machine description nickname = lacumba name = LaCumba location = LONI, Southern, Baton Rouge description = The LONI IBM P5 machine at Southern, Baton Rouge Orleans webpage = http://www.loni.org/systems/system.php?system=LaCumba status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = lacumba.loni.org # iomachine trampoline = is rsynccmd = /work/default/eschnett/rsync-3.0.4/bin/rsync rsyncopts = -c sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^(lacumba(\.loni\.org)?|l5f1n\d\d(\.sys\.loni\.org)?)$ # Source tree management sourcebasedir = /work/default/@USER@ optionlist = ducky.cfg thornlist = wavetoy-generic.th submitscript = ducky.sh make = mkdir -p /work/default/@USER@/tmp && env TMPDIR=/work/default/@USER@/tmp gmake -j4 # Simulation management basedir = /work/default/@USER@/simulations quota = 20 # cpu # cpufreq # flop/cycle ppn = 8 # spn # mpn max-num-threads = 8 num-threads = 8 memory = 16384 nodes = 13 min-ppn = 8 allocation = loni_numrel04 queue = checkpt maxwalltime = 120:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq @JOB_ID@ stop = llcancel @JOB_ID@ submitpattern = "\"(.*)\"" statuspattern = ^ *@JOB_ID@ queuedpattern = " I " runningpattern = " R " scratchdir = scratchdir exechost = llq -f '%h' @JOB_ID@b | tail +3 | head -1 exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = /opt/freeware/bin/tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [lincoln] # Machine description nickname = lincoln name = Lincoln location = NCSA description = A Linux cluster with Tesla GPU accelerators at NCSA webpage = http://www.ncsa.illinois.edu/UserInfo/Resources/Hardware/Intel64TeslaCluster/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = honest1.ncsa.uiuc.edu # iomachine # trampoline rsynccmd = /u/ac/eschnett/rsync-3.0.5/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = export LM_LICENSE_FILE=1702@barbossa:1702@nani:1702@lilo:1702@stitch:1708@barbossa:1704@barbossa.ncsa.uiuc.edu aliaspattern = ^honest[1](\.ncsa\.uiuc\.edu)?$ # Source tree management sourcebasedir = /u/ac/@USER@/lincoln optionlist = lincoln-mvapich2.cfg thornlist = wavetoy-generic.th submitscript = lincoln-mvapich2.sh make = make -j4 # Simulation management basedir = /u/ac/@USER@/scratch-global/lincoln/simulations # quota cpu = Intel 64 (Harperrtown) cpufreq = 2.33 flop/cycle = 4 ppn = 8 spn = 2 mpn = 1 max-num-threads = 8 num-threads = 8 memory = 16384 nodes = 192 min-ppn = 8 allocation = out queue = lincoln maxwalltime = 241:00:00 # maxqueueslots submit = /usr/local/bin/qsub @SCRIPTFILE@ # run # run2 getstatus = /usr/local/torque/bin/qstat @JOB_ID@ stop = /usr/local/torque/bin/qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /cfs/scratch/users/${USER}/${PBS_JOBID} exechost = /usr/local/torque/bin/qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = cat /u/ac/@USER@/.pbs_spool/@JOB_ID@.abem5.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun -machinefile ${MPI_NODEFILE} -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat /u/ac/@USER@/.pbs_spool/@JOB_ID@.abem5.ER precmd = : postcmd = : stdout-follow = tail -n 100 -f /u/ac/@USER@/.pbs_spool/@JOB_ID@.abem5.OU /u/ac/@USER@/.pbs_spool/@JOB_ID@.abem5.ER [lonestar] # Machine description nickname = lonestar name = Lonestar location = TACC description = A large Linux cluster at TACC webpage = http://www.tacc.utexas.edu/services/userguides/lonestar/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = lonestar.tacc.utexas.edu # iomachine # trampoline rsynccmd = /home/utexas/ph/eschnett/rsync-3.0.4/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = source /etc/profile.d/modules.sh && module load TACC aliaspattern = ^lslogin[12](\.ls\.tacc\.utexas\.edu)?$ # Source tree management sourcebasedir = /work/utexas/ph/@USER@ optionlist = lonestar.cfg thornlist = wavetoy-generic.th submitscript = lonestar.sh make = make -j2 # Simulation management basedir = /work/utexas/ph/@USER@/simulations # quota cpu = Xeon Intel Duo-Core 64-bit processors cpufreq = 2.66 flop/cycle = 4 ppn = 4 spn = 2 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 128 min-ppn = 4 allocation = TG-MCA02N014 queue = normal maxwalltime = 48:00:00 # maxqueueslots submit = /opt/lsf/bin/bsub < @SCRIPTFILE@ # run # run2 getstatus = /opt/lsf/bin/bjobs @JOB_ID@ stop = /opt/lsf/bin/bkill @JOB_ID@ submitpattern = <([[:digit:]]*)> statuspattern = @JOB_ID@[ \t]+[^ ]+[ \t]+(PEND|RUN) queuedpattern = " PEND " runningpattern = " RUN " scratchdir = scratchdir exechost = false exechostpattern = $^ stdout = /opt/lsf/bin/bpeek @JOB_ID@ mpirun = export CACTUS_STARTTIME=$(date +%s) && ibrun ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = true precmd = : postcmd = : stdout-follow = /opt/lsf/bin/bpeek -f @JOB_ID@ [louie] # Machine description nickname = louie name = Louie location = LONI, Tulane description = The LONI Linux cluster at Tulane webpage = http://www.loni.org/systems/system.php?system=Louie status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = louie.loni.org # iomachine trampoline = is rsynccmd = /home/eschnett/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^louie2(\.loni\.org)?$ # Source tree management sourcebasedir = /work/@USER@ optionlist = eric-mvapich2-new.cfg thornlist = wavetoy-generic.th submitscript = eric-mvapich2-new.sh make = make -j4 # Simulation management basedir = /work/@USER@/simulations # quota cpu = Dual Core Xeon 64-bit Processors cpufreq = 2.33 flop/cycle = 4 ppn = 4 spn = 2 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 4096 nodes = 64 min-ppn = 4 allocation = loni_numrel04 queue = checkpt maxwalltime = 72:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]louie2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.loui.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun_rsh -np @NUM_PROCS@ -hostfile ${MPI_NODEFILE} /bin/env MV2_ENABLE_AFFINITY=0 OMP_NUM_THREADS=@NUM_THREADS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.loui.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.loui.OU /var/spool/torque/spool/@JOB_ID@.loui.ER [mercury] # Machine description nickname = mercury name = Mercury location = NCSA description = An Itanium cluster at NCSA webpage = http://www.ncsa.uiuc.edu/UserInfo/Resources/Hardware/TGIA64LinuxCluster/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = login-hg.ncsa.teragrid.org # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^tg-login1(\.ncsa\.teragrid\.org)?$ # Source tree management sourcebasedir = /home/ac/@USER@ optionlist = mercury.cfg thornlist = wavetoy-generic.th submitscript = mercury.sh make = make -j2 # Simulation management basedir = /gpfs_scratch1/@USER@/simulations # quota cpu = Intel Itanium 2 cpufreq = 1.5 flop/cycle = 4 ppn = 2 spn = 1 mpn = 1 max-num-threads = 2 num-threads = 2 memory = 4096 nodes = 631 min-ppn = 2 allocation = out queue = dque maxwalltime = 24:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /scr/${USER}/${PBS_JOBID} # exechost # exechostpattern # stdout mpirun = export CACTUS_STARTTIME=$(date +%s) && time mpirun -np @PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ # stderr precmd = : postcmd = : # # stdout-follow [mileva] # Machine description nickname = mileva name = Mileva location = Old Dominion University, SURAgrid description = Linux webpage = https://gridportal.sura.org/gridsphere/dist/resources/mileva.html status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = mileva.hpc.odu.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = gsissh sshopts = -p 2222 localsshsetup = test $(grid-proxy-info -timeleft 2> /dev/null) -gt 0 2> /dev/null || grid-proxy-init environcmd = : sshsetup = : aliaspattern = ^mileva\.hpc\.odu\.edu$ # Source tree management sourcebasedir = /home/@USER@ optionlist = mileva.cfg thornlist = wavetoy-generic.th submitscript = mileva.sh make = make -j2 # Simulation management basedir = /home/@USER@/simulations # quota cpu = Dual Core AMD Opteron(tm) Processor 175 cpufreq = 1 # flop/cycle ppn = 2 # spn # mpn # max-num-threads # num-threads # memory nodes = 16 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = qsub -S /bin/bash -terse @SCRIPTFILE@ # run # run2 getstatus = qstat -u @USER@ | awk '$1 == @JOB_ID@ {print $1,$5}' stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^ *@JOB_ID@[ \t] queuedpattern = ^ *@JOB_ID@[ \t]+(qw|hqw) runningpattern = ^ *@JOB_ID@[ \t]+r scratchdir = scratchdir exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ # stdout mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun -np @PROCS@ -machinefile ${MPI_NODEFILE} ./start-exe -L 3 @PARFILE@ # stderr precmd = : postcmd = : # # stdout-follow [mn] # Machine description nickname = mn name = Mare Nostrum location = BSC description = PowerPC webpage = http://www.bsc.es/plantillaA.php?cat_id=5 status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = mn1.bsc.es # iomachine # trampoline rsynccmd = /home/uib68/uib68311/rsync-3.0.7/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^login[0-9]$ # Source tree management sourcebasedir = /home/uib68/@USER@ optionlist = mn.cfg thornlist = wavetoy-generic.th submitscript = mn.sh make = make -j2 # Simulation management basedir = /gpfs/scratch/uib68/@USER@ # quota cpu = IBM Power PC 970MP cpufreq = 2.3 flop/cycle = 4 ppn = 4 spn = 2 mpn = 2 #max-num-threads = num-threads # num-threads memory = 8192 nodes = 5120 min-ppn = 4 # allocation # queue # maxwalltime maxqueueslots = 20 submit = chmod a+x @SCRIPTFILE@ && qsub -A @ALLOCATION@ -q @QUEUE@ -t @WALLTIME@ --mode @(@NUM_THREADS@==4 ? "smp" : @NUM_THREADS@==2 ? "dual" : "vn")@ -n @NODES@ -M @USER@@alcf.anl.gov -O @SIMULATION_NAME@ -o @RUNDIR@/@SIMULATION_NAME@.out -e @RUNDIR@/@SIMULATION_NAME@.err --cwd @RUNDIR@-active --env=OMP_NUM_THREADS=@NUM_THREADS@ --env=BG_MAPPING=TXYZ @RUNDIR@-active/@EXECUTABLE@ -L 3 @PARFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " queued " runningpattern = " running " scratchdir = scratchdir # exechost # exechostpattern # stdout mpirun = # stderr precmd = : postcmd = : # stdout-follow [neptune] # Machine description nickname = neptune name = Neptune location = LONI, UNO description = The LONI IBM P5 machine at UNO webpage = http://www.hpc.lsu.edu/systems/system.php?system=Neptune status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = neptune.loni.org # iomachine trampoline = is rsynccmd = /work/default/eschnett/rsync-3.0.4/bin/rsync rsyncopts = -c sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^(neptune(\.loni\.org)?|l4f1n\d\d(\.sys\.loni\.org)?)$ # Source tree management sourcebasedir = /work/default/@USER@ optionlist = ducky.cfg thornlist = wavetoy-generic.th submitscript = ducky.sh make = mkdir -p /work/default/@USER@/tmp && env TMPDIR=/work/default/@USER@/tmp gmake -j4 # Simulation management basedir = /mnt/lpfs.nfs403/@USER@/simulations quota = 20 cpu = Power5+ cpufreq = 1.9 flop/cycle = 4 ppn = 8 # spn # mpn max-num-threads = 8 num-threads = 8 memory = 16384 nodes = 13 min-ppn = 8 allocation = loni_numrel04 queue = checkpt maxwalltime = 120:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq @JOB_ID@ stop = llcancel @JOB_ID@ submitpattern = "\"(.*)\"" statuspattern = ^ *@JOB_ID@ queuedpattern = " I " runningpattern = " R " scratchdir = scratchdir exechost = llq -f '%h' @JOB_ID@b | tail +3 | head -1 exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = /opt/freeware/bin/tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [nmi] # Machine description nickname = nmi name = NMI location = University of Wisconsin-Madison description = NMI Build and Test Lab webpage = https://nmi.cs.wisc.edu/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = nmi-s001.cs.wisc.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^nmi-.*\.cs\.wisc\.edu$ # Source tree management sourcebasedir = /home/@USER@ optionlist = nmi.cfg thornlist = wavetoy-generic.th submitscript = nmi.sh make = make -j2 # Simulation management basedir = /home/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 0 # spn # mpn # max-num-threads # num-threads # memory nodes = 1 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir # exechost # exechostpattern # stdout mpirun = # stderr precmd = : postcmd = : # stdout-follow [numrel01] # Machine description nickname = numrel01 name = numrel01 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel01.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel01(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel02] # Machine description nickname = numrel02 name = numrel02 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel02.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel02(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel03] # Machine description nickname = numrel03 name = numrel03 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel03.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel03(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel04] # Machine description nickname = numrel04 name = numrel04 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel04.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel04(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel05] # Machine description nickname = numrel05 name = numrel05 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel05.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel05(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel06] # Machine description nickname = numrel06 name = numrel06 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel06.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel06(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel07] # Machine description nickname = numrel07 name = numrel07 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel07.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel07(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel08] # Machine description nickname = numrel08 name = numrel08 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel08.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel08(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel09] # Machine description nickname = numrel09 name = numrel09 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel09.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel09(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [numrel10] # Machine description nickname = numrel10 name = numrel10 location = LSU, CCT description = One of the numrel workstations at the CCT # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = numrel10.cct.lsu.edu # iomachine # trampoline rsynccmd = /usr/local/packages/numrel/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^numrel10(\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = numrel-intel.cfg thornlist = wavetoy-generic.th submitscript = numrel-intel.sh make = make -j4 # Simulation management basedir = /home/@USER@/simulations quota = 50 cpu = Dual Core AMD Opteron Processor 280 cpufreq = 2.4 flop/cycle = 2 ppn = 4 spn = 2 # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 min-ppn = 1 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! run = /usr/local/packages/numrel/mpich-1.2.7p1/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./RunCmd run2 = exec /bin/env OMP_NUM_THREADS=@NUM_THREADS@ nice -19 @RUNDIR@/@EXECUTABLE@ $* getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [oliver] # Machine description nickname = oliver name = Oliver location = ULL description = The LONI Linux cluster at ULL webpage = http://www.loni.org/systems/system.php?system=Oliver status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = oliver.loni.org # iomachine trampoline = is rsynccmd = /home/eschnett/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^oliver2(\.loni\.org)?$ # Source tree management sourcebasedir = /work/@USER@ optionlist = eric-mvapich2-new.cfg thornlist = wavetoy-generic.th submitscript = eric-mvapich2-new.sh make = make -j4 # Simulation management basedir = /work/@USER@/simulations # quota cpu = Dual Core Xeon 64-bit Processors cpufreq = 2.33 flop/cycle = 4 ppn = 4 spn = 2 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 4096 nodes = 48 min-ppn = 4 allocation = loni_numrel04 queue = checkpt maxwalltime = 72:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]oliver2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.oliv.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun_rsh -np @NUM_PROCS@ -hostfile ${MPI_NODEFILE} /bin/env MV2_ENABLE_AFFINITY=0 OMP_NUM_THREADS=@NUM_THREADS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.oliv.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.olive.OU /var/spool/torque/spool/@JOB_ID@.oliv.ER [painter] # Machine description nickname = painter name = Painter location = LA Tech description = The LONI Linux cluster at LA Tech webpage = http://www.loni.org/systems/system.php?system=Painter status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = painter.loni.org # iomachine trampoline = is rsynccmd = /home/eschnett/rsync-3.0.2/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^painter2(\.loni\.org)?$ # Source tree management sourcebasedir = /work/@USER@ optionlist = eric-mvapich2.cfg thornlist = wavetoy-generic.th submitscript = eric-mvapich2.sh make = make -j4 # Simulation management basedir = /work/@USER@/simulations # quota cpu = Dual Core Xeon 64-bit Processors cpufreq = 2.33 flop/cycle = 4 ppn = 4 spn = 2 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 4096 nodes = 48 min-ppn = 4 allocation = loni_numrel04 queue = checkpt maxwalltime = 72:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]painter2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.olive.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun_rsh -hostfile ${PBS_NODEFILE} -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.olive.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.olive.OU /var/spool/torque/spool/@JOB_ID@.olive.ER [pelican] # Machine description nickname = pelican name = Pelican location = LSU description = An IBM P5 at LSU webpage = http://www.hpc.lsu.edu/systems/system.php?system=Pelican status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = pelican.lsu.edu # iomachine trampoline = is rsynccmd = /work/default/eschnett/rsync-3.0.4/bin/rsync rsyncopts = -c sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^pelican(\.lsu\.edu)?|peg304(\.hpc\.lsu\.edu)?$ # Source tree management sourcebasedir = /work/default/@USER@ optionlist = pelican.cfg thornlist = wavetoy-generic.th submitscript = pelican.sh make = mkdir -p /work/default/@USER@/tmp && env TMPDIR=/work/default/@USER@/tmp gmake -j4 # Simulation management basedir = /work/default/@USER@/simulations # quota cpu = Power5+ cpufreq = 1.9 flop/cycle = 4 ppn = 16 # spn # mpn max-num-threads = 16 num-threads = 8 memory = 32768 nodes = 16 min-ppn = 16 allocation = NoAllocation queue = MP5L maxwalltime = 168:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq @JOB_ID@ stop = llcancel @JOB_ID@ submitpattern = "\"(.*)\"" statuspattern = ^ *@JOB_ID@ queuedpattern = " I " runningpattern = " R " scratchdir = scratchdir exechost = llq -f '%h' @JOB_ID@b | tail +3 | head -1 exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = /opt/freeware/bin/tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [peyote] # Machine description nickname = peyote name = Peyote location = AEI description = The old AEI numrel cluster webpage = http://supercomputers.aei.mpg.de/peyote status = outdated # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = peyote.aei.mpg.de # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^(peyote|peyoteb|peyotec)(\.aei\.mpg\.de)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = peyote-mpich.cfg thornlist = wavetoy-generic.th submitscript = peyote-mpich.sh make = ssh peyoteb "cd $(pwd) && make -j4" # Simulation management basedir = /data2/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 2 # spn # mpn max-num-threads = 2 num-threads = 2 memory = 4096 nodes = 128 # min-ppn # allocation queue = old maxwalltime = 2400:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh $(echo @EXECHOST@ | sed -e "s/ic/kali/") cat /var/spool/pbs/spool/@JOB_ID@.peyot.OU mpirun = MPICHDIR=/opt/mpich/1.2.7/intel7.1-c034-f037 && time ${MPICHDIR}/bin/mpirun -np @PROCS@ -machinefile ${PBS_NODEFILE} ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh $(echo @EXECHOST@ | sed -e "s/ic/kali/") cat /var/spool/pbs/spool/@JOB_ID@.peyot.ER precmd = : postcmd = : stdout-follow = ssh $(echo @EXECHOST@ | sed -e "s/ic/kali/") tail -n 100 -f /var/spool/pbs/spool/@JOB_ID@.peyot.OU /var/spool/pbs/spool/@JOB_ID@.peyot.ER [philip] # Machine description nickname = philip name = Philip location = LSU description = 37 mixed compute node cluster meant for single node use webpage = http://www.hpc.lsu.edu/help/philipguide.php status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = philip1.hpc.lsu.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^(philip1)(\.hpc\.lsu\.edu)?$ # Source tree management sourcebasedir = /project/numrel/@USER@/philip optionlist = philip-mpich.cfg thornlist = wavetoy-generic.th submitscript = philip-mpich.sh make = make -j8 # Simulation management basedir = /work/@USER@/philip/simulations # quota cpu = Intel(R) Xeon(R) CPU X5570 cpufreq = 2.93 flop/cycle = 2 ppn = 8 spn = 1 mpn = 1 max-num-threads = 16 num-threads = 8 memory = 24576 nodes = 36 min-ppn = 8 allocation = NoAllocation queue = workq maxwalltime = 72:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]philip statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.philip1.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun -np @NUM_PROCS@ -machinefile $PBS_NODEFILE ./start-exe -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.philip1.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.philip1.OU /var/spool/torque/spool/@JOB_ID@.philip1.ER [poseidon] # Machine description nickname = poseidon name = Poseidon location = LONI, Tulane description = The LONI Linux cluster at UNO webpage = http://www.loni.org/systems/system.php?system=Poseidon status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = poseidon.loni.org # iomachine trampoline = is rsynccmd = /home/eschnett/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^poseidon2(\.loni\.org)?$ # Source tree management sourcebasedir = /work/@USER@ optionlist = eric-mvapich2-new.cfg thornlist = wavetoy-generic.th submitscript = eric-mvapich2-new.sh make = make -j4 # Simulation management basedir = /work/@USER@/simulations # quota cpu = Dual Core Xeon 64-bit Processors cpufreq = 2.33 flop/cycle = 4 ppn = 4 spn = 2 mpn = 1 max-num-threads = 4 num-threads = 4 memory = 4096 nodes = 64 min-ppn = 4 allocation = loni_numrel04 queue = checkpt maxwalltime = 72:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]poseidon2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.posei.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun_rsh -np @NUM_PROCS@ -hostfile ${MPI_NODEFILE} /bin/env MV2_ENABLE_AFFINITY=0 OMP_NUM_THREADS=@NUM_THREADS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.posei.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.posei.OU /var/spool/torque/spool/@JOB_ID@.posei.ER [prism] # Machine description nickname = prism name = Prism location = LITE description = One of the frontends of the large SGI Altix of LITE webpage = http://www.lite3d.com/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = 69.1.166.227 # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^prism[123](\.LITE|\.louisiana\.edu)?$ # Source tree management sourcebasedir = /store/home/@USER@ optionlist = prism.cfg thornlist = wavetoy-generic.th submitscript = prism.sh make = make -j8 # Simulation management basedir = /store/home/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 16 # spn # mpn max-num-threads = 16 num-threads = 16 memory = 98304 nodes = 1 min-ppn = 16 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && /opt/mpich/ch-p4/bin/mpirun -np @PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [q] # Machine description nickname = q name = Q location = CCT description = Ed's workstation # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = q.cct.lsu.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^q\.cct\.lsu\.edu?$ # Source tree management sourcebasedir = /Users/@USER@ optionlist = q.cfg thornlist = wavetoy-generic.th submitscript = q.sh make = make # Simulation management basedir = /Users/@USER@/simulations quota = 200 # cpu # cpufreq # flop/cycle ppn = 4 # spn # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = mpirun -machinefile localhost -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [queenbee] # Machine description nickname = queenbee name = Queen Bee location = LONI, LSU description = The large LONI Linux cluster webpage = http://www.loni.org/systems/system.php?system=QueenBee status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = qb4.loni.org # iomachine # trampoline rsynccmd = /home/eschnett/rsync-3.0.6/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^qb[0-9](\.loni\.org)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = eric-mvapich2-new.cfg thornlist = wavetoy-generic.th submitscript = eric-mvapich2-new.sh make = make -j4 # Simulation management basedir = /scratch/@USER@/simulations # quota cpu = Quad Core Xeon 64-bit Processors cpufreq = 2.33 flop/cycle = 4 ppn = 8 spn = 2 mpn = 1 max-num-threads = 8 num-threads = 8 memory = 8192 nodes = 256 min-ppn = 8 allocation = loni_cactus05 queue = checkpt maxwalltime = 48:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]qb2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.qb2.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun_rsh -np @NUM_PROCS@ -hostfile ${MPI_NODEFILE} /bin/env MV2_ENABLE_AFFINITY=0 OMP_NUM_THREADS=@NUM_THREADS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.qb2.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.qb2.OU /var/spool/torque/spool/@JOB_ID@.qb2.ER [ranger] # Machine description nickname = ranger name = Ranger location = TACC description = A very large Linux cluster at TACC webpage = http://www.tacc.utexas.edu/services/userguides/ranger/ status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = login4.ranger.tacc.utexas.edu # iomachine # trampoline rsynccmd = /share/home/00507/eschnett/rsync-3.0.7/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = source /etc/profile && source /usr/local/etc/profile aliaspattern = ^login[1234](\.ranger\.tacc\.utexas\.edu)?$ # Source tree management sourcebasedir = /work/00507/@USER@ optionlist = ranger-mvapich2.cfg thornlist = wavetoy-generic.th submitscript = ranger-mvapich2.sh make = make -j8 # Simulation management basedir = /scratch/00507/@USER@/simulations # quota cpu = AMD Opteron Quad-Core 64-bit processors cpufreq = 2.3 flop/cycle = 4 ppn = 16 spn = 4 mpn = 4 max-num-threads = 16 num-threads = 4 memory = 32768 nodes = 768 min-ppn = 16 allocation = loni_cactus05 queue = long maxwalltime = 48:00:00 maxqueueslots = 49 submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat -u @USER@ | awk '$1 == @JOB_ID@ {print $1,$5}' stop = qdel @JOB_ID@ submitpattern = Your job ([[:digit:]]+) \(.*?\) has been submitted statuspattern = ^@JOB_ID@[ \t] queuedpattern = ^@JOB_ID@[ \t]+(qw|hqw) runningpattern = ^@JOB_ID@[ \t]+r scratchdir = scratchdir exechost = false exechostpattern = $^ stdout = bpeek @JOB_ID@ mpirun = export CACTUS_STARTTIME=$(date +%s) && #time mpiexec -machinefile NODES -np @NUM_PROCS@ tacc_affinity ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = true precmd = : postcmd = : stdout-follow = bpeek -f @JOB_ID@ [redshift] # Machine description nickname = redshift name = Something location = Somewhere description = A generic machine # webpage status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = redshift.local # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^redshift(\.local)?$ # Source tree management sourcebasedir = /Users/@USER@ optionlist = generic.cfg thornlist = wavetoy-generic.th submitscript = generic.sh make = make # Simulation management basedir = /Users/@USER@/simulations quota = 10 cpu = Whatever cpufreq = 0 flop/cycle = 1 ppn = 1 spn = 1 mpn = 1 max-num-threads = 1 num-threads = 1 memory = 1024 nodes = 1 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && # mpirun -machinefile localhost -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [santaka] # Machine description nickname = santaka name = Santaka location = LSU description = An SGI Altix at LSU webpage = http://www.hpc.lsu.edu/systems/system.php?system=Santaka status = outdated # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = santaka.cct.lsu.edu # iomachine trampoline = is rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^santaka[12](\.cct\.lsu\.edu)?$ # Source tree management sourcebasedir = /home/@USER@ optionlist = santaka.cfg thornlist = wavetoy-generic.th submitscript = santaka.sh make = make -j2 # Simulation management basedir = /home/@USER@/simulations # quota cpu = Intel Itanium 2 cpufreq = 1.5 flop/cycle = 4 ppn = 30 # spn # mpn max-num-threads = 30 num-threads = 8 memory = 131072 nodes = 1 min-ppn = 1 # allocation queue = workq maxwalltime = 48:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /tmp/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.santak.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time mpirun -np @NUM_PROCS@ /usr/bin/env OMP_NUM_THREADS=${OMP_NUM_THREADS} ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.santak.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.santak.OU /var/spool/torque/spool/@JOB_ID@.santak.ER [sicortex] # Machine description nickname = sicortex name = SiCortex location = SiCortex, Houston description = SiCortex machine webpage = http://sicortex.com/ status = outdated # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = hou-snow iomachine = sicortex1 trampoline = sicortex1 rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^sci-m8n6\.scsystem$ # Source tree management sourcebasedir = /home/@USER@ optionlist = sicortex.cfg thornlist = wavetoy-generic.th submitscript = sicortex.sh make = make -j2 # Simulation management basedir = /home/@USER@/simulations # quota cpu = SiCortex ICE9B V1.0 FPU V0.1 cpufreq = 0.7 # flop/cycle ppn = 6 # spn # mpn max-num-threads = 6 num-threads = 6 memory = 4096 nodes = 235 min-ppn = 6 # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = /home/@USER@/scratch exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && srun -p hou-comp -N $nodes -c $cores @RUNDIR@/@EXECUTABLE@ -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [sicortex1] # Machine description nickname = sicortex1 name = SiCortex1 location = SiCortex firewall, Houston description = First firewall for SiCortex machine webpage = http://sicortex.com/ status = trampoline # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = ssh.houston.sicortex.com # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^hou-IT$ # Source tree management sourcebasedir = /home/@USER@/eschnett optionlist = no-such-optionlist.cfg thornlist = wavetoy-generic.th submitscript = no-such-submitscript.sh make = make # Simulation management basedir = /home/@USER@/eschnett/simulations # quota # cpu # cpufreq # flop/cycle ppn = 0 # spn # mpn # max-num-threads # num-threads # memory nodes = 0 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = no-such-command # run # run2 getstatus = no-such-pattern stop = no-such-command submitpattern = no-such-pattern statuspattern = no-such-pattern queuedpattern = no-such-pattern runningpattern = no-such-pattern scratchdir = no-such-directory # exechost # exechostpattern # stdout mpirun = # stderr precmd = : postcmd = : # stdout-follow [sicortex2] # Machine description nickname = sicortex2 name = SiCortex2 location = SSiCortex firewall, Houston description = Second firewall for SiCortex machine webpage = http://sicortex.com/ status = trampoline # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = hou-ssp iomachine = sicortex1 trampoline = sicortex1 rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^hou-sc\d\d\d\d-ssp\.houston\.sicortex\.com$ # Source tree management sourcebasedir = /home/@USER@/eschnett optionlist = no-such-optionlist.cfg thornlist = wavetoy-generic.th submitscript = no-such-submitscript.sh make = make # Simulation management basedir = /home/@USER@/eschnett/simulations # quota # cpu # cpufreq # flop/cycle ppn = 0 # spn # mpn # max-num-threads # num-threads # memory nodes = 0 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = no-such-command # run # run2 getstatus = no-such-pattern stop = no-such-command submitpattern = no-such-pattern statuspattern = no-such-pattern queuedpattern = no-such-pattern runningpattern = no-such-pattern scratchdir = no-such-directory # exechost # exechostpattern # stdout mpirun = # stderr precmd = : postcmd = : # stdout-follow [spider] # Machine description nickname = spider name = Spider location = LONI, LSU description = A visualisation workstation at LSU # webpage status = storage # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = spider.loni.org # iomachine trampoline = is rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^spider\.loni\.org$ # Source tree management sourcebasedir = /home/@USER@ optionlist = no-such-optionlise.cfg thornlist = wavetoy-generic.th submitscript = no-such-submitscript.sh make = make # Simulation management basedir = /scratch/@USER@/simulations # quota cpu = Intel(R) Xeon(R) CPU E5335 @ 2.00GHz cpufreq = 2 # flop/cycle ppn = 8 # spn # mpn # max-num-threads # num-threads memory = 16384 nodes = 1 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [steele] # Machine description nickname = steele name = Steele location = Purdue description = A Linux cluster at Purdue webpage = http://www.rcac.purdue.edu/userinfo/resources/steele/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = tg-steele.rcac.purdue.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^tg-steele\.rcac\.purdue\.edu$ # Source tree management sourcebasedir = /autohome/u114/@USER@ optionlist = steele.cfg thornlist = wavetoy-generic.th submitscript = steele.sh make = make -j4 # Simulation management basedir = /scratch/scratch96/e/${USER}/simulations # quota cpu = Dual 2.33 GHz Quad-Core Intel E5410 cpufreq = 2.33 # flop/cycle ppn = 8 spn = 2 mpn = 1 max-num-threads = 8 num-threads = 8 memory = 16384 nodes = 224 min-ppn = 8 allocation = TG-MCA02N014 queue = tg_workq maxwalltime = 720:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]tezpur2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /scratch/scratch96/e/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/pbs/spool/@JOB_ID@.steele.OU mpirun = stderr = ssh @EXECHOST@ cat /var/spool/pbs/spool/@JOB_ID@.steele.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/pbs/spool/@JOB_ID@.steele.OU /var/spool/pbs/spool/@JOB_ID@.steele.ER [surveyor] # Machine description nickname = surveyor name = Surveyor location = ALCF description = Blue Gene/P webpage = http://www.alcf.anl.gov/resources/storage.php status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = surveyor.alcf.anl.gov # iomachine # trampoline rsynccmd = /home/eschnett/rsync-3.0.4/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^login[0-9]\.surveyor # Source tree management sourcebasedir = /home/@USER@ optionlist = intrepid-xlc.cfg thornlist = wavetoy-generic.th submitscript = intrepid-xlc.sh make = make -j2 # Simulation management basedir = /pvfs-surveyor/@USER@/simulations # quota cpu = PowerPC 450 cpufreq = 0.85 flop/cycle = 4 ppn = 4 # spn # mpn max-num-threads = 4 num-threads = 4 memory = 2048 nodes = 1024 min-ppn = 4 allocation = petascaling queue = default maxwalltime = 1:00:00 maxqueueslots = 20 submit = chmod a+x @SCRIPTFILE@ && qsub -A @ALLOCATION@ -q @QUEUE@ -t @WALLTIME@ --mode @(@NUM_THREADS@==4 ? "smp" : @NUM_THREADS@==2 ? "dual" : "vn")@ -n @NODES@ -M @USER@@alcf.anl.gov -O @SIMULATION_NAME@ -o @RUNDIR@/@SIMULATION_NAME@.out -e @RUNDIR@/@SIMULATION_NAME@.err --cwd @RUNDIR@-active --env=OMP_NUM_THREADS=@NUM_THREADS@ --env=BG_MAPPING=TXYZ @RUNDIR@-active/@EXECUTABLE@ -L 3 @PARFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+) statuspattern = ^@JOB_ID@[. ] queuedpattern = " queued " runningpattern = " running " scratchdir = scratchdir # exechost # exechostpattern # stdout mpirun = cobalt-mpirun -verbose 2 -np @NUM_PROCS@ -mode "$mode" -env OMP_NUM_THREADS=${OMP_NUM_THREADS}:CACTUS_STARTTIME=${CACTUS_STARTTIME}:BG_MAPPING=TXYZ ./@EXECUTABLE@ -L 3 @PARFILE@ && cobalt-mpirun -verbose 2 -np @NUM_PROCS@ -mode "$mode" -env OMP_NUM_THREADS=${OMP_NUM_THREADS}:CACTUS_STARTTIME=${CACTUS_STARTTIME}:BG_MAPPING=TXYZ ./@EXECUTABLE@ -L 3 @PARFILE@ # stderr precmd = : postcmd = : # stdout-follow [tezpur] # Machine description nickname = tezpur name = Tezpur location = LSU description = The LSU HPC Linux cluster webpage = http://www.hpc.lsu.edu/systems/system.php?system=Tezpur status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = tezpur1.hpc.lsu.edu # iomachine trampoline = is rsynccmd = /home/eschnett/rsync-3.0.2/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^tezpur1(\.hpc\.lsu\.edu)?$ # Source tree management sourcebasedir = /project/numrel/@USER@/tezpur optionlist = eric-mvapich2.cfg thornlist = wavetoy-generic.th submitscript = eric-mvapich2.sh make = make -j4 # Simulation management basedir = /work/@USER@/tezpur/simulations # quota cpu = Dual Core Xeon 64-bit Processors cpufreq = 2.33 flop/cycle = 4 ppn = 4 spn = 2 mpn = 1 max-num-threads = 30 num-threads = 4 memory = 4096 nodes = 180 min-ppn = 4 allocation = NoAllocation queue = checkpt maxwalltime = 72:00:00 # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = ([[:digit:]]+)[.]tezpur2 statuspattern = ^@JOB_ID@[. ] queuedpattern = " Q " runningpattern = " R " scratchdir = /var/scratch/${USER}/${PBS_JOBID} exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.tezpu.OU mpirun = export CACTUS_STARTTIME=$(date +%s) && time ${MPICHDIR}/bin/mpirun_rsh -hostfile ${PBS_NODEFILE} -np @NUM_PROCS@ ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = ssh @EXECHOST@ cat /var/spool/torque/spool/@JOB_ID@.tezpu.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/torque/spool/@JOB_ID@.tezpu.OU /var/spool/torque/spool/@JOB_ID@.tezpu.ER [tgsdsc] # Machine description nickname = tgsdsc name = TGSDSC location = SDSC description = The TeraGrid cluster at SDSC webpage = http://www.sdsc.edu/us/resources/ia64/ status = experimental # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = tg-login.sdsc.teragrid.org # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^tg-login(\.sdsc\.teragrid\.org)?$ # Source tree management sourcebasedir = /users/@USER@ optionlist = tgsdsc.cfg thornlist = wavetoy-generic.th submitscript = tgsdsc.sh make = gmake -j2 # Simulation management basedir = /users/@USER@/simulations # quota cpu = Intel Itanium 2 cpufreq = 1.5 flop/cycle = 4 ppn = 2 # spn # mpn max-num-threads = 2 num-threads = 2 memory = 4096 nodes = 262 # min-ppn # allocation queue = XXX # maxwalltime # maxqueueslots submit = qsub @SCRIPTFILE@ # run # run2 getstatus = qstat @JOB_ID@ stop = qdel @JOB_ID@ submitpattern = "\"(.*)\"" statuspattern = ^ *@JOB_ID@ queuedpattern = " Q " runningpattern = " R " scratchdir = scratchdir exechost = qstat -f @JOB_ID@ exechostpattern = exec_host = (\w+)/ stdout = ssh @EXECHOST@ cat /var/spool/PBS/spool/@JOB_ID@.XXX.OU mpirun = stderr = ssh @EXECHOST@ cat /var/spool/PBS/spool/@JOB_ID@.XXX.ER precmd = : postcmd = : stdout-follow = ssh @EXECHOST@ tail -n 100 -f /var/spool/PBS/spool/@JOB_ID@.XXX.OU /var/spool/PBS/spool/@JOB_ID@.XXX.ER [tungsten] # Machine description nickname = tungsten name = Tungsten location = NCSA description = A large Linux cluster at NCSA webpage = http://www.ncsa.uiuc.edu/UserInfo/Resources/Hardware/XeonCluster/ status = outdated # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = tungsten.ncsa.uiuc.edu # iomachine # trampoline rsynccmd = rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^tun[abcde](\.ncsa\.uiuc\.edu)?$ # Source tree management sourcebasedir = /u/ac/@USER@ optionlist = tungsten.cfg thornlist = wavetoy-generic.th submitscript = tungsten.sh make = make -j2 # Simulation management basedir = /u/ac/@USER@/scratch-global/simulations # quota # cpu # cpufreq # flop/cycle ppn = 2 # spn # mpn max-num-threads = 2 num-threads = 2 memory = 3072 nodes = 512 min-ppn = 2 allocation = out queue = normal maxwalltime = 48:00:00 # maxqueueslots submit = bsub < @SCRIPTFILE@ # run # run2 getstatus = bjobs @JOB_ID@ stop = bkill @JOB_ID@ submitpattern = <([[:digit:]]*)> statuspattern = ^ *@JOB_ID@ (?!.* DONE ) queuedpattern = " PEND " runningpattern = " RUN " scratchdir = /cfs/scratch/batch/${LSB_JOBID} exechost = false exechostpattern = $^ stdout = bpeek @JOB_ID@ mpirun = export CACTUS_STARTTIME=$(date +%s) && cmpirun -np @PROCS@ -lsf ${scale} ./@EXECUTABLE@ -L 3 @PARFILE@ stderr = true precmd = : postcmd = : stdout-follow = bpeek -f @JOB_ID@ [vip] # Machine description nickname = vip name = VIP location = RZG description = An IBM Power6 at the RZG webpage = https://www.rzg.mpg.de/computing/hardware/Power6/ status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = vip.rzg.mpg.de # iomachine # trampoline rsynccmd = /u/eschnett/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = export PATH=${PATH}:/afs/ipp/rs_aix53/bin:/afs/ipp/rs_aix53/soft/gnu/bin:/afs/ipp/rs_aix53/soft/X11/bin aliaspattern = ^vip(\d\d\d)?(i)?(\.rzg\.mpg\.de)$ # Source tree management sourcebasedir = /u/@USER@ optionlist = vip.cfg thornlist = wavetoy-generic.th submitscript = vip.sh make = gmake -j16 # Simulation management basedir = /ptmp/@USER@/simulations # quota cpu = Power6 cpufreq = 4.7 flop/cycle = 2 ppn = 64 # spn # mpn max-num-threads = 64 num-threads = 16 memory = 65536 nodes = 205 min-ppn = 64 # allocation # queue maxwalltime = 24:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq p6io1.@JOB_ID@ stop = llcancel p6io1.@JOB_ID@ submitpattern = "\".*[.](\d+)\"" statuspattern = ^p6io1[.]@JOB_ID@ queuedpattern = " I " runningpattern = " R " scratchdir = /ptmp/@USER@/@JOB_ID@ # exechost # exechostpattern # stdout mpirun = export CACTUS_STARTTIME=$(date +%s) && /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ # stderr precmd = : postcmd = : # stdout-follow [wilson] # Machine description nickname = wilson name = Wilson location = Caltech description = Christian D. Ott's workstation webpage = http://www.physicstoday.org/obits/notice_180.shtml status = personal # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = wilson.tapir.caltech.edu # iomachine # trampoline rsynccmd = /home/schnetter/bin/rsync rsyncopts = sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^wilson\.tapir\.caltech\.edu # Source tree management sourcebasedir = /home/@USER@ optionlist = wilson.cfg thornlist = wavetoy-generic.th submitscript = wilson.sh make = make -j4 # Simulation management basedir = /data/@USER@/simulations # quota # cpu # cpufreq # flop/cycle ppn = 4 # spn # mpn max-num-threads = 4 num-threads = 4 memory = 8192 nodes = 1 # min-ppn # allocation # queue # maxwalltime # maxqueueslots submit = sh @SCRIPTFILE@ < /dev/null > /dev/null 2> /dev/null & echo $! # run # run2 getstatus = ps @JOB_ID@ stop = kill @JOB_ID@ submitpattern = (.*) statuspattern = "^ *@JOB_ID@ " queuedpattern = $^ runningpattern = ^ scratchdir = scratchdir exechost = echo localhost exechostpattern = (.*) stdout = cat @SIMULATION_NAME@.out mpirun = export CACTUS_STARTTIME=$(date +%s) && ${MPICH_DIR}/bin/mpirun -np @NUM_PROCS@ -machinefile localhost ./start-exe -L 3 @PARFILE@ stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err [zeke] # Machine description nickname = zeke name = Zeke location = LONI, UL Lafayette description = The LONI IBM P5 machine at UL Lafayette webpage = http://www.loni.org/systems/system.php?system=Zeke status = production # Access to this machine user = mwt email = mthomas@cct.lsu.edu hostname = zeke.loni.org # iomachine trampoline = is rsynccmd = /work/default/eschnett/rsync-3.0.4/bin/rsync rsyncopts = -c sshcmd = ssh sshopts = localsshsetup = : environcmd = : sshsetup = : aliaspattern = ^(zeke(\.loni\.org)?|l3f1n\d\d(\.sys\.loni\.org)?)$ # Source tree management sourcebasedir = /work/default/@USER@ optionlist = ducky.cfg thornlist = wavetoy-generic.th submitscript = ducky.sh make = mkdir -p /work/default/@USER@/tmp && env TMPDIR=/work/default/@USER@/tmp gmake -j4 # Simulation management basedir = /mnt/lpfs.nfs302/@USER@/simulations quota = 20 cpu = Power5+ cpufreq = 1.9 flop/cycle = 4 ppn = 8 # spn # mpn max-num-threads = 8 num-threads = 8 memory = 16384 nodes = 13 min-ppn = 8 allocation = loni_numrel04 queue = checkpt maxwalltime = 120:00:00 # maxqueueslots submit = llsubmit @SCRIPTFILE@ # run # run2 getstatus = llq @JOB_ID@ stop = llcancel @JOB_ID@ submitpattern = "\"(.*)\"" statuspattern = ^ *@JOB_ID@ queuedpattern = " I " runningpattern = " R " scratchdir = scratchdir exechost = llq -f '%h' @JOB_ID@b | tail +3 | head -1 exechostpattern = (.*) mpirun = export CACTUS_STARTTIME=$(date +%s) && /usr/bin/poe ./@EXECUTABLE@ -L 3 @PARFILE@ stdout = cat @SIMULATION_NAME@.out stderr = cat @SIMULATION_NAME@.err precmd = : postcmd = : stdout-follow = /opt/freeware/bin/tail -n 100 -f @SIMULATION_NAME@.out @SIMULATION_NAME@.err