diff --git a/.travis.yml b/.travis.yml
index 4e8e1bf9..305f4e2d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,25 +2,23 @@
language: python
sudo: false
python:
+ - 3.7-dev
- 3.6
- 3.5
- 3.4
env:
- JHUB_VER=0.7.2
- JHUB_VER=0.8.1
- - JHUB_VER=0.9.0b2
+ - JHUB_VER=0.9.1
matrix:
include:
- python: 3.7-dev
- env: JHUB_VER=0.9.0b2
- - python: 3.6
env: JHUB_VER=master
exclude:
- python: 3.4
- env: JHUB_VER=0.9.0b2
+ env: JHUB_VER=0.9.1
allow_failures:
- env: JHUB_VER=master
- - python: 3.7-dev
before_install:
- npm install -g configurable-http-proxy
@@ -28,7 +26,8 @@ before_install:
- git clone --quiet --branch $JHUB_VER https://github.com/jupyter/jupyterhub.git jupyterhub
install:
# Don't let requirements pull in tornado 5 yet except for jupyterhub master
- - if [ $JHUB_VER != "master" -a $JHUB_VER != "0.9.0b2" ]; then pip install "tornado<5.0"; fi
+ - if [ $JHUB_VER != "master" -a $JHUB_VER != "0.9.1" ]; then pip install "tornado<5.0"; fi
+ - pip install jsonschema!=3.0.0a1 # issue #110, remove later
- pip install --pre -f travis-wheels/wheelhouse -r jupyterhub/dev-requirements.txt
- pip install --pre -e jupyterhub
diff --git a/README.md b/README.md
index 4205400a..3feb88f4 100644
--- a/README.md
+++ b/README.md
@@ -24,6 +24,8 @@ This package formerly included WrapSpawner and ProfilesSpawner, which provide me
## Batch Spawners
+For information on the specific spawners, see [SPAWNERS.md](SPAWNERS.md).
+
### Overview
This file contains an abstraction layer for batch job queueing systems (`BatchSpawnerBase`), and implements
@@ -82,6 +84,19 @@ to run Jupyter notebooks on an academic supercomputer cluster.
c.TorqueSpawner.state_exechost_exp = r'int-\1.mesabi.xyz.edu'
```
+### Security
+
+Unless otherwise stated for a specific spawner, assume that spawners
+*do* evaluate the user's shell environment and thus do not fulfill
+the [JupyterHub security requirements for untrusted
+users](https://jupyterhub.readthedocs.io/en/stable/reference/websecurity.html):
+some (most?) spawners *do* start a user shell, which will execute
+arbitrary user environment configuration (``.profile``, ``.bashrc``,
+and the like) unless users have no access to their own cluster
+account. This is something we are working on.
+
+
## Provide different configurations of BatchSpawner
### Overview
@@ -141,11 +156,33 @@ clusters, as well as an option to run a local notebook directly on the jupyterhu
### dev (requires minimum JupyterHub 0.7.2 and Python 3.4)
+Added (user)
+
* Add Jinja2 templating as an option for all scripts and commands. If `{{` or `{%` appears anywhere in the string, it is rendered as a Jinja2 template.
-* Update Slurm batch script. Now, the single-user notebook is run in a job step, with a wrapper of `srun`. This may need to be removed if you don't want environment variables limited.
* Add new option `exec_prefix`, which defaults to `sudo -E -u {username}`. This replaces the explicit `sudo` in every batch command; changes to local commands may be needed.
+* New option: `req_keepvars_extra`, which allows keeping extra variables in addition to what is defined by JupyterHub itself (addition of variables to keep instead of replacement); see the sketch below. #99
+* Add `req_prologue` and `req_epilogue` options, inserted into the batch script before/after the main `jupyterhub-singleuser` command; these allow generic setup/cleanup without overriding the entire script. #96
+* SlurmSpawner: add the `req_reservation` option. #
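+
+A minimal sketch of the new user-facing options in `jupyterhub_config.py` (the values here are hypothetical placeholders):
+
+```python
+c.SlurmSpawner.req_prologue = 'module load anaconda3'   # runs before jupyterhub-singleuser
+c.SlurmSpawner.req_epilogue = 'echo "job finished"'     # runs after it exits
+c.SlurmSpawner.req_reservation = 'jupyter'              # rendered as "#SBATCH --reservation=jupyter"
+c.SlurmSpawner.req_keepvars_extra = 'LD_LIBRARY_PATH'   # appended to the default keepvars list
+```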
+
+Added (developer)
+
* Add many more tests.
+* Add a new page `SPAWNERS.md` with information on specific spawners. Begin trying to collect a list of spawner-specific contacts. #97
+
+Changed
+
* Update minimum requirements to JupyterHub 0.8.1 and Python 3.4.
+* Update Slurm batch script. Now the single-user notebook is run in a job step, wrapped with `srun`. This wrapper may need to be removed by setting `req_srun=''` if you don't want the environment limited.
+* Pass the environment dictionary to the queue and cancel commands as well. This is mostly the user's environment, but may be useful to these commands in some cases. #108 #111. If these environment variables were used for authentication as an admin, be aware that there are pre-existing security issues: they may be passed to the user via the batch submit command. See #82.
+
+Fixed
+
+* Improve debugging on failed submission by raising errors that include the error messages from the commands. #106
+* Many other changes not visible to users or developers. #107 #106 #100
+* In Travis CI, blacklist `jsonschema==3.0.0a1` because it breaks tests. #110
+
+Removed
+
### v0.8.1 (bugfix release)
diff --git a/SPAWNERS.md b/SPAWNERS.md
new file mode 100644
index 00000000..6548eb34
--- /dev/null
+++ b/SPAWNERS.md
@@ -0,0 +1,60 @@
+# Notes on specific spawners
+
+## `TorqueSpawner`
+
+Maintainers:
+
+
+## `MoabSpawner`
+
+Subclass of TorqueSpawner
+
+Maintainers:
+
+
+## `SlurmSpawner`
+
+Maintainers: @rkdarst
+
+This spawner enforces the environment if `srun` is used to wrap the
+spawner command, which is the default. If you *do* want the user's
+environment to be used, set `req_srun=''`. However, this is not
+perfect: a bash shell is still started as the user, which could run
+arbitrary startup files, define shell aliases for `srun`, etc.
+
+Use of `srun` is required for the notebook server to terminate gracefully.
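+
+A minimal sketch in `jupyterhub_config.py`, assuming you accept the
+trade-off described above:
+
+```python
+# Caution: the user's own shell environment will then apply to the job.
+c.SlurmSpawner.req_srun = ''
+```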
+
+
+## `GridengineSpawner`
+
+Maintainers:
+
+
+## `CondorSpawner`
+
+Maintainers:
+
+
+## `LsfSpawner`
+
+Maintainers:
+
+
+# Checklist for making spawners
+
+Please document each of these items under the spawner list above.
+Even if the answer is "OK", we need to track the status of all
+spawners; if something is a bug, users really need to know.
+
+- Does your spawner read the shell environment before starting? (See
+  [JupyterHub
+  Security](https://jupyterhub.readthedocs.io/en/stable/reference/websecurity.html).)
+
+- Does your spawner send SIGTERM to the jupyterhub-singleuser process
+  before SIGKILL? It should, so that the process can terminate
+  gracefully. Add `echo "terminated gracefully"` to the end of the
+  batch script - if you see this in your singleuser server output, you
+  know that you DO receive SIGTERM and terminate gracefully. If your
+  batch system cannot automatically send SIGTERM before SIGKILL, PR
+  #75 might help here; ask for it to be finished.
+
diff --git a/batchspawner/batchspawner.py b/batchspawner/batchspawner.py
index ac2a68f9..9f34854c 100644
--- a/batchspawner/batchspawner.py
+++ b/batchspawner/batchspawner.py
@@ -17,6 +17,7 @@
"""
import pwd
import os
+import re
import xml.etree.ElementTree as ET
@@ -46,7 +47,7 @@ def format_template(template, *args, **kwargs):
"""
if isinstance(template, Template):
return template.render(*args, **kwargs)
- elif '{{' in template or '{%' in template:
+ elif '{{' in template or '{%' in template:
return Template(template).render(*args, **kwargs)
return template.format(*args, **kwargs)
@@ -78,52 +79,52 @@ class BatchSpawnerBase(Spawner):
# override default server ip since batch jobs normally running remotely
ip = Unicode("0.0.0.0", help="Address for singleuser server to listen at").tag(config=True)
- exec_prefix = Unicode('sudo -E -u {username}', \
+ exec_prefix = Unicode('sudo -E -u {username}',
help="Standard executon prefix (e.g. the default sudo -E -u {username})"
).tag(config=True)
# all these req_foo traits will be available as substvars for templated strings
- req_queue = Unicode('', \
+ req_queue = Unicode('',
help="Queue name to submit job to resource manager"
).tag(config=True)
- req_host = Unicode('', \
+ req_host = Unicode('',
help="Host name of batch server to submit job to resource manager"
).tag(config=True)
- req_memory = Unicode('', \
+ req_memory = Unicode('',
help="Memory to request from resource manager"
).tag(config=True)
- req_nprocs = Unicode('', \
+ req_nprocs = Unicode('',
help="Number of processors to request from resource manager"
).tag(config=True)
- req_ngpus = Unicode('', \
+ req_ngpus = Unicode('',
help="Number of GPUs to request from resource manager"
).tag(config=True)
- req_runtime = Unicode('', \
+ req_runtime = Unicode('',
help="Length of time for submitted job to run"
).tag(config=True)
- req_partition = Unicode('', \
+ req_partition = Unicode('',
help="Partition name to submit job to resource manager"
).tag(config=True)
- req_account = Unicode('', \
+ req_account = Unicode('',
help="Account name string to pass to the resource manager"
).tag(config=True)
- req_options = Unicode('', \
+ req_options = Unicode('',
help="Other options to include into job submission script"
).tag(config=True)
- req_prologue = Unicode('', \
+ req_prologue = Unicode('',
help="Script to run before single user server starts."
).tag(config=True)
- req_epilogue = Unicode('', \
+ req_epilogue = Unicode('',
help="Script to run after single user server ends."
).tag(config=True)
@@ -143,7 +144,12 @@ def _req_homedir_default(self):
def _req_keepvars_default(self):
return ','.join(self.get_env().keys())
- batch_script = Unicode('', \
+ req_keepvars_extra = Unicode(
+ help="Extra environment variables which should be configured, "
+ "added to the defaults in keepvars, "
+ "comma separated list.")
+
+ batch_script = Unicode('',
help="Template for job submission script. Traits on this class named like req_xyz "
"will be substituted in the template for {xyz} using string.Formatter. "
"Must include {cmd} which will be replaced with the jupyterhub-singleuser command line."
@@ -164,9 +170,11 @@ def get_req_subvars(self):
subvars = {}
for t in reqlist:
subvars[t[4:]] = getattr(self, t)
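+ # Append req_keepvars_extra to the default keepvars list (comma separated)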
+ if subvars.get('keepvars_extra'):
+ subvars['keepvars'] += ',' + subvars['keepvars_extra']
return subvars
- batch_submit_cmd = Unicode('', \
+ batch_submit_cmd = Unicode('',
help="Command to run to submit batch scripts. Formatted using req_xyz traits as {xyz}."
).tag(config=True)
@@ -189,8 +197,8 @@ def run_command(self, cmd, input=None, env=None):
# Apparently harmless
pass
proc.stdin.close()
- out = yield proc.stdout.read_until_close()
- eout = yield proc.stderr.read_until_close()
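+ # Read stdout and stderr concurrently so neither pipe blocks the other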
+ out, eout = yield [proc.stdout.read_until_close(),
+ proc.stderr.read_until_close()]
proc.stdout.close()
proc.stderr.close()
eout = eout.decode().strip()
@@ -198,8 +206,11 @@ def run_command(self, cmd, input=None, env=None):
err = yield proc.wait_for_exit()
except CalledProcessError:
self.log.error("Subprocess returned exitcode %s" % proc.returncode)
+ self.log.error('Stdout:')
+ self.log.error(out)
+ self.log.error('Stderr:')
self.log.error(eout)
- raise RuntimeError(eout)
+ raise RuntimeError('{} exit status {}: {}'.format(cmd, proc.returncode, eout))
if err != 0:
return err # exit error?
else:
@@ -215,8 +226,8 @@ def _get_batch_script(self, **subvars):
@gen.coroutine
def submit_batch_script(self):
subvars = self.get_req_subvars()
- cmd = self.exec_prefix + ' ' + self.batch_submit_cmd
- cmd = format_template(cmd, **subvars)
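+ # Format the prefix and the submit command as separate templates, then join them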
+ cmd = ' '.join((format_template(self.exec_prefix, **subvars),
+ format_template(self.batch_submit_cmd, **subvars)))
subvars['cmd'] = self.cmd_formatted_for_batch()
if hasattr(self, 'user_options'):
subvars.update(self.user_options)
@@ -233,7 +244,7 @@ def submit_batch_script(self):
return self.job_id
# Override if your batch system needs something more elaborate to read the job status
- batch_query_cmd = Unicode('', \
+ batch_query_cmd = Unicode('',
help="Command to run to read job status. Formatted using req_xyz traits as {xyz} "
"and self.job_id as {job_id}."
).tag(config=True)
@@ -246,11 +257,11 @@ def read_job_state(self):
return self.job_status
subvars = self.get_req_subvars()
subvars['job_id'] = self.job_id
- cmd = self.exec_prefix + ' ' + self.batch_query_cmd
- cmd = format_template(cmd, **subvars)
+ cmd = ' '.join((format_template(self.exec_prefix, **subvars),
+ format_template(self.batch_query_cmd, **subvars)))
self.log.debug('Spawner querying job: ' + cmd)
try:
- out = yield self.run_command(cmd)
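+ # Also pass the user environment to the query command (#108, #111)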
+ out = yield self.run_command(cmd, env=self.get_env())
self.job_status = out
except Exception as e:
self.log.error('Error querying job ' + self.job_id)
@@ -266,10 +277,10 @@ def read_job_state(self):
def cancel_batch_job(self):
subvars = self.get_req_subvars()
subvars['job_id'] = self.job_id
- cmd = self.exec_prefix + ' ' + self.batch_cancel_cmd
- cmd = format_template(cmd, **subvars)
+ cmd = ' '.join((format_template(self.exec_prefix, **subvars),
+ format_template(self.batch_cancel_cmd, **subvars)))
self.log.info('Cancelling job ' + self.job_id + ': ' + cmd)
- yield self.run_command(cmd)
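+ # The cancel command receives the environment too; see the caution in get_env()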
+ yield self.run_command(cmd, env=self.get_env())
def load_state(self, state):
"""load job_id from state"""
@@ -324,7 +335,7 @@ def poll(self):
self.clear_state()
return 1
- startup_poll_interval = Float(0.5, \
+ startup_poll_interval = Float(0.5,
help="Polling interval (seconds) to check job state during startup"
).tag(config=True)
@@ -334,8 +345,9 @@ def start(self):
if self.user and self.user.server and self.user.server.port:
self.port = self.user.server.port
self.db.commit()
- elif (jupyterhub.version_info < (0,7) and not self.user.server.port) or \
- (jupyterhub.version_info >= (0,7) and not self.port):
+ elif (jupyterhub.version_info < (0,7) and not self.user.server.port) or (
+ jupyterhub.version_info >= (0,7) and not self.port
+ ):
self.port = random_port()
self.db.commit()
job = yield self.submit_batch_script()
@@ -356,8 +368,8 @@ def start(self):
else:
self.log.warn('Job ' + self.job_id + ' neither pending nor running.\n' +
self.job_status)
- raise RuntimeError('The Jupyter batch job has disappeared '
- ' while pending in the queue or died immediately '
+ raise RuntimeError('The Jupyter batch job has disappeared'
+ ' while pending in the queue or died immediately'
' after starting.')
yield gen.sleep(self.startup_poll_interval)
@@ -394,7 +406,6 @@ def stop(self, now=False):
self.job_id, self.current_ip, self.port)
)
-import re
class BatchSpawnerRegexStates(BatchSpawnerBase):
"""Subclass of BatchSpawnerBase that uses config-supplied regular expressions
@@ -431,13 +442,15 @@ def state_ispending(self):
assert self.state_pending_re, "Misconfigured: define state_running_re"
if self.job_status and re.search(self.state_pending_re, self.job_status):
return True
- else: return False
+ else:
+ return False
def state_isrunning(self):
assert self.state_running_re, "Misconfigured: define state_running_re"
if self.job_status and re.search(self.state_running_re, self.job_status):
return True
- else: return False
+ else:
+ return False
def state_gethost(self):
assert self.state_exechost_re, "Misconfigured: define state_exechost_re"
@@ -450,6 +463,7 @@ def state_gethost(self):
else:
return match.expand(self.state_exechost_exp)
+
class TorqueSpawner(BatchSpawnerRegexStates):
batch_script = Unicode("""#!/bin/sh
#PBS -q {queue}@{host}
@@ -460,7 +474,9 @@ class TorqueSpawner(BatchSpawnerRegexStates):
#PBS -v {keepvars}
#PBS {options}
+{prologue}
{cmd}
+{epilogue}
""").tag(config=True)
# outputs job id string
@@ -473,6 +489,7 @@ class TorqueSpawner(BatchSpawnerRegexStates):
state_running_re = Unicode(r'R').tag(config=True)
state_exechost_re = Unicode(r'((?:[\w_-]+\.?)+)/\d+').tag(config=True)
+
class MoabSpawner(TorqueSpawner):
# outputs job id string
batch_submit_cmd = Unicode('msub').tag(config=True)
@@ -483,6 +500,7 @@ class MoabSpawner(TorqueSpawner):
state_running_re = Unicode(r'State="Running"').tag(config=True)
state_exechost_re = Unicode(r'AllocNodeList="([^\r\n\t\f :"]*)').tag(config=True)
+
class UserEnvMixin:
"""Mixin class that computes values for USER, SHELL and HOME in the environment passed to
the job submission subprocess in case the batch system needs these for the batch script."""
@@ -499,28 +517,19 @@ def user_env(self, env):
return env
def get_env(self):
- """Add user environment variables"""
+ """Get user environment variables to be passed to the user's job
+
+ Everything here should be passed to the user's job as
+ environment. Caution: If these variables are used for
+ authentication to the batch system commands as an admin, be
+ aware that the user will receive access to these as well.
+ """
env = super().get_env()
env = self.user_env(env)
return env
-class SlurmSpawner(UserEnvMixin,BatchSpawnerRegexStates):
- """A Spawner that just uses Popen to start local processes."""
-
- # all these req_foo traits will be available as substvars for templated strings
- req_cluster = Unicode('', \
- help="Cluster name to submit job to resource manager"
- ).tag(config=True)
-
- req_qos = Unicode('', \
- help="QoS name to submit job to resource manager"
- ).tag(config=True)
-
- req_srun = Unicode('srun',
- help="Job step wrapper, default 'srun'. Set to '' you do not want "
- "to run in job step (affects environment handling)"
- ).tag(config=True)
+class SlurmSpawner(UserEnvMixin,BatchSpawnerRegexStates):
batch_script = Unicode("""#!/bin/bash
#SBATCH --output={{homedir}}/jupyterhub_slurmspawner_%j.log
#SBATCH --job-name=spawner-jupyterhub
@@ -531,6 +540,7 @@ class SlurmSpawner(UserEnvMixin,BatchSpawnerRegexStates):
{% endif %}{% if runtime %}#SBATCH --time={{runtime}}
{% endif %}{% if memory %}#SBATCH --mem={{memory}}
{% endif %}{% if nprocs %}#SBATCH --cpus-per-task={{nprocs}}
+{% endif %}{% if reservation %}#SBATCH --reservation={{reservation}}
{% endif %}{% if options %}#SBATCH {{options}}{% endif %}
trap 'echo SIGTERM received' TERM
@@ -540,6 +550,25 @@ class SlurmSpawner(UserEnvMixin,BatchSpawnerRegexStates):
echo "jupyterhub-singleuser ended gracefully"
{{epilogue}}
""").tag(config=True)
+
+ # all these req_foo traits will be available as substvars for templated strings
+ req_cluster = Unicode('',
+ help="Cluster name to submit job to resource manager"
+ ).tag(config=True)
+
+ req_qos = Unicode('',
+ help="QoS name to submit job to resource manager"
+ ).tag(config=True)
+
+ req_srun = Unicode('srun',
+ help="Job step wrapper, default 'srun'. Set to '' you do not want "
+ "to run in job step (affects environment handling)"
+ ).tag(config=True)
+
+ req_reservation = Unicode('',
+ help="Reservation name to submit to resource manager"
+ ).tag(config=True)
+
# outputs line like "Submitted batch job 209"
batch_submit_cmd = Unicode('sbatch --parsable').tag(config=True)
# outputs status and exec node like "RUNNING hostname"
@@ -561,6 +590,7 @@ def parse_job_id(self, output):
raise e
return id
+
class MultiSlurmSpawner(SlurmSpawner):
'''When slurm has been compiled with --enable-multiple-slurmd, the
administrator sets the name of the slurmd instance via the slurmd -N
@@ -573,6 +603,7 @@ def state_gethost(self):
host = SlurmSpawner.state_gethost(self)
return self.daemon_resolver.get(host, host)
+
class GridengineSpawner(BatchSpawnerBase):
batch_script = Unicode("""#!/bin/bash
#$ -j yes
@@ -582,7 +613,9 @@ class GridengineSpawner(BatchSpawnerBase):
#$ -v {keepvars}
#$ {options}
+{prologue}
{cmd}
+{epilogue}
""").tag(config=True)
# outputs job id string
@@ -620,6 +653,7 @@ def state_gethost(self):
self.log.error("Spawner unable to match host addr in job {0} with status {1}".format(self.job_id, self.job_status))
return
+
class CondorSpawner(UserEnvMixin,BatchSpawnerRegexStates):
batch_script = Unicode("""
Executable = /bin/sh
@@ -657,6 +691,7 @@ def parse_job_id(self, output):
def cmd_formatted_for_batch(self):
return super(CondorSpawner,self).cmd_formatted_for_batch().replace('"','""').replace("'","''")
+
class LsfSpawner(BatchSpawnerBase):
'''A Spawner that uses IBM's Platform Load Sharing Facility (LSF) to launch notebooks.'''
@@ -668,7 +703,9 @@ class LsfSpawner(BatchSpawnerBase):
#BSUB -o {homedir}/.jupyterhub.lsf.out
#BSUB -e {homedir}/.jupyterhub.lsf.err
+{prologue}
{cmd}
+{epilogue}
''').tag(config=True)
@@ -701,7 +738,6 @@ def state_isrunning(self):
if self.job_status:
return self.job_status.split(' ')[0].upper() == 'RUN'
-
def state_gethost(self):
if self.job_status:
return self.job_status.split(' ')[1].strip()
diff --git a/batchspawner/tests/test_spawners.py b/batchspawner/tests/test_spawners.py
index b45abc9b..01873486 100644
--- a/batchspawner/tests/test_spawners.py
+++ b/batchspawner/tests/test_spawners.py
@@ -279,9 +279,11 @@ def test_torque(db, io_loop):
'req_nprocs': '5',
'req_memory': '5678',
'req_options': 'some_option_asdf',
+ 'req_prologue': 'PROLOGUE',
+ 'req_epilogue': 'EPILOGUE',
}
batch_script_re_list = [
- re.compile(r'singleuser_command'),
+ re.compile(r'^PROLOGUE.*^singleuser_command.*^EPILOGUE', re.S|re.M),
re.compile(r'mem=5678'),
re.compile(r'ppn=5'),
re.compile(r'^#PBS some_option_asdf', re.M),
@@ -305,9 +307,11 @@ def test_moab(db, io_loop):
'req_nprocs': '5',
'req_memory': '5678',
'req_options': 'some_option_asdf',
+ 'req_prologue': 'PROLOGUE',
+ 'req_epilogue': 'EPILOGUE',
}
batch_script_re_list = [
- re.compile(r'singleuser_command'),
+ re.compile(r'^PROLOGUE.*^singleuser_command.*^EPILOGUE', re.S|re.M),
re.compile(r'mem=5678'),
re.compile(r'ppn=5'),
re.compile(r'^#PBS some_option_asdf', re.M),
@@ -332,14 +336,24 @@ def test_slurm(db, io_loop):
'req_nprocs': '5',
'req_memory': '5678',
'req_options': 'some_option_asdf',
+ 'req_prologue': 'PROLOGUE',
+ 'req_epilogue': 'EPILOGUE',
+ 'req_reservation': 'RES123',
}
batch_script_re_list = [
- re.compile(r'srun .* singleuser_command', re.X|re.M),
+ re.compile(r'PROLOGUE.*srun singleuser_command.*EPILOGUE', re.S),
re.compile(r'^#SBATCH \s+ --cpus-per-task=5', re.X|re.M),
re.compile(r'^#SBATCH \s+ --time=3-05:10:10', re.X|re.M),
re.compile(r'^#SBATCH \s+ some_option_asdf', re.X|re.M),
+ re.compile(r'^#SBATCH \s+ --reservation=RES123', re.X|re.M),
]
- script = [
+ from .. import SlurmSpawner
+ run_spawner_script(db, io_loop, SlurmSpawner, normal_slurm_script,
+ batch_script_re_list=batch_script_re_list,
+ spawner_kwargs=spawner_kwargs)
+# We tend to use Slurm as our typical example job. These definitions
+# allow quick Slurm-based tests.
+normal_slurm_script = [
(re.compile(r'sudo.*sbatch'), str(testjob)),
(re.compile(r'sudo.*squeue'), 'PENDING '), # pending
(re.compile(r'sudo.*squeue'), 'RUNNING '+testhost), # running
@@ -347,8 +361,18 @@ def test_slurm(db, io_loop):
(re.compile(r'sudo.*scancel'), 'STOP'),
(re.compile(r'sudo.*squeue'), ''),
]
- from .. import SlurmSpawner
- run_spawner_script(db, io_loop, SlurmSpawner, script,
+from .. import SlurmSpawner
+def run_typical_slurm_spawner(db, io_loop,
+ spawner=SlurmSpawner,
+ script=normal_slurm_script,
+ batch_script_re_list=None,
+ spawner_kwargs={}):
+ """Run a full slurm job with default (overrideable) parameters.
+
+ This is useful, for example, for changing options and testing effect
+ of batch scripts.
+ """
+ return run_spawner_script(db, io_loop, spawner, script,
batch_script_re_list=batch_script_re_list,
spawner_kwargs=spawner_kwargs)
@@ -407,9 +431,11 @@ def test_lfs(db, io_loop):
'req_memory': '5678',
'req_options': 'some_option_asdf',
'req_queue': 'some_queue',
+ 'req_prologue': 'PROLOGUE',
+ 'req_epilogue': 'EPILOGUE',
}
batch_script_re_list = [
- re.compile(r'^singleuser_command', re.M),
+ re.compile(r'^PROLOGUE.*^singleuser_command.*^EPILOGUE', re.S|re.M),
re.compile(r'#BSUB\s+-q\s+some_queue', re.M),
]
script = [
@@ -424,3 +450,28 @@ def test_lfs(db, io_loop):
run_spawner_script(db, io_loop, LsfSpawner, script,
batch_script_re_list=batch_script_re_list,
spawner_kwargs=spawner_kwargs)
+
+
+def test_keepvars(db, io_loop):
+ # req_keepvars
+ spawner_kwargs = {
+ 'req_keepvars': 'ABCDE',
+ }
+ batch_script_re_list = [
+ re.compile(r'--export=ABCDE', re.X|re.M),
+ ]
+ run_typical_slurm_spawner(db, io_loop,
+ spawner_kwargs=spawner_kwargs,
+ batch_script_re_list=batch_script_re_list)
+
+ # req_keepvars AND req_keepvars_extra together
+ spawner_kwargs = {
+ 'req_keepvars': 'ABCDE',
+ 'req_keepvars_extra': 'XYZ',
+ }
+ batch_script_re_list = [
+ re.compile(r'--export=ABCDE,XYZ', re.X|re.M),
+ ]
+ run_typical_slurm_spawner(db, io_loop,
+ spawner_kwargs=spawner_kwargs,
+ batch_script_re_list=batch_script_re_list)