summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog1
-rwxr-xr-xcloud-init-cfg.py40
-rwxr-xr-xcloud-init-query.py17
-rwxr-xr-xcloud-init.py43
-rw-r--r--cloudinit/CloudConfig/__init__.py121
-rw-r--r--cloudinit/CloudConfig/cc_apt_update_upgrade.py97
-rw-r--r--cloudinit/CloudConfig/cc_bootcmd.py12
-rw-r--r--cloudinit/CloudConfig/cc_byobu.py20
-rw-r--r--cloudinit/CloudConfig/cc_ca_certs.py4
-rw-r--r--cloudinit/CloudConfig/cc_chef.py51
-rw-r--r--cloudinit/CloudConfig/cc_disable_ec2_metadata.py5
-rw-r--r--cloudinit/CloudConfig/cc_final_message.py14
-rw-r--r--cloudinit/CloudConfig/cc_foo.py3
-rw-r--r--cloudinit/CloudConfig/cc_grub_dpkg.py37
-rw-r--r--cloudinit/CloudConfig/cc_keys_to_console.py13
-rw-r--r--cloudinit/CloudConfig/cc_landscape.py8
-rw-r--r--cloudinit/CloudConfig/cc_locale.py11
-rw-r--r--cloudinit/CloudConfig/cc_mcollective.py38
-rw-r--r--cloudinit/CloudConfig/cc_mounts.py84
-rw-r--r--cloudinit/CloudConfig/cc_phone_home.py35
-rw-r--r--cloudinit/CloudConfig/cc_puppet.py22
-rw-r--r--cloudinit/CloudConfig/cc_resizefs.py30
-rw-r--r--cloudinit/CloudConfig/cc_rightscale_userdata.py19
-rw-r--r--cloudinit/CloudConfig/cc_rsyslog.py18
-rw-r--r--cloudinit/CloudConfig/cc_runcmd.py9
-rw-r--r--cloudinit/CloudConfig/cc_scripts_per_boot.py3
-rw-r--r--cloudinit/CloudConfig/cc_scripts_per_instance.py3
-rw-r--r--cloudinit/CloudConfig/cc_scripts_per_once.py3
-rw-r--r--cloudinit/CloudConfig/cc_scripts_user.py3
-rw-r--r--cloudinit/CloudConfig/cc_set_hostname.py10
-rw-r--r--cloudinit/CloudConfig/cc_set_passwords.py61
-rw-r--r--cloudinit/CloudConfig/cc_ssh.py62
-rw-r--r--cloudinit/CloudConfig/cc_ssh_import_id.py18
-rw-r--r--cloudinit/CloudConfig/cc_timezone.py14
-rw-r--r--cloudinit/CloudConfig/cc_update_etc_hosts.py14
-rw-r--r--cloudinit/CloudConfig/cc_update_hostname.py27
-rw-r--r--cloudinit/DataSource.py49
-rw-r--r--cloudinit/DataSourceEc2.py79
-rw-r--r--cloudinit/DataSourceNoCloud.py78
-rw-r--r--cloudinit/DataSourceOVF.py177
-rw-r--r--cloudinit/SshUtil.py110
-rw-r--r--cloudinit/UserDataHandler.py84
-rw-r--r--cloudinit/__init__.py296
-rw-r--r--cloudinit/netinfo.py33
-rw-r--r--cloudinit/util.py208
-rw-r--r--tests/unittests/test_handler_ca_certs.py5
-rwxr-xr-xtools/run-pylint49
47 files changed, 1240 insertions, 898 deletions
diff --git a/ChangeLog b/ChangeLog
index a1ae3b7e..07b46ded 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -19,6 +19,7 @@
in the payload parameter. (LP: #874342)
- add test case framework [Mike Milner] (LP: #890851)
- fix pylint warnings [Juerg Haefliger] (LP: #914739)
+ - add support for adding and deleting CA Certificates [Mike Milner] (LP: #915232)
0.6.2:
- fix bug where update was not done unless update was explicitly set.
It would not be run if 'upgrade' or packages were set to be installed
diff --git a/cloud-init-cfg.py b/cloud-init-cfg.py
index de64ef9c..def9583a 100755
--- a/cloud-init-cfg.py
+++ b/cloud-init-cfg.py
@@ -24,9 +24,11 @@ import cloudinit.CloudConfig as CC
import logging
import os
-def Usage(out = sys.stdout):
+
+def Usage(out=sys.stdout):
out.write("Usage: %s name\n" % sys.argv[0])
-
+
+
def main():
# expect to be called with
# name [ freq [ args ]
@@ -38,7 +40,7 @@ def main():
util.close_stdin()
modename = "config"
-
+
if len(sys.argv) < 2:
Usage(sys.stderr)
sys.exit(1)
@@ -49,30 +51,30 @@ def main():
else:
freq = None
run_args = []
- name=sys.argv[1]
+ name = sys.argv[1]
if len(sys.argv) > 2:
freq = sys.argv[2]
if freq == "None":
freq = None
if len(sys.argv) > 3:
- run_args=sys.argv[3:]
+ run_args = sys.argv[3:]
cfg_path = cloudinit.get_ipath_cur("cloud_config")
cfg_env_name = cloudinit.cfg_env_name
- if os.environ.has_key(cfg_env_name):
+ if cfg_env_name in os.environ:
cfg_path = os.environ[cfg_env_name]
- cloud = cloudinit.CloudInit(ds_deps=[]) # ds_deps=[], get only cached
+ cloud = cloudinit.CloudInit(ds_deps=[]) # ds_deps=[], get only cached
try:
cloud.get_data_source()
except cloudinit.DataSourceNotFoundException as e:
# there was no datasource found, theres nothing to do
sys.exit(0)
- cc = CC.CloudConfig(cfg_path,cloud)
+ cc = CC.CloudConfig(cfg_path, cloud)
try:
- (outfmt, errfmt) = CC.get_output_cfg(cc.cfg,modename)
+ (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, modename)
CC.redirect_output(outfmt, errfmt)
except Exception as e:
err("Failed to get and set output config: %s\n" % e)
@@ -81,28 +83,30 @@ def main():
log = logging.getLogger()
log.info("cloud-init-cfg %s" % sys.argv[1:])
- module_list = [ ]
+ module_list = []
if name == "all":
modlist_cfg_name = "cloud_%s_modules" % modename
- module_list = CC.read_cc_modules(cc.cfg,modlist_cfg_name)
+ module_list = CC.read_cc_modules(cc.cfg, modlist_cfg_name)
if not len(module_list):
- err("no modules to run in cloud_config [%s]" % modename,log)
+ err("no modules to run in cloud_config [%s]" % modename, log)
sys.exit(0)
else:
- module_list.append( [ name, freq ] + run_args )
+ module_list.append([name, freq] + run_args)
- failures = CC.run_cc_modules(cc,module_list,log)
+ failures = CC.run_cc_modules(cc, module_list, log)
if len(failures):
- err("errors running cloud_config [%s]: %s" % (modename,failures), log)
+ err("errors running cloud_config [%s]: %s" % (modename, failures), log)
sys.exit(len(failures))
-def err(msg,log=None):
+
+def err(msg, log=None):
if log:
log.error(msg)
sys.stderr.write(msg + "\n")
-def fail(msg,log=None):
- err(msg,log)
+
+def fail(msg, log=None):
+ err(msg, log)
sys.exit(1)
if __name__ == '__main__':
diff --git a/cloud-init-query.py b/cloud-init-query.py
index cfe9b429..71987174 100755
--- a/cloud-init-query.py
+++ b/cloud-init-query.py
@@ -21,23 +21,26 @@ import sys
import cloudinit
import cloudinit.CloudConfig
-def Usage(out = sys.stdout):
+
+def Usage(out=sys.stdout):
out.write("Usage: %s name\n" % sys.argv[0])
-
+
+
def main():
# expect to be called with name of item to fetch
if len(sys.argv) != 2:
Usage(sys.stderr)
sys.exit(1)
- cc = cloudinit.CloudConfig.CloudConfig(cloudinit.cloud_config)
+ cfg_path = cloudinit.get_ipath_cur("cloud_config")
+ cc = cloudinit.CloudConfig.CloudConfig(cfg_path)
data = {
- 'user_data' : cc.cloud.get_userdata(),
- 'user_data_raw' : cc.cloud.get_userdata_raw(),
- 'instance_id' : cc.cloud.get_instance_id(),
+ 'user_data': cc.cloud.get_userdata(),
+ 'user_data_raw': cc.cloud.get_userdata_raw(),
+ 'instance_id': cc.cloud.get_instance_id(),
}
- name = sys.argv[1].replace('-','_')
+ name = sys.argv[1].replace('-', '_')
if name not in data:
sys.stderr.write("unknown name '%s'. Known values are:\n %s\n" %
diff --git a/cloud-init.py b/cloud-init.py
index 8279a0b0..f9f71949 100755
--- a/cloud-init.py
+++ b/cloud-init.py
@@ -30,15 +30,17 @@ import logging
import errno
import os
+
def warn(wstr):
sys.stderr.write("WARN:%s" % wstr)
+
def main():
util.close_stdin()
- cmds = ( "start", "start-local" )
- deps = { "start" : ( ds.DEP_FILESYSTEM, ds.DEP_NETWORK ),
- "start-local" : ( ds.DEP_FILESYSTEM, ) }
+ cmds = ("start", "start-local")
+ deps = {"start": (ds.DEP_FILESYSTEM, ds.DEP_NETWORK),
+ "start-local": (ds.DEP_FILESYSTEM, )}
cmd = ""
if len(sys.argv) > 1:
@@ -54,10 +56,10 @@ def main():
sys.stderr.write("bad command %s. use one of %s\n" % (cmd, cmds))
sys.exit(1)
- now = time.strftime("%a, %d %b %Y %H:%M:%S %z",time.gmtime())
+ now = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
try:
- uptimef=open("/proc/uptime")
- uptime=uptimef.read().split(" ")[0]
+ uptimef = open("/proc/uptime")
+ uptime = uptimef.read().split(" ")[0]
uptimef.close()
except IOError as e:
warn("unable to open /proc/uptime\n")
@@ -74,7 +76,7 @@ def main():
raise
try:
- (outfmt, errfmt) = CC.get_output_cfg(cfg,"init")
+ (outfmt, errfmt) = CC.get_output_cfg(cfg, "init")
CC.redirect_output(outfmt, errfmt)
except Exception as e:
warn("Failed to get and set output config: %s\n" % e)
@@ -92,24 +94,24 @@ def main():
if cmd == "start":
print netinfo.debug_info()
- stop_files = ( cloudinit.get_ipath_cur("obj_pkl"), nonet_path )
+ stop_files = (cloudinit.get_ipath_cur("obj_pkl"), nonet_path)
# if starting as the network start, there are cases
# where everything is already done for us, and it makes
# most sense to exit early and silently
for f in stop_files:
try:
- fp = open(f,"r")
+ fp = open(f, "r")
fp.close()
except:
continue
-
+
log.debug("no need for cloud-init start to run (%s)\n", f)
sys.exit(0)
elif cmd == "start-local":
# cache is not instance specific, so it has to be purged
# but we want 'start' to benefit from a cache if
# a previous start-local populated one
- manclean = util.get_cfg_option_bool(cfg, 'manual_cache_clean',False)
+ manclean = util.get_cfg_option_bool(cfg, 'manual_cache_clean', False)
if manclean:
log.debug("not purging cache, manual_cache_clean = True")
cloudinit.purge_cache(not manclean)
@@ -117,7 +119,8 @@ def main():
try:
os.unlink(nonet_path)
except OSError as e:
- if e.errno != errno.ENOENT: raise
+ if e.errno != errno.ENOENT:
+ raise
msg = "cloud-init %s running: %s. up %s seconds" % (cmd, now, uptime)
sys.stderr.write(msg + "\n")
@@ -146,7 +149,7 @@ def main():
# parse the user data (ec2-run-userdata.py)
try:
ran = cloud.sem_and_run("consume_userdata", cloudinit.per_instance,
- cloud.consume_userdata,[cloudinit.per_instance],False)
+ cloud.consume_userdata, [cloudinit.per_instance], False)
if not ran:
cloud.consume_userdata(cloudinit.per_always)
except:
@@ -160,9 +163,9 @@ def main():
try:
outfmt_orig = outfmt
errfmt_orig = errfmt
- (outfmt, errfmt) = CC.get_output_cfg(cc.cfg,"init")
+ (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, "init")
if outfmt_orig != outfmt or errfmt_orig != errfmt:
- warn("stdout, stderr changing to (%s,%s)" % (outfmt,errfmt))
+ warn("stdout, stderr changing to (%s,%s)" % (outfmt, errfmt))
CC.redirect_output(outfmt, errfmt)
except Exception as e:
warn("Failed to get and set output config: %s\n" % e)
@@ -171,17 +174,17 @@ def main():
cc_path = cloudinit.get_ipath_cur('cloud_config')
cc_ready = cc.cfg.get("cc_ready_cmd",
['initctl', 'emit', 'cloud-config',
- '%s=%s' % (cloudinit.cfg_env_name, cc_path) ])
+ '%s=%s' % (cloudinit.cfg_env_name, cc_path)])
if cc_ready:
- if isinstance(cc_ready,str):
- cc_ready = [ 'sh', '-c', cc_ready]
+ if isinstance(cc_ready, str):
+ cc_ready = ['sh', '-c', cc_ready]
subprocess.Popen(cc_ready).communicate()
- module_list = CC.read_cc_modules(cc.cfg,"cloud_init_modules")
+ module_list = CC.read_cc_modules(cc.cfg, "cloud_init_modules")
failures = []
if len(module_list):
- failures = CC.run_cc_modules(cc,module_list,log)
+ failures = CC.run_cc_modules(cc, module_list, log)
else:
msg = "no cloud_init_modules to run"
sys.stderr.write(msg + "\n")
diff --git a/cloudinit/CloudConfig/__init__.py b/cloudinit/CloudConfig/__init__.py
index f5c4143c..c9acfbf9 100644
--- a/cloudinit/CloudConfig/__init__.py
+++ b/cloudinit/CloudConfig/__init__.py
@@ -25,15 +25,16 @@ import os
import subprocess
import time
-per_instance= cloudinit.per_instance
+per_instance = cloudinit.per_instance
per_always = cloudinit.per_always
per_once = cloudinit.per_once
+
class CloudConfig():
cfgfile = None
cfg = None
- def __init__(self,cfgfile, cloud=None, ds_deps=None):
+ def __init__(self, cfgfile, cloud=None, ds_deps=None):
if cloud == None:
self.cloud = cloudinit.CloudInit(ds_deps)
self.cloud.get_data_source()
@@ -41,62 +42,67 @@ class CloudConfig():
self.cloud = cloud
self.cfg = self.get_config_obj(cfgfile)
- def get_config_obj(self,cfgfile):
+ def get_config_obj(self, cfgfile):
try:
cfg = util.read_conf(cfgfile)
except:
# TODO: this 'log' could/should be passed in
- cloudinit.log.critical("Failed loading of cloud config '%s'. Continuing with empty config\n" % cfgfile)
+ cloudinit.log.critical("Failed loading of cloud config '%s'. "
+ "Continuing with empty config\n" % cfgfile)
cloudinit.log.debug(traceback.format_exc() + "\n")
cfg = None
- if cfg is None: cfg = { }
+ if cfg is None:
+ cfg = {}
try:
ds_cfg = self.cloud.datasource.get_config_obj()
except:
- ds_cfg = { }
+ ds_cfg = {}
cfg = util.mergedict(cfg, ds_cfg)
- return(util.mergedict(cfg,self.cloud.cfg))
+ return(util.mergedict(cfg, self.cloud.cfg))
def handle(self, name, args, freq=None):
try:
- mod = __import__("cc_" + name.replace("-","_"),globals())
- def_freq = getattr(mod, "frequency",per_instance)
+ mod = __import__("cc_" + name.replace("-", "_"), globals())
+ def_freq = getattr(mod, "frequency", per_instance)
handler = getattr(mod, "handle")
if not freq:
freq = def_freq
self.cloud.sem_and_run("config-" + name, freq, handler,
- [ name, self.cfg, self.cloud, cloudinit.log, args ])
+ [name, self.cfg, self.cloud, cloudinit.log, args])
except:
raise
+
# reads a cloudconfig module list, returns
# a 2 dimensional array suitable to pass to run_cc_modules
-def read_cc_modules(cfg,name):
- if name not in cfg: return([])
+def read_cc_modules(cfg, name):
+ if name not in cfg:
+ return([])
module_list = []
# create 'module_list', an array of arrays
# where array[0] = config
# array[1] = freq
# array[2:] = arguemnts
for item in cfg[name]:
- if isinstance(item,str):
+ if isinstance(item, str):
module_list.append((item,))
- elif isinstance(item,list):
+ elif isinstance(item, list):
module_list.append(item)
else:
raise TypeError("failed to read '%s' item in config")
return(module_list)
-
-def run_cc_modules(cc,module_list,log):
+
+
+def run_cc_modules(cc, module_list, log):
failures = []
for cfg_mod in module_list:
name = cfg_mod[0]
freq = None
- run_args = [ ]
+ run_args = []
if len(cfg_mod) > 1:
freq = cfg_mod[1]
if len(cfg_mod) > 2:
@@ -104,17 +110,18 @@ def run_cc_modules(cc,module_list,log):
try:
log.debug("handling %s with freq=%s and args=%s" %
- (name, freq, run_args ))
+ (name, freq, run_args))
cc.handle(name, run_args, freq=freq)
except:
log.warn(traceback.format_exc())
log.error("config handling of %s, %s, %s failed\n" %
- (name,freq,run_args))
+ (name, freq, run_args))
failures.append(name)
return(failures)
-# always returns well formated values
+
+# always returns well formatted values
# cfg is expected to have an entry 'output' in it, which is a dictionary
# that includes entries for 'init', 'config', 'final' or 'all'
# init: /var/log/cloud.out
@@ -125,25 +132,28 @@ def run_cc_modules(cc,module_list,log):
# this returns the specific 'mode' entry, cleanly formatted, with value
# None if if none is given
def get_output_cfg(cfg, mode="init"):
- ret = [ None, None ]
- if not 'output' in cfg: return ret
+ ret = [None, None]
+ if not 'output' in cfg:
+ return ret
outcfg = cfg['output']
if mode in outcfg:
modecfg = outcfg[mode]
else:
- if 'all' not in outcfg: return ret
+ if 'all' not in outcfg:
+ return ret
# if there is a 'all' item in the output list
# then it applies to all users of this (init, config, final)
modecfg = outcfg['all']
# if value is a string, it specifies stdout and stderr
- if isinstance(modecfg,str):
- ret = [ modecfg, modecfg ]
+ if isinstance(modecfg, str):
+ ret = [modecfg, modecfg]
# if its a list, then we expect (stdout, stderr)
- if isinstance(modecfg,list):
- if len(modecfg) > 0: ret[0] = modecfg[0]
+ if isinstance(modecfg, list):
+ if len(modecfg) > 0:
+ ret[0] = modecfg[0]
if len(modecfg) > 1:
ret[1] = modecfg[1]
@@ -157,26 +167,28 @@ def get_output_cfg(cfg, mode="init"):
# if err's entry == "&1", then make it same as stdout
# as in shell syntax of "echo foo >/dev/null 2>&1"
- if ret[1] == "&1": ret[1] = ret[0]
+ if ret[1] == "&1":
+ ret[1] = ret[0]
- swlist = [ ">>", ">", "|" ]
+ swlist = [">>", ">", "|"]
for i in range(len(ret)):
- if not ret[i]: continue
+ if not ret[i]:
+ continue
val = ret[i].lstrip()
found = False
for s in swlist:
if val.startswith(s):
- val = "%s %s" % (s,val[len(s):].strip())
+ val = "%s %s" % (s, val[len(s):].strip())
found = True
break
if not found:
# default behavior is append
- val = "%s %s" % ( ">>", val.strip())
+ val = "%s %s" % (">>", val.strip())
ret[i] = val
return(ret)
-
+
# redirect_output(outfmt, errfmt, orig_out, orig_err)
# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
# fmt can be:
@@ -185,13 +197,14 @@ def get_output_cfg(cfg, mode="init"):
# | program [ arg1 [ arg2 [ ... ] ] ]
#
# with a '|', arguments are passed to shell, so one level of
-# shell escape is required.
-def redirect_output(outfmt,errfmt, o_out=sys.stdout, o_err=sys.stderr):
+# shell escape is required.
+def redirect_output(outfmt, errfmt, o_out=sys.stdout, o_err=sys.stderr):
if outfmt:
- (mode, arg) = outfmt.split(" ",1)
+ (mode, arg) = outfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
- if mode == ">": owith = "wb"
+ if mode == ">":
+ owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
@@ -206,10 +219,11 @@ def redirect_output(outfmt,errfmt, o_out=sys.stdout, o_err=sys.stderr):
return
if errfmt:
- (mode, arg) = errfmt.split(" ",1)
+ (mode, arg) = errfmt.split(" ", 1)
if mode == ">" or mode == ">>":
owith = "ab"
- if mode == ">": owith = "wb"
+ if mode == ">":
+ owith = "wb"
new_fp = open(arg, owith)
elif mode == "|":
proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
@@ -221,32 +235,37 @@ def redirect_output(outfmt,errfmt, o_out=sys.stdout, o_err=sys.stderr):
os.dup2(new_fp.fileno(), o_err.fileno())
return
+
def run_per_instance(name, func, args, clear_on_fail=False):
- semfile = "%s/%s" % (cloudinit.get_ipath_cur("data"),name)
- if os.path.exists(semfile): return
+ semfile = "%s/%s" % (cloudinit.get_ipath_cur("data"), name)
+ if os.path.exists(semfile):
+ return
- util.write_file(semfile,str(time.time()))
+ util.write_file(semfile, str(time.time()))
try:
func(*args)
except:
- if clear_on_fail: os.unlink(semfile)
+ if clear_on_fail:
+ os.unlink(semfile)
raise
+
# apt_get top level command (install, update...), and args to pass it
-def apt_get(tlc,args=None):
+def apt_get(tlc, args=None):
if args is None:
args = []
- e=os.environ.copy()
- e['DEBIAN_FRONTEND']='noninteractive'
- cmd=[ 'apt-get',
- '--option', 'Dpkg::Options::=--force-confold', '--assume-yes',
- tlc ]
+ e = os.environ.copy()
+ e['DEBIAN_FRONTEND'] = 'noninteractive'
+ cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold',
+ '--assume-yes', tlc]
cmd.extend(args)
- subprocess.check_call(cmd,env=e)
+ subprocess.check_call(cmd, env=e)
+
def update_package_sources():
run_per_instance("update-sources", apt_get, ("update",))
+
def install_packages(pkglist):
update_package_sources()
- apt_get("install",pkglist)
+ apt_get("install", pkglist)
diff --git a/cloudinit/CloudConfig/cc_apt_update_upgrade.py b/cloudinit/CloudConfig/cc_apt_update_upgrade.py
index 0cbe02d4..8aaaa334 100644
--- a/cloudinit/CloudConfig/cc_apt_update_upgrade.py
+++ b/cloudinit/CloudConfig/cc_apt_update_upgrade.py
@@ -22,7 +22,8 @@ import os
import glob
import cloudinit.CloudConfig as cc
-def handle(_name,cfg,cloud,log,_args):
+
+def handle(_name, cfg, cloud, log, _args):
update = util.get_cfg_option_bool(cfg, 'apt_update', False)
upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
@@ -35,18 +36,17 @@ def handle(_name,cfg,cloud,log,_args):
if not util.get_cfg_option_bool(cfg, \
'apt_preserve_sources_list', False):
generate_sources_list(release, mirror)
- old_mir = util.get_cfg_option_str(cfg,'apt_old_mirror', \
+ old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', \
"archive.ubuntu.com/ubuntu")
rename_apt_lists(old_mir, mirror)
-
# set up proxy
proxy = cfg.get("apt_proxy", None)
proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy"
if proxy:
try:
contents = "Acquire::HTTP::Proxy \"%s\";\n"
- with open(proxy_filename,"w") as fp:
+ with open(proxy_filename, "w") as fp:
fp.write(contents % proxy)
except Exception as e:
log.warn("Failed to write proxy to %s" % proxy_filename)
@@ -54,9 +54,9 @@ def handle(_name,cfg,cloud,log,_args):
os.unlink(proxy_filename)
# process 'apt_sources'
- if cfg.has_key('apt_sources'):
+ if 'apt_sources' in cfg:
errors = add_sources(cfg['apt_sources'],
- { 'MIRROR' : mirror, 'RELEASE' : release } )
+ {'MIRROR': mirror, 'RELEASE': release})
for e in errors:
log.warn("Source Error: %s\n" % ':'.join(e))
@@ -69,9 +69,9 @@ def handle(_name,cfg,cloud,log,_args):
log.error("Failed to run debconf-set-selections")
log.debug(traceback.format_exc())
- pkglist = util.get_cfg_option_list_or_str(cfg,'packages',[])
+ pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', [])
- errors = [ ]
+ errors = []
if update or len(pkglist) or upgrade:
try:
cc.update_package_sources()
@@ -101,77 +101,90 @@ def handle(_name,cfg,cloud,log,_args):
return(True)
+
def mirror2lists_fileprefix(mirror):
- string=mirror
+ string = mirror
# take of http:// or ftp://
- if string.endswith("/"): string=string[0:-1]
- pos=string.find("://")
+ if string.endswith("/"):
+ string = string[0:-1]
+ pos = string.find("://")
if pos >= 0:
- string=string[pos+3:]
- string=string.replace("/","_")
+ string = string[pos + 3:]
+ string = string.replace("/", "_")
return string
-def rename_apt_lists(omirror,new_mirror,lists_d="/var/lib/apt/lists"):
-
- oprefix="%s/%s" % (lists_d,mirror2lists_fileprefix(omirror))
- nprefix="%s/%s" % (lists_d,mirror2lists_fileprefix(new_mirror))
- if(oprefix==nprefix): return
- olen=len(oprefix)
+
+def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
+ oprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(omirror))
+ nprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(new_mirror))
+ if(oprefix == nprefix):
+ return
+ olen = len(oprefix)
for filename in glob.glob("%s_*" % oprefix):
- os.rename(filename,"%s%s" % (nprefix, filename[olen:]))
+ os.rename(filename, "%s%s" % (nprefix, filename[olen:]))
+
def get_release():
- stdout, _stderr = subprocess.Popen(['lsb_release', '-cs'], stdout=subprocess.PIPE).communicate()
- return(stdout.strip())
+ stdout, _stderr = subprocess.Popen(['lsb_release', '-cs'],
+ stdout=subprocess.PIPE).communicate()
+ return(str(stdout).strip())
+
def generate_sources_list(codename, mirror):
util.render_to_file('sources.list', '/etc/apt/sources.list', \
- { 'mirror' : mirror, 'codename' : codename })
+ {'mirror': mirror, 'codename': codename})
+
-# srclist is a list of dictionaries,
-# each entry must have: 'source'
-# may have: key, ( keyid and keyserver)
def add_sources(srclist, searchList=None):
+ """
+ add entries in /etc/apt/sources.list.d for each abbreviated
+ sources.list entry in 'srclist'. When rendering template, also
+ include the values in dictionary searchList
+ """
if searchList is None:
searchList = {}
elst = []
for ent in srclist:
- if not ent.has_key('source'):
- elst.append([ "", "missing source" ])
+ if 'source' not in ent:
+ elst.append(["", "missing source"])
continue
- source=ent['source']
+ source = ent['source']
if source.startswith("ppa:"):
- try: util.subp(["add-apt-repository",source])
+ try:
+ util.subp(["add-apt-repository", source])
except:
elst.append([source, "add-apt-repository failed"])
continue
source = util.render_string(source, searchList)
- if not ent.has_key('filename'):
- ent['filename']='cloud_config_sources.list'
+ if 'filename' not in ent:
+ ent['filename'] = 'cloud_config_sources.list'
if not ent['filename'].startswith("/"):
ent['filename'] = "%s/%s" % \
("/etc/apt/sources.list.d/", ent['filename'])
- if ( ent.has_key('keyid') and not ent.has_key('key') ):
+ if ('keyid' in ent and 'key' not in ent):
ks = "keyserver.ubuntu.com"
- if ent.has_key('keyserver'): ks = ent['keyserver']
+ if 'keyserver' in ent:
+ ks = ent['keyserver']
try:
ent['key'] = util.getkeybyid(ent['keyid'], ks)
except:
- elst.append([source,"failed to get key from %s" % ks])
+ elst.append([source, "failed to get key from %s" % ks])
continue
- if ent.has_key('key'):
- try: util.subp(('apt-key', 'add', '-'), ent['key'])
+ if 'key' in ent:
+ try:
+ util.subp(('apt-key', 'add', '-'), ent['key'])
except:
elst.append([source, "failed add key"])
- try: util.write_file(ent['filename'], source + "\n", omode="ab")
+ try:
+ util.write_file(ent['filename'], source + "\n", omode="ab")
except:
elst.append([source, "failed write to file %s" % ent['filename']])
@@ -189,10 +202,10 @@ def find_apt_mirror(cloud, cfg):
}
mirror = None
- cfg_mirror = cfg.get("apt_mirror",None)
+ cfg_mirror = cfg.get("apt_mirror", None)
if cfg_mirror:
mirror = cfg["apt_mirror"]
- elif cfg.has_key("apt_mirror_search"):
+ elif "apt_mirror_search" in cfg:
mirror = util.search_for_mirror(cfg['apt_mirror_search'])
else:
if cloud:
@@ -204,7 +217,7 @@ def find_apt_mirror(cloud, cfg):
if not mirror and cloud:
# if we have a fqdn, then search its domain portion first
- ( _hostname, fqdn ) = util.get_hostname_fqdn(cfg, cloud)
+ (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
mydom = ".".join(fqdn.split(".")[1:])
if mydom:
doms.append(".%s" % mydom)
@@ -213,7 +226,7 @@ def find_apt_mirror(cloud, cfg):
doms.extend((".localdomain", "",))
mirror_list = []
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro )
+ mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
for post in doms:
mirror_list.append(mirrorfmt % post)
diff --git a/cloudinit/CloudConfig/cc_bootcmd.py b/cloudinit/CloudConfig/cc_bootcmd.py
index fc925447..66c452e9 100644
--- a/cloudinit/CloudConfig/cc_bootcmd.py
+++ b/cloudinit/CloudConfig/cc_bootcmd.py
@@ -18,11 +18,13 @@
import cloudinit.util as util
import subprocess
import tempfile
+import os
from cloudinit.CloudConfig import per_always
frequency = per_always
-def handle(_name,cfg,cloud,log,_args):
- if not cfg.has_key("bootcmd"):
+
+def handle(_name, cfg, cloud, log, _args):
+ if "bootcmd" not in cfg:
return
try:
@@ -33,10 +35,10 @@ def handle(_name,cfg,cloud,log,_args):
except:
log.warn("failed to shellify bootcmd")
raise
-
+
try:
- env=os.environ.copy()
- env['INSTANCE_ID']=cloud.get_instance_id()
+ env = os.environ.copy()
+ env['INSTANCE_ID'] = cloud.get_instance_id()
subprocess.check_call(['/bin/sh'], env=env, stdin=tmpf)
tmpf.close()
except:
diff --git a/cloudinit/CloudConfig/cc_byobu.py b/cloudinit/CloudConfig/cc_byobu.py
index dd510dda..7e455a7a 100644
--- a/cloudinit/CloudConfig/cc_byobu.py
+++ b/cloudinit/CloudConfig/cc_byobu.py
@@ -19,19 +19,21 @@ import cloudinit.util as util
import subprocess
import traceback
-def handle(_name,cfg,_cloud,log,args):
+
+def handle(_name, cfg, _cloud, log, args):
if len(args) != 0:
value = args[0]
else:
- value = util.get_cfg_option_str(cfg,"byobu_by_default","")
+ value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
- if not value: return
+ if not value:
+ return
if value == "user" or value == "system":
value = "enable-%s" % value
- valid = ( "enable-user", "enable-system", "enable",
- "disable-user", "disable-system", "disable" )
+ valid = ("enable-user", "enable-system", "enable",
+ "disable-user", "disable-system", "disable")
if not value in valid:
log.warn("Unknown value %s for byobu_by_default" % value)
@@ -50,7 +52,7 @@ def handle(_name,cfg,_cloud,log,args):
shcmd = ""
if mod_user:
- user = util.get_cfg_option_str(cfg,"user","ubuntu")
+ user = util.get_cfg_option_str(cfg, "user", "ubuntu")
shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst)
shcmd += " || X=$(($X+1)); "
if mod_sys:
@@ -58,7 +60,7 @@ def handle(_name,cfg,_cloud,log,args):
shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive"
shcmd += " || X=$(($X+1)); "
- cmd = [ "/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X" ) ]
+ cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
log.debug("setting byobu to %s" % value)
@@ -66,7 +68,7 @@ def handle(_name,cfg,_cloud,log,args):
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
log.debug(traceback.format_exc(e))
- raise Exception("Cmd returned %s: %s" % ( e.returncode, cmd))
+ raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
except OSError as e:
log.debug(traceback.format_exc(e))
- raise Exception("Cmd failed to execute: %s" % ( cmd ))
+ raise Exception("Cmd failed to execute: %s" % (cmd))
diff --git a/cloudinit/CloudConfig/cc_ca_certs.py b/cloudinit/CloudConfig/cc_ca_certs.py
index e6cdc3f5..c18821f9 100644
--- a/cloudinit/CloudConfig/cc_ca_certs.py
+++ b/cloudinit/CloudConfig/cc_ca_certs.py
@@ -56,7 +56,7 @@ def remove_default_ca_certs():
write_file(CA_CERT_CONFIG, "", mode=0644)
-def handle(name, cfg, cloud, log, args):
+def handle(_name, cfg, _cloud, log, _args):
"""
Call to handle ca-cert sections in cloud-config file.
@@ -74,12 +74,14 @@ def handle(name, cfg, cloud, log, args):
# If there is a remove-defaults option set to true, remove the system
# default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False):
+ log.debug("removing default certificates")
remove_default_ca_certs()
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
trusted_certs = get_cfg_option_list_or_str(ca_cert_cfg, "trusted")
if trusted_certs:
+ log.debug("adding %d certificates" % len(trusted_certs))
add_ca_certs(trusted_certs)
# Update the system with the new cert configuration.
diff --git a/cloudinit/CloudConfig/cc_chef.py b/cloudinit/CloudConfig/cc_chef.py
index 977fe80f..c9b464b5 100644
--- a/cloudinit/CloudConfig/cc_chef.py
+++ b/cloudinit/CloudConfig/cc_chef.py
@@ -23,9 +23,11 @@ import cloudinit.util as util
ruby_version_default = "1.8"
-def handle(_name,cfg,cloud,log,_args):
+
+def handle(_name, cfg, cloud, log, _args):
# If there isn't a chef key in the configuration don't do anything
- if not cfg.has_key('chef'): return
+ if 'chef' not in cfg:
+ return
chef_cfg = cfg['chef']
# ensure the chef directories we use exist
@@ -35,7 +37,7 @@ def handle(_name,cfg,cloud,log,_args):
# set the validation key based on the presence of either 'validation_key'
# or 'validation_cert'. In the case where both exist, 'validation_key'
# takes precedence
- if chef_cfg.has_key('validation_key') or chef_cfg.has_key('validation_cert'):
+ if ('validation_key' in chef_cfg or 'validation_cert' in chef_cfg):
validation_key = util.get_cfg_option_str(chef_cfg, 'validation_key',
chef_cfg['validation_cert'])
with open('/etc/chef/validation.pem', 'w') as validation_key_fh:
@@ -43,26 +45,28 @@ def handle(_name,cfg,cloud,log,_args):
# create the chef config from template
util.render_to_file('chef_client.rb', '/etc/chef/client.rb',
- {'server_url': chef_cfg['server_url'],
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- cloud.datasource.get_instance_id()),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- '_default'),
- 'validation_name': chef_cfg['validation_name']})
+ {'server_url': chef_cfg['server_url'],
+ 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
+ cloud.datasource.get_instance_id()),
+ 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
+ '_default'),
+ 'validation_name': chef_cfg['validation_name']})
# set the firstboot json
with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh:
initial_json = {}
- if chef_cfg.has_key('run_list'):
+ if 'run_list' in chef_cfg:
initial_json['run_list'] = chef_cfg['run_list']
- if chef_cfg.has_key('initial_attributes'):
+ if 'initial_attributes' in chef_cfg:
initial_attributes = chef_cfg['initial_attributes']
- for k in initial_attributes.keys(): initial_json[k] = initial_attributes[k]
+ for k in initial_attributes.keys():
+ initial_json[k] = initial_attributes[k]
firstboot_json_fh.write(json.dumps(initial_json))
# If chef is not installed, we install chef based on 'install_type'
if not os.path.isfile('/usr/bin/chef-client'):
- install_type = util.get_cfg_option_str(chef_cfg, 'install_type', 'packages')
+ install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
+ 'packages')
if install_type == "gems":
# this will install and run the chef-client from gems
chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
@@ -71,37 +75,42 @@ def handle(_name,cfg,cloud,log,_args):
install_chef_from_gems(ruby_version, chef_version)
# and finally, run chef-client
log.debug('running chef-client')
- subprocess.check_call(['/usr/bin/chef-client', '-d', '-i', '1800', '-s', '20'])
+ subprocess.check_call(['/usr/bin/chef-client', '-d', '-i', '1800',
+ '-s', '20'])
else:
# this will install and run the chef-client from packages
cc.install_packages(('chef',))
+
def get_ruby_packages(version):
# return a list of packages needed to install ruby at version
- pkgs = [ 'ruby%s' % version, 'ruby%s-dev' % version ]
+ pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
if version == "1.8":
pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
return(pkgs)
-def install_chef_from_gems(ruby_version, chef_version = None):
+
+def install_chef_from_gems(ruby_version, chef_version=None):
cc.install_packages(get_ruby_packages(ruby_version))
if not os.path.exists('/usr/bin/gem'):
os.symlink('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
if not os.path.exists('/usr/bin/ruby'):
os.symlink('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
if chef_version:
- subprocess.check_call(['/usr/bin/gem','install','chef',
+ subprocess.check_call(['/usr/bin/gem', 'install', 'chef',
'-v %s' % chef_version, '--no-ri',
- '--no-rdoc','--bindir','/usr/bin','-q'])
+ '--no-rdoc', '--bindir', '/usr/bin', '-q'])
else:
- subprocess.check_call(['/usr/bin/gem','install','chef',
- '--no-ri','--no-rdoc','--bindir',
- '/usr/bin','-q'])
+ subprocess.check_call(['/usr/bin/gem', 'install', 'chef',
+ '--no-ri', '--no-rdoc', '--bindir',
+ '/usr/bin', '-q'])
+
def ensure_dir(d):
if not os.path.exists(d):
os.makedirs(d)
+
def mkdirs(dirs):
for d in dirs:
ensure_dir(d)
diff --git a/cloudinit/CloudConfig/cc_disable_ec2_metadata.py b/cloudinit/CloudConfig/cc_disable_ec2_metadata.py
index f06d4dfc..7deec324 100644
--- a/cloudinit/CloudConfig/cc_disable_ec2_metadata.py
+++ b/cloudinit/CloudConfig/cc_disable_ec2_metadata.py
@@ -21,7 +21,8 @@ from cloudinit.CloudConfig import per_always
frequency = per_always
-def handle(_name,cfg,_cloud,_log,_args):
+
+def handle(_name, cfg, _cloud, _log, _args):
if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False):
- fwall="route add -host 169.254.169.254 reject"
+ fwall = "route add -host 169.254.169.254 reject"
subprocess.call(fwall.split(' '))
diff --git a/cloudinit/CloudConfig/cc_final_message.py b/cloudinit/CloudConfig/cc_final_message.py
index c8631d01..63618fd2 100644
--- a/cloudinit/CloudConfig/cc_final_message.py
+++ b/cloudinit/CloudConfig/cc_final_message.py
@@ -24,28 +24,28 @@ frequency = per_always
final_message = "cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds"
-def handle(_name,cfg,_cloud,log,args):
+
+def handle(_name, cfg, _cloud, log, args):
if len(args) != 0:
msg_in = args[0]
else:
- msg_in = util.get_cfg_option_str(cfg,"final_message",final_message)
+ msg_in = util.get_cfg_option_str(cfg, "final_message", final_message)
try:
- uptimef=open("/proc/uptime")
- uptime=uptimef.read().split(" ")[0]
+ uptimef = open("/proc/uptime")
+ uptime = uptimef.read().split(" ")[0]
uptimef.close()
except IOError as e:
log.warn("unable to open /proc/uptime\n")
uptime = "na"
-
try:
- ts = time.strftime("%a, %d %b %Y %H:%M:%S %z",time.gmtime())
+ ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
except:
ts = "na"
try:
- subs = { 'UPTIME' : uptime, 'TIMESTAMP' : ts }
+ subs = {'UPTIME': uptime, 'TIMESTAMP': ts}
sys.stdout.write("%s\n" % util.render_string(msg_in, subs))
except Exception as e:
log.warn("failed to render string to stdout: %s" % e)
diff --git a/cloudinit/CloudConfig/cc_foo.py b/cloudinit/CloudConfig/cc_foo.py
index 48d20e5b..98e2d648 100644
--- a/cloudinit/CloudConfig/cc_foo.py
+++ b/cloudinit/CloudConfig/cc_foo.py
@@ -22,5 +22,6 @@ from cloudinit.CloudConfig import per_instance
frequency = per_instance
-def handle(_name,_cfg,_cloud,_log,_args):
+
+def handle(_name, _cfg, _cloud, _log, _args):
print "hi"
diff --git a/cloudinit/CloudConfig/cc_grub_dpkg.py b/cloudinit/CloudConfig/cc_grub_dpkg.py
index 97d79bdb..69cc96b9 100644
--- a/cloudinit/CloudConfig/cc_grub_dpkg.py
+++ b/cloudinit/CloudConfig/cc_grub_dpkg.py
@@ -20,37 +20,40 @@ import cloudinit.util as util
import traceback
import os
-def handle(_name,cfg,_cloud,log,_args):
-
- idevs=None
- idevs_empty=None
+
+def handle(_name, cfg, _cloud, log, _args):
+ idevs = None
+ idevs_empty = None
if "grub-dpkg" in cfg:
- idevs=util.get_cfg_option_str(cfg["grub-dpkg"],
- "grub-pc/install_devices",None)
- idevs_empty=util.get_cfg_option_str(cfg["grub-dpkg"],
- "grub-pc/install_devices_empty",None)
-
- if (( os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda") ) or
- ( os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda") )):
- if idevs == None: idevs=""
- if idevs_empty == None: idevs_empty="true"
+ idevs = util.get_cfg_option_str(cfg["grub-dpkg"],
+ "grub-pc/install_devices", None)
+ idevs_empty = util.get_cfg_option_str(cfg["grub-dpkg"],
+ "grub-pc/install_devices_empty", None)
+
+ if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
+ (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
+ if idevs == None:
+ idevs = ""
+ if idevs_empty == None:
+ idevs_empty = "true"
else:
- if idevs_empty == None: idevs_empty="false"
+ if idevs_empty == None:
+ idevs_empty = "false"
if idevs == None:
idevs = "/dev/sda"
- for dev in ( "/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):
+ for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):
if os.path.exists(dev):
idevs = dev
break
-
+
# now idevs and idevs_empty are set to determined values
# or, those set by user
dconf_sel = "grub-pc grub-pc/install_devices string %s\n" % idevs + \
"grub-pc grub-pc/install_devices_empty boolean %s\n" % idevs_empty
log.debug("setting grub debconf-set-selections with '%s','%s'" %
- (idevs,idevs_empty))
+ (idevs, idevs_empty))
try:
util.subp(('debconf-set-selections'), dconf_sel)
diff --git a/cloudinit/CloudConfig/cc_keys_to_console.py b/cloudinit/CloudConfig/cc_keys_to_console.py
index 08e8f085..941c49de 100644
--- a/cloudinit/CloudConfig/cc_keys_to_console.py
+++ b/cloudinit/CloudConfig/cc_keys_to_console.py
@@ -21,12 +21,15 @@ import subprocess
frequency = per_instance
-def handle(_name,cfg,_cloud,log,_args):
- cmd = [ '/usr/lib/cloud-init/write-ssh-key-fingerprints' ]
- fp_blacklist = util.get_cfg_option_list_or_str(cfg, "ssh_fp_console_blacklist", [])
- key_blacklist = util.get_cfg_option_list_or_str(cfg, "ssh_key_console_blacklist", ["ssh-dss"])
+
+def handle(_name, cfg, _cloud, log, _args):
+ cmd = ['/usr/lib/cloud-init/write-ssh-key-fingerprints']
+ fp_blacklist = util.get_cfg_option_list_or_str(cfg,
+ "ssh_fp_console_blacklist", [])
+ key_blacklist = util.get_cfg_option_list_or_str(cfg,
+ "ssh_key_console_blacklist", ["ssh-dss"])
try:
- confp = open('/dev/console',"wb")
+ confp = open('/dev/console', "wb")
cmd.append(','.join(fp_blacklist))
cmd.append(','.join(key_blacklist))
subprocess.call(cmd, stdout=confp)
diff --git a/cloudinit/CloudConfig/cc_landscape.py b/cloudinit/CloudConfig/cc_landscape.py
index 22a90665..f228d2cf 100644
--- a/cloudinit/CloudConfig/cc_landscape.py
+++ b/cloudinit/CloudConfig/cc_landscape.py
@@ -23,7 +23,7 @@ frequency = per_instance
lsc_client_cfg_file = "/etc/landscape/client.conf"
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
-lsc_builtincfg = {
+lsc_builtincfg = {
'client': {
'log_level': "info",
'url': "https://landscape.canonical.com/message-system",
@@ -32,14 +32,15 @@ lsc_builtincfg = {
}
}
-def handle(_name,cfg,_cloud,log,_args):
+
+def handle(_name, cfg, _cloud, log, _args):
"""
Basically turn a top level 'landscape' entry with a 'client' dict
and render it to ConfigObj format under '[client]' section in
/etc/landscape/client.conf
"""
- ls_cloudcfg = cfg.get("landscape", { })
+ ls_cloudcfg = cfg.get("landscape", {})
if not isinstance(ls_cloudcfg, dict):
raise(Exception("'landscape' existed in config, but not a dict"))
@@ -51,6 +52,7 @@ def handle(_name,cfg,_cloud,log,_args):
log.debug("updated %s" % lsc_client_cfg_file)
+
def mergeTogether(objs):
"""
merge together ConfigObj objects or things that ConfigObj() will take in
diff --git a/cloudinit/CloudConfig/cc_locale.py b/cloudinit/CloudConfig/cc_locale.py
index 8e91d3bf..9129ca30 100644
--- a/cloudinit/CloudConfig/cc_locale.py
+++ b/cloudinit/CloudConfig/cc_locale.py
@@ -20,24 +20,27 @@ import os.path
import subprocess
import traceback
+
def apply_locale(locale, cfgfile):
if os.path.exists('/usr/sbin/locale-gen'):
subprocess.Popen(['locale-gen', locale]).communicate()
if os.path.exists('/usr/sbin/update-locale'):
subprocess.Popen(['update-locale', locale]).communicate()
- util.render_to_file('default-locale', cfgfile, { 'locale' : locale })
+ util.render_to_file('default-locale', cfgfile, {'locale': locale})
+
-def handle(_name,cfg,cloud,log,args):
+def handle(_name, cfg, cloud, log, args):
if len(args) != 0:
locale = args[0]
else:
- locale = util.get_cfg_option_str(cfg,"locale",cloud.get_locale())
+ locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile",
"/etc/default/locale")
- if not locale: return
+ if not locale:
+ return
log.debug("setting locale to %s" % locale)
diff --git a/cloudinit/CloudConfig/cc_mcollective.py b/cloudinit/CloudConfig/cc_mcollective.py
index 38fe4a3c..2b8b2f96 100644
--- a/cloudinit/CloudConfig/cc_mcollective.py
+++ b/cloudinit/CloudConfig/cc_mcollective.py
@@ -27,49 +27,59 @@ import cloudinit.util as util
pubcert_file = "/etc/mcollective/ssl/server-public.pem"
pricert_file = "/etc/mcollective/ssl/server-private.pem"
+
# Our fake header section
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[nullsection]\n'
+
def readline(self):
if self.sechead:
- try: return self.sechead
- finally: self.sechead = None
- else: return self.fp.readline()
+ try:
+ return self.sechead
+ finally:
+ self.sechead = None
+ else:
+ return self.fp.readline()
-def handle(_name,cfg,_cloud,_log,_args):
+
+def handle(_name, cfg, _cloud, _log, _args):
# If there isn't a mcollective key in the configuration don't do anything
- if not cfg.has_key('mcollective'): return
+ if 'mcollective' not in cfg:
+ return
mcollective_cfg = cfg['mcollective']
# Start by installing the mcollective package ...
cc.install_packages(("mcollective",))
# ... and then update the mcollective configuration
- if mcollective_cfg.has_key('conf'):
+ if 'conf' in mcollective_cfg:
# Create object for reading server.cfg values
mcollective_config = ConfigParser.ConfigParser()
- # Read server.cfg values from original file in order to be able to mix the rest up
- mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/server.cfg')))
+ # Read server.cfg values from original file in order to be able to mix
+ # the rest up
+ mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/'
+ 'server.cfg')))
for cfg_name, cfg in mcollective_cfg['conf'].iteritems():
if cfg_name == 'public-cert':
util.write_file(pubcert_file, cfg, mode=0644)
mcollective_config.set(cfg_name,
'plugin.ssl_server_public', pubcert_file)
- mcollective_config.set(cfg_name,'securityprovider','ssl')
+ mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
elif cfg_name == 'private-cert':
util.write_file(pricert_file, cfg, mode=0600)
mcollective_config.set(cfg_name,
'plugin.ssl_server_private', pricert_file)
- mcollective_config.set(cfg_name,'securityprovider','ssl')
+ mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
else:
            # Iterate through the config items, we'll use ConfigParser.set
# to overwrite or create new items as needed
for o, v in cfg.iteritems():
- mcollective_config.set(cfg_name,o,v)
+ mcollective_config.set(cfg_name, o, v)
# We got all our config as wanted we'll rename
# the previous server.cfg and create our new one
- os.rename('/etc/mcollective/server.cfg','/etc/mcollective/server.cfg.old')
+ os.rename('/etc/mcollective/server.cfg',
+ '/etc/mcollective/server.cfg.old')
outputfile = StringIO.StringIO()
mcollective_config.write(outputfile)
# Now we got the whole file, write to disk except first line
@@ -79,9 +89,9 @@ def handle(_name,cfg,_cloud,_log,_args):
# search and replace of '=' with ':' could be problematic though.
# this most likely needs fixing.
util.write_file('/etc/mcollective/server.cfg',
- outputfile.getvalue().replace('[nullsection]\n','').replace(' =',':'),
+ outputfile.getvalue().replace('[nullsection]\n', '').replace(' =',
+ ':'),
mode=0644)
# Start mcollective
subprocess.check_call(['service', 'mcollective', 'start'])
-
diff --git a/cloudinit/CloudConfig/cc_mounts.py b/cloudinit/CloudConfig/cc_mounts.py
index a3036d5a..2fa57362 100644
--- a/cloudinit/CloudConfig/cc_mounts.py
+++ b/cloudinit/CloudConfig/cc_mounts.py
@@ -18,30 +18,32 @@
import cloudinit.util as util
import os
import re
-import string
+from string import whitespace # pylint: disable=W0402
+
def is_mdname(name):
# return true if this is a metadata service name
- if name in [ "ami", "root", "swap" ]:
+ if name in ["ami", "root", "swap"]:
return True
# names 'ephemeral0' or 'ephemeral1'
# 'ebs[0-9]' appears when '--block-device-mapping sdf=snap-d4d90bbc'
- for enumname in ( "ephemeral", "ebs" ):
+ for enumname in ("ephemeral", "ebs"):
if name.startswith(enumname) and name.find(":") == -1:
return True
return False
-def handle(_name,cfg,cloud,log,_args):
+
+def handle(_name, cfg, cloud, log, _args):
# fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
- defvals = [ None, None, "auto", "defaults,nobootwait", "0", "2" ]
+ defvals = [None, None, "auto", "defaults,nobootwait", "0", "2"]
defvals = cfg.get("mount_default_fields", defvals)
# these are our default set of mounts
- defmnts = [ [ "ephemeral0", "/mnt", "auto", defvals[3], "0", "2" ],
- [ "swap", "none", "swap", "sw", "0", "0" ] ]
+ defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"],
+ ["swap", "none", "swap", "sw", "0", "0"]]
- cfgmnt = [ ]
- if cfg.has_key("mounts"):
+ cfgmnt = []
+ if "mounts" in cfg:
cfgmnt = cfg["mounts"]
# shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1
@@ -50,7 +52,8 @@ def handle(_name,cfg,cloud,log,_args):
for i in range(len(cfgmnt)):
# skip something that wasn't a list
- if not isinstance(cfgmnt[i],list): continue
+ if not isinstance(cfgmnt[i], list):
+ continue
# workaround, allow user to specify 'ephemeral'
# rather than more ec2 correct 'ephemeral0'
@@ -75,7 +78,7 @@ def handle(_name,cfg,cloud,log,_args):
# but do not convert None to 'None' (LP: #898365)
for j in range(len(cfgmnt[i])):
if isinstance(cfgmnt[i][j], int):
- cfgmnt[i][j]=str(cfgmnt[i][j])
+ cfgmnt[i][j] = str(cfgmnt[i][j])
for i in range(len(cfgmnt)):
# fill in values with defaults from defvals above
@@ -93,12 +96,12 @@ def handle(_name,cfg,cloud,log,_args):
if cfgmnt[j][0] == cfgmnt[i][0]:
cfgmnt[j][1] = None
-
# for each of the "default" mounts, add them only if no other
# entry has the same device name
for defmnt in defmnts:
devname = cloud.device_name_to_device(defmnt[0])
- if devname is None: continue
+ if devname is None:
+ continue
if devname.startswith("/"):
defmnt[0] = devname
else:
@@ -109,54 +112,65 @@ def handle(_name,cfg,cloud,log,_args):
if cfgm[0] == defmnt[0]:
cfgmnt_has = True
break
-
- if cfgmnt_has: continue
- cfgmnt.append(defmnt)
+ if cfgmnt_has:
+ continue
+ cfgmnt.append(defmnt)
# now, each entry in the cfgmnt list has all fstab values
# if the second field is None (not the string, the value) we skip it
actlist = [x for x in cfgmnt if x[1] is not None]
- if len(actlist) == 0: return
+ if len(actlist) == 0:
+ return
- comment="comment=cloudconfig"
- cc_lines = [ ]
+ comment = "comment=cloudconfig"
+ cc_lines = []
needswap = False
- dirs = [ ]
+ dirs = []
for line in actlist:
# write 'comment' in the fs_mntops, entry, claiming this
- line[3]="%s,comment=cloudconfig" % line[3]
- if line[2] == "swap": needswap = True
- if line[1].startswith("/"): dirs.append(line[1])
+ line[3] = "%s,comment=cloudconfig" % line[3]
+ if line[2] == "swap":
+ needswap = True
+ if line[1].startswith("/"):
+ dirs.append(line[1])
cc_lines.append('\t'.join(line))
- fstab_lines = [ ]
- fstab=open("/etc/fstab","r+")
- ws = re.compile("[%s]+" % string.whitespace)
+ fstab_lines = []
+ fstab = open("/etc/fstab", "r+")
+ ws = re.compile("[%s]+" % whitespace)
for line in fstab.read().splitlines():
try:
toks = ws.split(line)
- if toks[3].find(comment) != -1: continue
+ if toks[3].find(comment) != -1:
+ continue
except:
pass
fstab_lines.append(line)
fstab_lines.extend(cc_lines)
-
+
fstab.seek(0)
fstab.write("%s\n" % '\n'.join(fstab_lines))
fstab.truncate()
fstab.close()
if needswap:
- try: util.subp(("swapon", "-a"))
- except: log.warn("Failed to enable swap")
+ try:
+ util.subp(("swapon", "-a"))
+ except:
+ log.warn("Failed to enable swap")
for d in dirs:
- if os.path.exists(d): continue
- try: os.makedirs(d)
- except: log.warn("Failed to make '%s' config-mount\n",d)
+ if os.path.exists(d):
+ continue
+ try:
+ os.makedirs(d)
+ except:
+ log.warn("Failed to make '%s' config-mount\n", d)
- try: util.subp(("mount","-a"))
- except: log.warn("'mount -a' failed")
+ try:
+ util.subp(("mount", "-a"))
+ except:
+ log.warn("'mount -a' failed")
diff --git a/cloudinit/CloudConfig/cc_phone_home.py b/cloudinit/CloudConfig/cc_phone_home.py
index 7897d31b..73066444 100644
--- a/cloudinit/CloudConfig/cc_phone_home.py
+++ b/cloudinit/CloudConfig/cc_phone_home.py
@@ -20,7 +20,9 @@ import cloudinit.util as util
from time import sleep
frequency = per_instance
-post_list_all = [ 'pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id', 'hostname' ]
+post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id',
+ 'hostname']
+
# phone_home:
# url: http://my.foo.bar/$INSTANCE/
@@ -30,12 +32,13 @@ post_list_all = [ 'pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id',
# phone_home:
# url: http://my.foo.bar/$INSTANCE_ID/
# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ]
-#
-def handle(_name,cfg,cloud,log,args):
+#
+def handle(_name, cfg, cloud, log, args):
if len(args) != 0:
- ph_cfg = util.readconf(args[0])
+ ph_cfg = util.read_conf(args[0])
else:
- if not 'phone_home' in cfg: return
+ if not 'phone_home' in cfg:
+ return
ph_cfg = cfg['phone_home']
if 'url' not in ph_cfg:
@@ -44,7 +47,7 @@ def handle(_name,cfg,cloud,log,args):
url = ph_cfg['url']
post_list = ph_cfg.get('post', 'all')
- tries = ph_cfg.get('tries',10)
+ tries = ph_cfg.get('tries', 10)
try:
tries = int(tries)
except:
@@ -54,7 +57,7 @@ def handle(_name,cfg,cloud,log,args):
if post_list == "all":
post_list = post_list_all
- all_keys = { }
+ all_keys = {}
all_keys['instance_id'] = cloud.get_instance_id()
all_keys['hostname'] = cloud.get_hostname()
@@ -72,7 +75,7 @@ def handle(_name,cfg,cloud,log,args):
except:
log.warn("%s: failed to open in phone_home" % path)
- submit_keys = { }
+ submit_keys = {}
for k in post_list:
if k in all_keys:
submit_keys[k] = all_keys[k]
@@ -80,20 +83,22 @@ def handle(_name,cfg,cloud,log,args):
submit_keys[k] = "N/A"
log.warn("requested key %s from 'post' list not available")
- url = util.render_string(url, { 'INSTANCE_ID' : all_keys['instance_id'] })
+ url = util.render_string(url, {'INSTANCE_ID': all_keys['instance_id']})
- last_e = None
- for i in range(0,tries):
+ null_exc = object()
+ last_e = null_exc
+ for i in range(0, tries):
try:
util.readurl(url, submit_keys)
- log.debug("succeeded submit to %s on try %i" % (url, i+1))
+ log.debug("succeeded submit to %s on try %i" % (url, i + 1))
return
except Exception as e:
- log.debug("failed to post to %s on try %i" % (url, i+1))
+ log.debug("failed to post to %s on try %i" % (url, i + 1))
last_e = e
sleep(3)
log.warn("failed to post to %s in %i tries" % (url, tries))
- if last_e: raise(last_e)
-
+ if last_e is not null_exc:
+ raise(last_e)
+
return
diff --git a/cloudinit/CloudConfig/cc_puppet.py b/cloudinit/CloudConfig/cc_puppet.py
index 3748559a..6db1ed5c 100644
--- a/cloudinit/CloudConfig/cc_puppet.py
+++ b/cloudinit/CloudConfig/cc_puppet.py
@@ -25,21 +25,25 @@ import ConfigParser
import cloudinit.CloudConfig as cc
import cloudinit.util as util
-def handle(_name,cfg,cloud,log,_args):
+
+def handle(_name, cfg, cloud, log, _args):
# If there isn't a puppet key in the configuration don't do anything
- if not cfg.has_key('puppet'): return
+ if 'puppet' not in cfg:
+ return
puppet_cfg = cfg['puppet']
# Start by installing the puppet package ...
cc.install_packages(("puppet",))
# ... and then update the puppet configuration
- if puppet_cfg.has_key('conf'):
+ if 'conf' in puppet_cfg:
# Add all sections from the conf object to puppet.conf
puppet_conf_fh = open('/etc/puppet/puppet.conf', 'r')
# Create object for reading puppet.conf values
puppet_config = ConfigParser.ConfigParser()
- # Read puppet.conf values from original file in order to be able to mix the rest up
- puppet_config.readfp(StringIO.StringIO(''.join(i.lstrip() for i in puppet_conf_fh.readlines())))
+ # Read puppet.conf values from original file in order to be able to
+ # mix the rest up
+ puppet_config.readfp(StringIO.StringIO(''.join(i.lstrip() for i in
+ puppet_conf_fh.readlines())))
# Close original file, no longer needed
puppet_conf_fh.close()
for cfg_name, cfg in puppet_cfg['conf'].iteritems():
@@ -63,7 +67,8 @@ def handle(_name,cfg,cloud,log,_args):
util.restorecon_if_possible('/var/lib/puppet', recursive=True)
else:
#puppet_conf_fh.write("\n[%s]\n" % (cfg_name))
- # If puppet.conf already has this section we don't want to write it again
+ # If puppet.conf already has this section we don't want to
+ # write it again
if puppet_config.has_section(cfg_name) == False:
puppet_config.add_section(cfg_name)
            # Iterate through the config items, we'll use ConfigParser.set
@@ -77,11 +82,11 @@ def handle(_name,cfg,cloud,log,_args):
cloud.datasource.get_instance_id())
# certname needs to be downcase
v = v.lower()
- puppet_config.set(cfg_name,o,v)
+ puppet_config.set(cfg_name, o, v)
#puppet_conf_fh.write("%s=%s\n" % (o, v))
# We got all our config as wanted we'll rename
# the previous puppet.conf and create our new one
- os.rename('/etc/puppet/puppet.conf','/etc/puppet/puppet.conf.old')
+ os.rename('/etc/puppet/puppet.conf', '/etc/puppet/puppet.conf.old')
with open('/etc/puppet/puppet.conf', 'wb') as configfile:
puppet_config.write(configfile)
util.restorecon_if_possible('/etc/puppet/puppet.conf')
@@ -98,4 +103,3 @@ def handle(_name,cfg,cloud,log,_args):
log.warn("Do not know how to enable puppet service on this system")
# Start puppetd
subprocess.check_call(['service', 'puppet', 'start'])
-
diff --git a/cloudinit/CloudConfig/cc_resizefs.py b/cloudinit/CloudConfig/cc_resizefs.py
index adec70be..f29f886d 100644
--- a/cloudinit/CloudConfig/cc_resizefs.py
+++ b/cloudinit/CloudConfig/cc_resizefs.py
@@ -25,24 +25,26 @@ from cloudinit.CloudConfig import per_always
frequency = per_always
-def handle(_name,cfg,_cloud,log,args):
+
+def handle(_name, cfg, _cloud, log, args):
if len(args) != 0:
resize_root = False
- if str(args[0]).lower() in [ 'true', '1', 'on', 'yes']:
+ if str(args[0]).lower() in ['true', '1', 'on', 'yes']:
resize_root = True
else:
- resize_root = util.get_cfg_option_bool(cfg,"resize_rootfs",True)
+ resize_root = util.get_cfg_option_bool(cfg, "resize_rootfs", True)
- if not resize_root: return
+ if not resize_root:
+ return
# this really only uses the filename from mktemp, then we mknod into it
(fd, devpth) = tempfile.mkstemp()
os.unlink(devpth)
os.close(fd)
-
+
try:
- st_dev=os.stat("/").st_dev
- dev=os.makedev(os.major(st_dev),os.minor(st_dev))
+ st_dev = os.stat("/").st_dev
+ dev = os.makedev(os.major(st_dev), os.minor(st_dev))
os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
except:
if util.islxc():
@@ -51,9 +53,9 @@ def handle(_name,cfg,_cloud,log,args):
log.warn("Failed to make device node to resize /")
raise
- cmd = [ 'blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth ]
+ cmd = ['blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth]
try:
- (fstype,_err) = util.subp(cmd)
+ (fstype, _err) = util.subp(cmd)
except subprocess.CalledProcessError as e:
log.warn("Failed to get filesystem type of maj=%s, min=%s via: %s" %
(os.major(st_dev), os.minor(st_dev), cmd))
@@ -61,13 +63,13 @@ def handle(_name,cfg,_cloud,log,args):
os.unlink(devpth)
raise
- log.debug("resizing root filesystem (type=%s, maj=%i, min=%i)" %
- (fstype.rstrip("\n"), os.major(st_dev), os.minor(st_dev)))
+ log.debug("resizing root filesystem (type=%s, maj=%i, min=%i)" %
+ (str(fstype).rstrip("\n"), os.major(st_dev), os.minor(st_dev)))
- if fstype.startswith("ext"):
- resize_cmd = [ 'resize2fs', devpth ]
+ if str(fstype).startswith("ext"):
+ resize_cmd = ['resize2fs', devpth]
elif fstype == "xfs":
- resize_cmd = [ 'xfs_growfs', devpth ]
+ resize_cmd = ['xfs_growfs', devpth]
else:
os.unlink(devpth)
log.debug("not resizing unknown filesystem %s" % fstype)
diff --git a/cloudinit/CloudConfig/cc_rightscale_userdata.py b/cloudinit/CloudConfig/cc_rightscale_userdata.py
index 2b43023c..d6e93aa3 100644
--- a/cloudinit/CloudConfig/cc_rightscale_userdata.py
+++ b/cloudinit/CloudConfig/cc_rightscale_userdata.py
@@ -24,9 +24,9 @@
## for cloud-init support, there will be a key named
## 'CLOUD_INIT_REMOTE_HOOK'.
##
-## This cloud-config module will
+## This cloud-config module will
## - read the blob of data from raw user data, and parse it as key/value
-## - for each key that is found, download the content to
+## - for each key that is found, download the content to
## the local instance/scripts directory and set them executable.
## - the files in that directory will be run by the user-scripts module
## Therefore, this must run before that.
@@ -42,7 +42,8 @@ frequency = per_instance
my_name = "cc_rightscale_userdata"
my_hookname = 'CLOUD_INIT_REMOTE_HOOK'
-def handle(_name,_cfg,cloud,log,_args):
+
+def handle(_name, _cfg, cloud, log, _args):
try:
ud = cloud.get_userdata_raw()
except:
@@ -51,7 +52,8 @@ def handle(_name,_cfg,cloud,log,_args):
try:
mdict = parse_qs(ud)
- if not my_hookname in mdict: return
+ if not my_hookname in mdict:
+ return
except:
log.warn("failed to urlparse.parse_qa(userdata_raw())")
raise
@@ -60,14 +62,15 @@ def handle(_name,_cfg,cloud,log,_args):
i = 0
first_e = None
for url in mdict[my_hookname]:
- fname = "%s/rightscale-%02i" % (scripts_d,i)
- i = i +1
+ fname = "%s/rightscale-%02i" % (scripts_d, i)
+ i = i + 1
try:
content = util.readurl(url)
util.write_file(fname, content, mode=0700)
except Exception as e:
- if not first_e: first_e = None
+ if not first_e:
+ first_e = None
log.warn("%s failed to read %s: %s" % (my_name, url, e))
-
+
if first_e:
raise(e)
diff --git a/cloudinit/CloudConfig/cc_rsyslog.py b/cloudinit/CloudConfig/cc_rsyslog.py
index ab85a6d8..552597a5 100644
--- a/cloudinit/CloudConfig/cc_rsyslog.py
+++ b/cloudinit/CloudConfig/cc_rsyslog.py
@@ -24,7 +24,8 @@ import traceback
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
-def handle(_name,cfg,_cloud,log,_args):
+
+def handle(_name, cfg, _cloud, log, _args):
# rsyslog:
# - "*.* @@192.158.1.1"
# - content: "*.* @@192.0.2.1:10514"
@@ -33,15 +34,16 @@ def handle(_name,cfg,_cloud,log,_args):
# *.* @@syslogd.example.com
# process 'rsyslog'
- if not 'rsyslog' in cfg: return
+ if not 'rsyslog' in cfg:
+ return
def_dir = cfg.get('rsyslog_dir', DEF_DIR)
def_fname = cfg.get('rsyslog_filename', DEF_FILENAME)
- files = [ ]
- elst = [ ]
+ files = []
+ elst = []
for ent in cfg['rsyslog']:
- if isinstance(ent,dict):
+ if isinstance(ent, dict):
if not "content" in ent:
elst.append((ent, "no 'content' entry"))
continue
@@ -52,7 +54,7 @@ def handle(_name,cfg,_cloud,log,_args):
filename = def_fname
if not filename.startswith("/"):
- filename = "%s/%s" % (def_dir,filename)
+ filename = "%s/%s" % (def_dir, filename)
omode = "ab"
# truncate filename first time you see it
@@ -69,7 +71,7 @@ def handle(_name,cfg,_cloud,log,_args):
# need to restart syslogd
restarted = False
try:
- # if this config module is running at cloud-init time
+ # if this config module is running at cloud-init time
# (before rsyslog is running) we don't actually have to
# restart syslog.
#
@@ -83,7 +85,7 @@ def handle(_name,cfg,_cloud,log,_args):
except Exception as e:
elst.append(("restart", str(e)))
-
+
if restarted:
# this only needs to run if we *actually* restarted
# syslog above.
diff --git a/cloudinit/CloudConfig/cc_runcmd.py b/cloudinit/CloudConfig/cc_runcmd.py
index d255223b..cb297568 100644
--- a/cloudinit/CloudConfig/cc_runcmd.py
+++ b/cloudinit/CloudConfig/cc_runcmd.py
@@ -18,12 +18,13 @@
import cloudinit.util as util
-def handle(_name,cfg,cloud,log,_args):
- if not cfg.has_key("runcmd"):
+
+def handle(_name, cfg, cloud, log, _args):
+ if "runcmd" not in cfg:
return
- outfile="%s/runcmd" % cloud.get_ipath('scripts')
+ outfile = "%s/runcmd" % cloud.get_ipath('scripts')
try:
content = util.shellify(cfg["runcmd"])
- util.write_file(outfile,content,0700)
+ util.write_file(outfile, content, 0700)
except:
log.warn("failed to open %s for runcmd" % outfile)
diff --git a/cloudinit/CloudConfig/cc_scripts_per_boot.py b/cloudinit/CloudConfig/cc_scripts_per_boot.py
index fb643c6d..2eb77c18 100644
--- a/cloudinit/CloudConfig/cc_scripts_per_boot.py
+++ b/cloudinit/CloudConfig/cc_scripts_per_boot.py
@@ -23,7 +23,8 @@ from cloudinit import get_cpath
frequency = per_always
runparts_path = "%s/%s" % (get_cpath(), "scripts/per-boot")
-def handle(_name,_cfg,_cloud,log,_args):
+
+def handle(_name, _cfg, _cloud, log, _args):
try:
util.runparts(runparts_path)
except:
diff --git a/cloudinit/CloudConfig/cc_scripts_per_instance.py b/cloudinit/CloudConfig/cc_scripts_per_instance.py
index b0f0601a..0141c977 100644
--- a/cloudinit/CloudConfig/cc_scripts_per_instance.py
+++ b/cloudinit/CloudConfig/cc_scripts_per_instance.py
@@ -23,7 +23,8 @@ from cloudinit import get_cpath
frequency = per_instance
runparts_path = "%s/%s" % (get_cpath(), "scripts/per-instance")
-def handle(_name,_cfg,_cloud,log,_args):
+
+def handle(_name, _cfg, _cloud, log, _args):
try:
util.runparts(runparts_path)
except:
diff --git a/cloudinit/CloudConfig/cc_scripts_per_once.py b/cloudinit/CloudConfig/cc_scripts_per_once.py
index 2ab81840..bbf77dfb 100644
--- a/cloudinit/CloudConfig/cc_scripts_per_once.py
+++ b/cloudinit/CloudConfig/cc_scripts_per_once.py
@@ -23,7 +23,8 @@ from cloudinit import get_cpath
frequency = per_once
runparts_path = "%s/%s" % (get_cpath(), "scripts/per-once")
-def handle(_name,_cfg,_cloud,log,_args):
+
+def handle(_name, _cfg, _cloud, log, _args):
try:
util.runparts(runparts_path)
except:
diff --git a/cloudinit/CloudConfig/cc_scripts_user.py b/cloudinit/CloudConfig/cc_scripts_user.py
index 9c7f2322..949b4198 100644
--- a/cloudinit/CloudConfig/cc_scripts_user.py
+++ b/cloudinit/CloudConfig/cc_scripts_user.py
@@ -23,7 +23,8 @@ from cloudinit import get_ipath_cur
frequency = per_instance
runparts_path = "%s/%s" % (get_ipath_cur(), "scripts")
-def handle(_name,_cfg,_cloud,log,_args):
+
+def handle(_name, _cfg, _cloud, log, _args):
try:
util.runparts(runparts_path)
except:
diff --git a/cloudinit/CloudConfig/cc_set_hostname.py b/cloudinit/CloudConfig/cc_set_hostname.py
index 4f19b0c8..0b1c8924 100644
--- a/cloudinit/CloudConfig/cc_set_hostname.py
+++ b/cloudinit/CloudConfig/cc_set_hostname.py
@@ -18,12 +18,13 @@
import cloudinit.util as util
-def handle(_name,cfg,cloud,log,_args):
- if util.get_cfg_option_bool(cfg,"preserve_hostname",False):
+
+def handle(_name, cfg, cloud, log, _args):
+ if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
log.debug("preserve_hostname is set. not setting hostname")
return(True)
- ( hostname, _fqdn ) = util.get_hostname_fqdn(cfg, cloud)
+ (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
set_hostname(hostname, log)
except Exception:
@@ -32,7 +33,8 @@ def handle(_name,cfg,cloud,log,_args):
return(True)
+
def set_hostname(hostname, log):
util.subp(['hostname', hostname])
- util.write_file("/etc/hostname","%s\n" % hostname, 0644)
+ util.write_file("/etc/hostname", "%s\n" % hostname, 0644)
log.debug("populated /etc/hostname with %s on first boot", hostname)
diff --git a/cloudinit/CloudConfig/cc_set_passwords.py b/cloudinit/CloudConfig/cc_set_passwords.py
index 07e3ca1b..f40544b3 100644
--- a/cloudinit/CloudConfig/cc_set_passwords.py
+++ b/cloudinit/CloudConfig/cc_set_passwords.py
@@ -19,16 +19,17 @@
import cloudinit.util as util
import sys
import random
-import string
+from string import letters, digits # pylint: disable=W0402
-def handle(_name,cfg,_cloud,log,args):
+
+def handle(_name, cfg, _cloud, log, args):
if len(args) != 0:
# if run from command line, and give args, wipe the chpasswd['list']
password = args[0]
if 'chpasswd' in cfg and 'list' in cfg['chpasswd']:
del cfg['chpasswd']['list']
else:
- password = util.get_cfg_option_str(cfg,"password",None)
+ password = util.get_cfg_option_str(cfg, "password", None)
expire = True
pw_auth = "no"
@@ -37,11 +38,11 @@ def handle(_name,cfg,_cloud,log,args):
if 'chpasswd' in cfg:
chfg = cfg['chpasswd']
- plist = util.get_cfg_option_str(chfg,'list',plist)
- expire = util.get_cfg_option_bool(chfg,'expire', expire)
+ plist = util.get_cfg_option_str(chfg, 'list', plist)
+ expire = util.get_cfg_option_bool(chfg, 'expire', expire)
if not plist and password:
- user = util.get_cfg_option_str(cfg,"user","ubuntu")
+ user = util.get_cfg_option_str(cfg, "user", "ubuntu")
plist = "%s:%s" % (user, password)
errors = []
@@ -50,13 +51,13 @@ def handle(_name,cfg,_cloud,log,args):
randlist = []
users = []
for line in plist.splitlines():
- u,p = line.split(':',1)
+ u, p = line.split(':', 1)
if p == "R" or p == "RANDOM":
p = rand_user_password()
- randlist.append("%s:%s" % (u,p))
- plist_in.append("%s:%s" % (u,p))
+ randlist.append("%s:%s" % (u, p))
+ plist_in.append("%s:%s" % (u, p))
users.append(u)
-
+
ch_in = '\n'.join(plist_in)
try:
util.subp(['chpasswd'], ch_in)
@@ -67,36 +68,36 @@ def handle(_name,cfg,_cloud,log,args):
if len(randlist):
sys.stdout.write("%s\n%s\n" % ("Set the following passwords\n",
- '\n'.join(randlist) ))
+ '\n'.join(randlist)))
if expire:
- enum=len(errors)
+ enum = len(errors)
for u in users:
try:
util.subp(['passwd', '--expire', u])
except Exception as e:
errors.append(e)
- log.warn("failed to expire account for %s" % u )
+ log.warn("failed to expire account for %s" % u)
if enum == len(errors):
log.debug("expired passwords for: %s" % u)
if 'ssh_pwauth' in cfg:
val = str(cfg['ssh_pwauth']).lower()
- if val in ( "true", "1", "yes"):
- pw_auth="yes"
- change_pwauth=True
- elif val in ( "false", "0", "no"):
- pw_auth="no"
- change_pwauth=True
+ if val in ("true", "1", "yes"):
+ pw_auth = "yes"
+ change_pwauth = True
+ elif val in ("false", "0", "no"):
+ pw_auth = "no"
+ change_pwauth = True
else:
- change_pwauth=False
-
+ change_pwauth = False
+
if change_pwauth:
pa_s = "\(#*\)\(PasswordAuthentication[[:space:]]\+\)\(yes\|no\)"
msg = "set PasswordAuthentication to '%s'" % pw_auth
try:
- cmd = [ 'sed', '-i', 's,%s,\\2%s,' % (pa_s, pw_auth),
- '/etc/ssh/sshd_config' ]
+ cmd = ['sed', '-i', 's,%s,\\2%s,' % (pa_s, pw_auth),
+ '/etc/ssh/sshd_config']
util.subp(cmd)
log.debug(msg)
except Exception as e:
@@ -104,7 +105,8 @@ def handle(_name,cfg,_cloud,log,args):
errors.append(e)
try:
- p = util.subp(['service', cfg.get('ssh_svcname', 'ssh'), 'restart'])
+ p = util.subp(['service', cfg.get('ssh_svcname', 'ssh'),
+ 'restart'])
log.debug("restarted sshd")
except:
log.warn("restart of ssh failed")
@@ -114,11 +116,12 @@ def handle(_name,cfg,_cloud,log,args):
return
-def rand_str(strlen=32, select_from=string.letters+string.digits):
+
+def rand_str(strlen=32, select_from=letters + digits):
return("".join([random.choice(select_from) for _x in range(0, strlen)]))
-def rand_user_password(pwlen=9):
- selfrom=(string.letters.translate(None,'loLOI') +
- string.digits.translate(None,'01'))
- return(rand_str(pwlen,select_from=selfrom))
+def rand_user_password(pwlen=9):
+ selfrom = (letters.translate(None, 'loLOI') +
+ digits.translate(None, '01'))
+ return(rand_str(pwlen, select_from=selfrom))
diff --git a/cloudinit/CloudConfig/cc_ssh.py b/cloudinit/CloudConfig/cc_ssh.py
index 0aad2187..cdf90bdc 100644
--- a/cloudinit/CloudConfig/cc_ssh.py
+++ b/cloudinit/CloudConfig/cc_ssh.py
@@ -21,49 +21,50 @@ import os
import glob
import subprocess
-DISABLE_ROOT_OPTS="no-port-forwarding,no-agent-forwarding,no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" rather than the user \\\"root\\\".\';echo;sleep 10\""
+DISABLE_ROOT_OPTS = "no-port-forwarding,no-agent-forwarding," \
+"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " \
+"rather than the user \\\"root\\\".\';echo;sleep 10\""
-global_log = None
-
-def handle(_name,cfg,cloud,log,_args):
- global global_log
- global_log = log
+def handle(_name, cfg, cloud, log, _args):
# remove the static keys from the pristine image
if cfg.get("ssh_deletekeys", True):
for f in glob.glob("/etc/ssh/ssh_host_*key*"):
- try: os.unlink(f)
- except: pass
+ try:
+ os.unlink(f)
+ except:
+ pass
- if cfg.has_key("ssh_keys"):
+ if "ssh_keys" in cfg:
# if there are keys in cloud-config, use them
key2file = {
- "rsa_private" : ("/etc/ssh/ssh_host_rsa_key", 0600),
- "rsa_public" : ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
- "dsa_private" : ("/etc/ssh/ssh_host_dsa_key", 0600),
- "dsa_public" : ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
- "ecdsa_private" : ("/etc/ssh/ssh_host_ecdsa_key", 0600),
- "ecdsa_public" : ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
+ "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
+ "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
+ "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
+ "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
+ "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
+ "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
}
- for key,val in cfg["ssh_keys"].items():
- if key2file.has_key(key):
- util.write_file(key2file[key][0],val,key2file[key][1])
+ for key, val in cfg["ssh_keys"].items():
+ if key in key2file:
+ util.write_file(key2file[key][0], val, key2file[key][1])
- priv2pub = { 'rsa_private':'rsa_public', 'dsa_private':'dsa_public',
- 'ecdsa_private': 'ecdsa_public', }
+ priv2pub = {'rsa_private': 'rsa_public', 'dsa_private': 'dsa_public',
+ 'ecdsa_private': 'ecdsa_public', }
cmd = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
- for priv,pub in priv2pub.iteritems():
- if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']: continue
- pair=(key2file[priv][0], key2file[pub][0])
+ for priv, pub in priv2pub.iteritems():
+ if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
+ continue
+ pair = (key2file[priv][0], key2file[pub][0])
subprocess.call(('sh', '-xc', cmd % pair))
log.debug("generated %s from %s" % pair)
else:
# if not, generate them
for keytype in util.get_cfg_option_list_or_str(cfg, 'ssh_genkeytypes',
- ['rsa', 'dsa', 'ecdsa']):
+ ['rsa', 'dsa', 'ecdsa']):
keyfile = '/etc/ssh/ssh_host_%s_key' % keytype
if not os.path.exists(keyfile):
subprocess.call(['ssh-keygen', '-t', keytype, '-N', '',
@@ -72,30 +73,31 @@ def handle(_name,cfg,cloud,log,_args):
util.restorecon_if_possible('/etc/ssh', recursive=True)
try:
- user = util.get_cfg_option_str(cfg,'user')
+ user = util.get_cfg_option_str(cfg, 'user')
disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
DISABLE_ROOT_OPTS)
keys = cloud.get_public_ssh_keys()
- if cfg.has_key("ssh_authorized_keys"):
+ if "ssh_authorized_keys" in cfg:
cfgkeys = cfg["ssh_authorized_keys"]
keys.extend(cfgkeys)
- apply_credentials(keys,user,disable_root, disable_root_opts)
+ apply_credentials(keys, user, disable_root, disable_root_opts, log)
except:
util.logexc(log)
log.warn("applying credentials failed!\n")
-def apply_credentials(keys, user, disable_root, disable_root_opts=DISABLE_ROOT_OPTS, log=global_log):
+
+def apply_credentials(keys, user, disable_root,
+ disable_root_opts=DISABLE_ROOT_OPTS, log=None):
keys = set(keys)
if user:
sshutil.setup_user_keys(keys, user, '', log)
-
+
if disable_root:
key_prefix = disable_root_opts.replace('$USER', user)
else:
key_prefix = ''
sshutil.setup_user_keys(keys, 'root', key_prefix, log)
-
diff --git a/cloudinit/CloudConfig/cc_ssh_import_id.py b/cloudinit/CloudConfig/cc_ssh_import_id.py
index 7e7a54a1..f14a99cd 100644
--- a/cloudinit/CloudConfig/cc_ssh_import_id.py
+++ b/cloudinit/CloudConfig/cc_ssh_import_id.py
@@ -19,19 +19,21 @@ import cloudinit.util as util
import subprocess
import traceback
-def handle(_name,cfg,_cloud,log,args):
+
+def handle(_name, cfg, _cloud, log, args):
if len(args) != 0:
user = args[0]
- ids = [ ]
+ ids = []
if len(args) > 1:
ids = args[1:]
else:
- user = util.get_cfg_option_str(cfg,"user","ubuntu")
- ids = util.get_cfg_option_list_or_str(cfg,"ssh_import_id",[])
+ user = util.get_cfg_option_str(cfg, "user", "ubuntu")
+ ids = util.get_cfg_option_list_or_str(cfg, "ssh_import_id", [])
- if len(ids) == 0: return
+ if len(ids) == 0:
+ return
- cmd = [ "sudo", "-Hu", user, "ssh-import-id" ] + ids
+ cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
log.debug("importing ssh ids. cmd = %s" % cmd)
@@ -39,7 +41,7 @@ def handle(_name,cfg,_cloud,log,args):
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
log.debug(traceback.format_exc(e))
- raise Exception("Cmd returned %s: %s" % ( e.returncode, cmd))
+ raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
except OSError as e:
log.debug(traceback.format_exc(e))
- raise Exception("Cmd failed to execute: %s" % ( cmd ))
+ raise Exception("Cmd failed to execute: %s" % (cmd))
diff --git a/cloudinit/CloudConfig/cc_timezone.py b/cloudinit/CloudConfig/cc_timezone.py
index 26b2796d..6f0e8f6b 100644
--- a/cloudinit/CloudConfig/cc_timezone.py
+++ b/cloudinit/CloudConfig/cc_timezone.py
@@ -24,22 +24,24 @@ import shutil
frequency = per_instance
tz_base = "/usr/share/zoneinfo"
-def handle(_name,cfg,_cloud,log,args):
+
+def handle(_name, cfg, _cloud, log, args):
if len(args) != 0:
timezone = args[0]
else:
- timezone = util.get_cfg_option_str(cfg,"timezone",False)
+ timezone = util.get_cfg_option_str(cfg, "timezone", False)
- if not timezone: return
+ if not timezone:
+ return
- tz_file = "%s/%s" % (tz_base , timezone)
+ tz_file = "%s/%s" % (tz_base, timezone)
if not os.path.isfile(tz_file):
log.debug("Invalid timezone %s" % tz_file)
raise Exception("Invalid timezone %s" % tz_file)
try:
- fp=open("/etc/timezone","wb")
+ fp = open("/etc/timezone", "wb")
fp.write("%s\n" % timezone)
fp.close()
except:
@@ -58,6 +60,6 @@ def handle(_name,cfg,_cloud,log,args):
except:
log.debug("failed to copy %s to /etc/localtime" % tz_file)
raise
-
+
log.debug("set timezone to %s" % timezone)
return
diff --git a/cloudinit/CloudConfig/cc_update_etc_hosts.py b/cloudinit/CloudConfig/cc_update_etc_hosts.py
index 1c245e67..131e1a1e 100644
--- a/cloudinit/CloudConfig/cc_update_etc_hosts.py
+++ b/cloudinit/CloudConfig/cc_update_etc_hosts.py
@@ -21,10 +21,11 @@ import StringIO
frequency = per_always
-def handle(_name,cfg,cloud,log,_args):
- ( hostname, fqdn ) = util.get_hostname_fqdn(cfg, cloud)
- manage_hosts = util.get_cfg_option_bool(cfg,"manage_etc_hosts", False)
+def handle(_name, cfg, cloud, log, _args):
+ (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+
+ manage_hosts = util.get_cfg_option_bool(cfg, "manage_etc_hosts", False)
if manage_hosts in ("True", "true", True, "template"):
# render from template file
try:
@@ -32,8 +33,8 @@ def handle(_name,cfg,cloud,log,_args):
log.info("manage_etc_hosts was set, but no hostname found")
return
- util.render_to_file('hosts', '/etc/hosts', \
- { 'hostname' : hostname, 'fqdn' : fqdn })
+ util.render_to_file('hosts', '/etc/hosts',
+ {'hostname': hostname, 'fqdn': fqdn})
except Exception:
log.warn("failed to update /etc/hosts")
raise
@@ -76,9 +77,8 @@ def update_etc_hosts(hostname, fqdn, _log):
new_etchosts.write("%s%s" % (header, hosts_line))
need_write = True
if need_write == True:
- new_etcfile = open ('/etc/hosts','wb')
+ new_etcfile = open('/etc/hosts', 'wb')
new_etcfile.write(new_etchosts.getvalue())
new_etcfile.close()
new_etchosts.close()
return
-
diff --git a/cloudinit/CloudConfig/cc_update_hostname.py b/cloudinit/CloudConfig/cc_update_hostname.py
index 4bc1cb2b..2387a8dc 100644
--- a/cloudinit/CloudConfig/cc_update_hostname.py
+++ b/cloudinit/CloudConfig/cc_update_hostname.py
@@ -22,25 +22,27 @@ from cloudinit.CloudConfig import per_always
frequency = per_always
-def handle(_name,cfg,cloud,log,_args):
- if util.get_cfg_option_bool(cfg,"preserve_hostname",False):
+
+def handle(_name, cfg, cloud, log, _args):
+ if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
log.debug("preserve_hostname is set. not updating hostname")
return
- ( hostname, _fqdn ) = util.get_hostname_fqdn(cfg, cloud)
+ (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
- prev ="%s/%s" % (cloud.get_cpath('data'),"previous-hostname")
+ prev = "%s/%s" % (cloud.get_cpath('data'), "previous-hostname")
update_hostname(hostname, prev, log)
except Exception:
log.warn("failed to set hostname\n")
raise
+
# read hostname from a 'hostname' file
# allow for comments and stripping line endings.
# if file doesn't exist, or no contents, return default
def read_hostname(filename, default=None):
try:
- fp = open(filename,"r")
+ fp = open(filename, "r")
lines = fp.readlines()
fp.close()
for line in lines:
@@ -51,9 +53,11 @@ def read_hostname(filename, default=None):
if line:
return line
except IOError as e:
- if e.errno != errno.ENOENT: raise
+ if e.errno != errno.ENOENT:
+ raise
return default
-
+
+
def update_hostname(hostname, prev_file, log):
etc_file = "/etc/hostname"
@@ -74,22 +78,21 @@ def update_hostname(hostname, prev_file, log):
if not hostname_prev or hostname_prev != hostname:
update_files.append(prev_file)
- if (not hostname_in_etc or
+ if (not hostname_in_etc or
(hostname_in_etc == hostname_prev and hostname_in_etc != hostname)):
update_files.append(etc_file)
try:
for fname in update_files:
- util.write_file(fname,"%s\n" % hostname, 0644)
- log.debug("wrote %s to %s" % (hostname,fname))
+ util.write_file(fname, "%s\n" % hostname, 0644)
+ log.debug("wrote %s to %s" % (hostname, fname))
except:
log.warn("failed to write hostname to %s" % fname)
if hostname_in_etc and hostname_prev and hostname_in_etc != hostname_prev:
log.debug("%s differs from %s. assuming user maintained" %
- (prev_file,etc_file))
+ (prev_file, etc_file))
if etc_file in update_files:
log.debug("setting hostname to %s" % hostname)
subprocess.Popen(['hostname', hostname]).communicate()
-
diff --git a/cloudinit/DataSource.py b/cloudinit/DataSource.py
index ac79f757..0985c6b2 100644
--- a/cloudinit/DataSource.py
+++ b/cloudinit/DataSource.py
@@ -20,22 +20,23 @@
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
-import UserDataHandler as ud
+import cloudinit.UserDataHandler as ud
import cloudinit.util as util
import socket
+
class DataSource:
userdata = None
metadata = None
userdata_raw = None
cfgname = ""
- # system config (passed in from cloudinit,
+ # system config (passed in from cloudinit,
# cloud-config before input from the DataSource)
- sys_cfg = { }
+ sys_cfg = {}
# datasource config, the cloud-config['datasource']['__name__']
- ds_cfg = { } # datasource config
+ ds_cfg = {} # datasource config
- def __init__(self,sys_cfg=None):
+ def __init__(self, sys_cfg=None):
if not self.cfgname:
name = str(self.__class__).split(".")[-1]
if name.startswith("DataSource"):
@@ -45,7 +46,7 @@ class DataSource:
self.sys_cfg = sys_cfg
self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
- ("datasource",self.cfgname),self.ds_cfg)
+ ("datasource", self.cfgname), self.ds_cfg)
def get_userdata(self):
if self.userdata == None:
@@ -55,26 +56,26 @@ class DataSource:
def get_userdata_raw(self):
return(self.userdata_raw)
-
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
def get_config_obj(self):
- return({ })
+ return({})
def get_public_ssh_keys(self):
keys = []
- if not self.metadata.has_key('public-keys'): return([])
+ if 'public-keys' not in self.metadata:
+ return([])
if isinstance(self.metadata['public-keys'], str):
- return([self.metadata['public-keys'],])
-
+ return([self.metadata['public-keys'], ])
+
for _keyname, klist in self.metadata['public-keys'].items():
# lp:506332 uec metadata service responds with
# data that makes boto populate a string for 'klist' rather
# than a list.
- if isinstance(klist,str):
- klist = [ klist ]
+ if isinstance(klist, str):
+ klist = [klist]
for pkey in klist:
# there is an empty string at the end of the keylist, trim it
if pkey:
@@ -103,7 +104,7 @@ class DataSource:
def get_hostname(self, fqdn=False):
defdomain = "localdomain"
- defhost = "localhost"
+ defhost = "localhost"
domain = defdomain
if not 'local-hostname' in self.metadata:
@@ -119,19 +120,18 @@ class DataSource:
fqdn = util.get_fqdn_from_hosts(hostname)
if fqdn and fqdn.find(".") > 0:
- toks = fqdn.split(".")
+ toks = str(fqdn).split(".")
elif hostname:
- toks = [ hostname, defdomain ]
+ toks = [hostname, defdomain]
else:
- toks = [ defhost, defdomain ]
-
+ toks = [defhost, defdomain]
else:
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
lhost = self.metadata['local-hostname']
if is_ipv4(lhost):
- toks = "ip-%s" % lhost.replace(".","-")
+ toks = "ip-%s" % lhost.replace(".", "-")
else:
toks = lhost.split(".")
@@ -142,10 +142,11 @@ class DataSource:
hostname = toks[0]
if fqdn:
- return "%s.%s" % (hostname,domain)
+ return "%s.%s" % (hostname, domain)
else:
return hostname
+
# return a list of classes that have the same depends as 'depends'
# iterate through cfg_list, loading "DataSourceCollections" modules
# and calling their "get_datasource_list".
@@ -163,7 +164,8 @@ def list_sources(cfg_list, depends, pkglist=None):
retlist = []
for ds_coll in cfg_list:
for pkg in pkglist:
- if pkg: pkg="%s." % pkg
+ if pkg:
+ pkg = "%s." % pkg
try:
mod = __import__("%sDataSource%s" % (pkg, ds_coll))
if pkg:
@@ -175,15 +177,16 @@ def list_sources(cfg_list, depends, pkglist=None):
raise
return(retlist)
+
# depends is a list of dependencies (DEP_FILESYSTEM)
# dslist is a list of 2 item lists
-# dslist = [
+# dslist = [
# ( class, ( depends-that-this-class-needs ) )
# }
# it returns a list of 'class' that matched these deps exactly
# it is a helper function for DataSourceCollections
def list_from_depends(depends, dslist):
- retlist = [ ]
+ retlist = []
depset = set(depends)
for elem in dslist:
(cls, deps) = elem
diff --git a/cloudinit/DataSourceEc2.py b/cloudinit/DataSourceEc2.py
index 9191e647..37dfcb5d 100644
--- a/cloudinit/DataSourceEc2.py
+++ b/cloudinit/DataSourceEc2.py
@@ -16,9 +16,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import DataSource
+import cloudinit.DataSource as DataSource
-from cloudinit import seeddir, log # pylint: disable=W0611
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
import cloudinit.util as util
import socket
import urllib2
@@ -26,29 +27,33 @@ import time
import boto.utils as boto_utils
import os.path
+
class DataSourceEc2(DataSource.DataSource):
- api_ver = '2009-04-04'
- seeddir = seeddir + '/ec2'
+ api_ver = '2009-04-04'
+ seeddir = base_seeddir + '/ec2'
metadata_address = "http://169.254.169.254"
def __str__(self):
return("DataSourceEc2")
def get_data(self):
- seedret={ }
- if util.read_optional_seed(seedret,base=self.seeddir+ "/"):
+ seedret = {}
+ if util.read_optional_seed(seedret, base=self.seeddir + "/"):
self.userdata_raw = seedret['user-data']
self.metadata = seedret['meta-data']
log.debug("using seeded ec2 data in %s" % self.seeddir)
return True
-
+
try:
if not self.wait_for_metadata_service():
return False
start = time.time()
- self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver, None, self.metadata_address)
- self.metadata = boto_utils.get_instance_metadata(self.api_ver, self.metadata_address)
- log.debug("crawl of metadata service took %ds" % (time.time()-start))
+ self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
+ None, self.metadata_address)
+ self.metadata = boto_utils.get_instance_metadata(self.api_ver,
+ self.metadata_address)
+ log.debug("crawl of metadata service took %ds" % (time.time() -
+ start))
return True
except Exception as e:
print e
@@ -63,7 +68,7 @@ class DataSourceEc2(DataSource.DataSource):
def get_local_mirror(self):
return(self.get_mirror_from_availability_zone())
- def get_mirror_from_availability_zone(self, availability_zone = None):
+ def get_mirror_from_availability_zone(self, availability_zone=None):
# availability is like 'us-west-1b' or 'eu-west-1a'
if availability_zone == None:
availability_zone = self.get_availability_zone()
@@ -74,7 +79,7 @@ class DataSourceEc2(DataSource.DataSource):
return fallback
try:
- host="%s.ec2.archive.ubuntu.com" % availability_zone[:-1]
+ host = "%s.ec2.archive.ubuntu.com" % availability_zone[:-1]
socket.getaddrinfo(host, None, 0, socket.SOCK_STREAM)
return 'http://%s/ubuntu/' % host
except:
@@ -84,11 +89,11 @@ class DataSourceEc2(DataSource.DataSource):
mcfg = self.ds_cfg
if not hasattr(mcfg, "get"):
- mcfg = {}
+ mcfg = {}
max_wait = 120
try:
- max_wait = int(mcfg.get("max_wait",max_wait))
+ max_wait = int(mcfg.get("max_wait", max_wait))
except Exception:
util.logexc(log)
log.warn("Failed to get max wait. using %s" % max_wait)
@@ -98,7 +103,7 @@ class DataSourceEc2(DataSource.DataSource):
timeout = 50
try:
- timeout = int(mcfg.get("timeout",timeout))
+ timeout = int(mcfg.get("timeout", timeout))
except Exception:
util.logexc(log)
log.warn("Failed to get timeout, using %s" % timeout)
@@ -119,8 +124,8 @@ class DataSourceEc2(DataSource.DataSource):
log.warn("Empty metadata url list! using default list")
mdurls = def_mdurls
- urls = [ ]
- url2base = { False: False }
+ urls = []
+ url2base = {False: False}
for url in mdurls:
cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
urls.append(cur)
@@ -134,7 +139,7 @@ class DataSourceEc2(DataSource.DataSource):
log.debug("Using metadata source: '%s'" % url2base[url])
else:
log.critical("giving up on md after %i seconds\n" %
- int(time.time()-starttime))
+ int(time.time() - starttime))
self.metadata_address = url2base[url]
return (bool(url))
@@ -143,7 +148,7 @@ class DataSourceEc2(DataSource.DataSource):
# consult metadata service, that has
# ephemeral0: sdb
# and return 'sdb' for input 'ephemeral0'
- if not self.metadata.has_key('block-device-mapping'):
+ if 'block-device-mapping' not in self.metadata:
return(None)
found = None
@@ -163,22 +168,23 @@ class DataSourceEc2(DataSource.DataSource):
# when the kernel named them 'vda' or 'xvda'
# we want to return the correct value for what will actually
# exist in this instance
- mappings = { "sd": ("vd", "xvd") }
+ mappings = {"sd": ("vd", "xvd")}
ofound = found
short = os.path.basename(found)
-
+
if not found.startswith("/"):
- found="/dev/%s" % found
+ found = "/dev/%s" % found
if os.path.exists(found):
return(found)
for nfrom, tlist in mappings.items():
- if not short.startswith(nfrom): continue
+ if not short.startswith(nfrom):
+ continue
for nto in tlist:
cand = "/dev/%s%s" % (nto, short[len(nfrom):])
if os.path.exists(cand):
- log.debug("remapped device name %s => %s" % (found,cand))
+ log.debug("remapped device name %s => %s" % (found, cand))
return(cand)
# on t1.micro, ephemeral0 will appear in block-device-mapping from
@@ -192,14 +198,16 @@ class DataSourceEc2(DataSource.DataSource):
def is_vpc(self):
# per comment in LP: #615545
- ph="public-hostname"; p4="public-ipv4"
+ ph = "public-hostname"
+ p4 = "public-ipv4"
if ((ph not in self.metadata or self.metadata[ph] == "") and
(p4 not in self.metadata or self.metadata[p4] == "")):
return True
return False
-def wait_for_metadata_service(urls, max_wait=None, timeout=None, status_cb=None):
+def wait_for_metadata_service(urls, max_wait=None, timeout=None,
+ status_cb=None):
"""
urls: a list of urls to try
max_wait: roughly the maximum time to wait before giving up
@@ -207,7 +215,7 @@ def wait_for_metadata_service(urls, max_wait=None, timeout=None, status_cb=None)
be tried once and given the timeout provided.
timeout: the timeout provided to urllib2.urlopen
status_cb: call method with string message when a url is not available
-
+
the idea of this routine is to wait for the EC2 metdata service to
come up. On both Eucalyptus and EC2 we have seen the case where
the instance hit the MD before the MD service was up. EC2 seems
@@ -227,16 +235,19 @@ def wait_for_metadata_service(urls, max_wait=None, timeout=None, status_cb=None)
sleeptime = 1
+ def nullstatus_cb(msg):
+ return
+
if status_cb == None:
- def status_cb(msg): return
+ status_cb = nullstatus_cb
def timeup(max_wait, starttime):
return((max_wait <= 0 or max_wait == None) or
- (time.time()-starttime > max_wait))
+ (time.time() - starttime > max_wait))
loop_n = 0
while True:
- sleeptime=int(loop_n/5)+1
+ sleeptime = int(loop_n / 5) + 1
for url in urls:
now = time.time()
if loop_n != 0:
@@ -264,7 +275,8 @@ def wait_for_metadata_service(urls, max_wait=None, timeout=None, status_cb=None)
if log:
status_cb("'%s' failed [%s/%ss]: %s" %
- (url, int(time.time()-starttime), max_wait, reason))
+ (url, int(time.time() - starttime), max_wait,
+ reason))
if timeup(max_wait, starttime):
break
@@ -275,10 +287,11 @@ def wait_for_metadata_service(urls, max_wait=None, timeout=None, status_cb=None)
return False
-datasources = [
- ( DataSourceEc2, ( DataSource.DEP_FILESYSTEM , DataSource.DEP_NETWORK ) ),
+datasources = [
+ (DataSourceEc2, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
]
+
# return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return(DataSource.list_from_depends(depends, datasources))
diff --git a/cloudinit/DataSourceNoCloud.py b/cloudinit/DataSourceNoCloud.py
index 2f4bd604..0d5f15b3 100644
--- a/cloudinit/DataSourceNoCloud.py
+++ b/cloudinit/DataSourceNoCloud.py
@@ -16,32 +16,34 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import DataSource
+import cloudinit.DataSource as DataSource
-from cloudinit import seeddir, log # pylint: disable=W0611
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
import cloudinit.util as util
+
class DataSourceNoCloud(DataSource.DataSource):
metadata = None
userdata = None
userdata_raw = None
- supported_seed_starts = ( "/" , "file://" )
+ supported_seed_starts = ("/", "file://")
seed = None
cmdline_id = "ds=nocloud"
- seeddir = seeddir + '/nocloud'
+ seeddir = base_seeddir + '/nocloud'
def __str__(self):
- mstr="DataSourceNoCloud"
+ mstr = "DataSourceNoCloud"
mstr = mstr + " [seed=%s]" % self.seed
return(mstr)
def get_data(self):
- defaults = {
- "instance-id" : "nocloud"
+ defaults = {
+ "instance-id": "nocloud"
}
- found = [ ]
- md = { }
+ found = []
+ md = {}
ud = ""
try:
@@ -53,9 +55,9 @@ class DataSourceNoCloud(DataSource.DataSource):
return False
# check to see if the seeddir has data.
- seedret={ }
- if util.read_optional_seed(seedret,base=self.seeddir + "/"):
- md = util.mergedict(md,seedret['meta-data'])
+ seedret = {}
+ if util.read_optional_seed(seedret, base=self.seeddir + "/"):
+ md = util.mergedict(md, seedret['meta-data'])
ud = seedret['user-data']
found.append(self.seeddir)
log.debug("using seeded cache data in %s" % self.seeddir)
@@ -72,7 +74,7 @@ class DataSourceNoCloud(DataSource.DataSource):
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
- seedfound=proto
+ seedfound = proto
break
if not seedfound:
log.debug("seed from %s not supported by %s" %
@@ -81,69 +83,75 @@ class DataSourceNoCloud(DataSource.DataSource):
# this could throw errors, but the user told us to do it
# so if errors are raised, let them raise
- (md_seed,ud) = util.read_seeded(seedfrom, timeout=None)
+ (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
log.debug("using seeded cache data from %s" % seedfrom)
# values in the command line override those from the seed
- md = util.mergedict(md,md_seed)
+ md = util.mergedict(md, md_seed)
found.append(seedfrom)
- md = util.mergedict(md,defaults)
+ md = util.mergedict(md, defaults)
self.seed = ",".join(found)
self.metadata = md
self.userdata_raw = ud
return True
+
# returns true or false indicating if cmdline indicated
# that this module should be used
# example cmdline:
# root=LABEL=uec-rootfs ro ds=nocloud
-def parse_cmdline_data(ds_id,fill,cmdline=None):
+def parse_cmdline_data(ds_id, fill, cmdline=None):
if cmdline is None:
cmdline = util.get_cmdline()
cmdline = " %s " % cmdline
- if not ( " %s " % ds_id in cmdline or " %s;" % ds_id in cmdline ):
+ if not (" %s " % ds_id in cmdline or " %s;" % ds_id in cmdline):
return False
- argline=""
+ argline = ""
# cmdline can contain:
# ds=nocloud[;key=val;key=val]
for tok in cmdline.split():
- if tok.startswith(ds_id): argline=tok.split("=",1)
-
+ if tok.startswith(ds_id):
+ argline = tok.split("=", 1)
+
# argline array is now 'nocloud' followed optionally by
# a ';' and then key=value pairs also terminated with ';'
- tmp=argline[1].split(";")
+ tmp = argline[1].split(";")
if len(tmp) > 1:
- kvpairs=tmp[1:]
+ kvpairs = tmp[1:]
else:
- kvpairs=()
+ kvpairs = ()
# short2long mapping to save cmdline typing
- s2l = { "h" : "local-hostname", "i" : "instance-id", "s" : "seedfrom" }
+ s2l = {"h": "local-hostname", "i": "instance-id", "s": "seedfrom"}
for item in kvpairs:
try:
- (k,v) = item.split("=",1)
+ (k, v) = item.split("=", 1)
except:
- k=item
- v=None
- if k in s2l: k=s2l[k]
- fill[k]=v
+ k = item
+ v = None
+ if k in s2l:
+ k = s2l[k]
+ fill[k] = v
return(True)
+
class DataSourceNoCloudNet(DataSourceNoCloud):
cmdline_id = "ds=nocloud-net"
- supported_seed_starts = ( "http://", "https://", "ftp://" )
- seeddir = seeddir + '/nocloud-net'
+ supported_seed_starts = ("http://", "https://", "ftp://")
+ seeddir = base_seeddir + '/nocloud-net'
+
datasources = (
- ( DataSourceNoCloud, ( DataSource.DEP_FILESYSTEM, ) ),
- ( DataSourceNoCloudNet,
- ( DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK ) ),
+ (DataSourceNoCloud, (DataSource.DEP_FILESYSTEM, )),
+ (DataSourceNoCloudNet,
+ (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
)
+
# return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return(DataSource.list_from_depends(depends, datasources))
diff --git a/cloudinit/DataSourceOVF.py b/cloudinit/DataSourceOVF.py
index 1c510688..1a527524 100644
--- a/cloudinit/DataSourceOVF.py
+++ b/cloudinit/DataSourceOVF.py
@@ -16,11 +16,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import DataSource
+import cloudinit.DataSource as DataSource
-from cloudinit import seeddir, log
+from cloudinit import seeddir as base_seeddir
+from cloudinit import log
import cloudinit.util as util
-import sys
import os.path
import os
from xml.dom import minidom
@@ -29,48 +29,51 @@ import re
import tempfile
import subprocess
+
class DataSourceOVF(DataSource.DataSource):
seed = None
- seeddir = seeddir + '/ovf'
+ seeddir = base_seeddir + '/ovf'
environment = None
- cfg = { }
+ cfg = {}
userdata_raw = None
metadata = None
- supported_seed_starts = ( "/" , "file://" )
+ supported_seed_starts = ("/", "file://")
def __str__(self):
- mstr="DataSourceOVF"
+ mstr = "DataSourceOVF"
mstr = mstr + " [seed=%s]" % self.seed
return(mstr)
def get_data(self):
- found = [ ]
- md = { }
+ found = []
+ md = {}
ud = ""
- defaults = {
- "instance-id" : "iid-dsovf"
+ defaults = {
+ "instance-id": "iid-dsovf"
}
- (seedfile, contents) = get_ovf_env(seeddir)
+ (seedfile, contents) = get_ovf_env(base_seeddir)
if seedfile:
# found a seed dir
- seed = "%s/%s" % (seeddir,seedfile)
+ seed = "%s/%s" % (base_seeddir, seedfile)
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
else:
- np = { 'iso' : transport_iso9660,
- 'vmware-guestd' : transport_vmware_guestd, }
+ np = {'iso': transport_iso9660,
+ 'vmware-guestd': transport_vmware_guestd, }
+ name = None
for name, transfunc in np.iteritems():
(contents, _dev, _fname) = transfunc()
- if contents: break
+ if contents:
+ break
if contents:
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
- found.append(name) # pylint: disable=W0631
+ found.append(name)
# There was no OVF transports found
if len(found) == 0:
@@ -88,14 +91,13 @@ class DataSourceOVF(DataSource.DataSource):
(seedfrom, self.__class__))
return False
- (md_seed,ud) = util.read_seeded(seedfrom, timeout=None)
+ (md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
log.debug("using seeded cache data from %s" % seedfrom)
- md = util.mergedict(md,md_seed)
+ md = util.mergedict(md, md_seed)
found.append(seedfrom)
-
- md = util.mergedict(md,defaults)
+ md = util.mergedict(md, defaults)
self.seed = ",".join(found)
self.metadata = md
self.userdata_raw = ud
@@ -103,30 +105,34 @@ class DataSourceOVF(DataSource.DataSource):
return True
def get_public_ssh_keys(self):
- if not 'public-keys' in self.metadata: return([])
- return([self.metadata['public-keys'],])
-
+ if not 'public-keys' in self.metadata:
+ return([])
+ return([self.metadata['public-keys'], ])
+
# the data sources' config_obj is a cloud-config formated
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
def get_config_obj(self):
return(self.cfg)
+
class DataSourceOVFNet(DataSourceOVF):
- seeddir = seeddir + '/ovf-net'
- supported_seed_starts = ( "http://", "https://", "ftp://" )
+ seeddir = base_seeddir + '/ovf-net'
+ supported_seed_starts = ("http://", "https://", "ftp://")
+
# this will return a dict with some content
# meta-data, user-data
def read_ovf_environment(contents):
props = getProperties(contents)
- md = { }
- cfg = { }
+ md = {}
+ cfg = {}
ud = ""
- cfg_props = [ 'password', ]
- md_props = [ 'seedfrom', 'local-hostname', 'public-keys', 'instance-id' ]
+ cfg_props = ['password', ]
+ md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
for prop, val in props.iteritems():
- if prop == 'hostname': prop = "local-hostname"
+ if prop == 'hostname':
+ prop = "local-hostname"
if prop in md_props:
md[prop] = val
elif prop in cfg_props:
@@ -137,31 +143,32 @@ def read_ovf_environment(contents):
except:
ud = val
return(md, ud, cfg)
-
+
# returns tuple of filename (in 'dirname', and the contents of the file)
# on "not found", returns 'None' for filename and False for contents
def get_ovf_env(dirname):
- env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML" )
+ env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML")
for fname in env_names:
- if os.path.isfile("%s/%s" % (dirname,fname)):
- fp = open("%s/%s" % (dirname,fname))
+ if os.path.isfile("%s/%s" % (dirname, fname)):
+ fp = open("%s/%s" % (dirname, fname))
contents = fp.read()
fp.close()
- return(fname,contents)
- return(None,False)
+ return(fname, contents)
+ return(None, False)
+
# transport functions take no input and return
# a 3 tuple of content, path, filename
def transport_iso9660(require_iso=False):
- # default_regex matches values in
+ # default_regex matches values in
# /lib/udev/rules.d/60-cdrom_id.rules
# KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end"
envname = "CLOUD_INIT_CDROM_DEV_REGEX"
default_regex = "^(sr[0-9]+|hd[a-z]|xvd.*)"
- devname_regex = os.environ.get(envname,default_regex)
+ devname_regex = os.environ.get(envname, default_regex)
cdmatch = re.compile(devname_regex)
# go through mounts to see if it was already mounted
@@ -169,19 +176,20 @@ def transport_iso9660(require_iso=False):
mounts = fp.readlines()
fp.close()
- mounted = { }
+ mounted = {}
for mpline in mounts:
- (dev,mp,fstype,_opts,_freq,_passno) = mpline.split()
- mounted[dev]=(dev,fstype,mp,False)
- mp = mp.replace("\\040"," ")
- if fstype != "iso9660" and require_iso: continue
+ (dev, mp, fstype, _opts, _freq, _passno) = mpline.split()
+ mounted[dev] = (dev, fstype, mp, False)
+ mp = mp.replace("\\040", " ")
+ if fstype != "iso9660" and require_iso:
+ continue
- if cdmatch.match(dev[5:]) == None: # take off '/dev/'
+ if cdmatch.match(dev[5:]) == None: # take off '/dev/'
continue
-
- (fname,contents) = get_ovf_env(mp)
+
+ (fname, contents) = get_ovf_env(mp)
if contents is not False:
- return(contents,dev,fname)
+ return(contents, dev, fname)
tmpd = None
dvnull = None
@@ -201,7 +209,8 @@ def transport_iso9660(require_iso=False):
fp.read(512)
fp.close()
except:
- if fp: fp.close()
+ if fp:
+ fp.close()
continue
if tmpd is None:
@@ -212,20 +221,21 @@ def transport_iso9660(require_iso=False):
except:
pass
- cmd = [ "mount", "-o", "ro", fullp, tmpd ]
- if require_iso: cmd.extend(('-t','iso9660'))
+ cmd = ["mount", "-o", "ro", fullp, tmpd]
+ if require_iso:
+ cmd.extend(('-t', 'iso9660'))
rc = subprocess.call(cmd, stderr=dvnull, stdout=dvnull, stdin=dvnull)
if rc:
continue
- (fname,contents) = get_ovf_env(tmpd)
+ (fname, contents) = get_ovf_env(tmpd)
subprocess.call(["umount", tmpd])
if contents is not False:
os.rmdir(tmpd)
- return(contents,fullp,fname)
+ return(contents, fullp, fname)
if tmpd:
os.rmdir(tmpd)
@@ -235,11 +245,13 @@ def transport_iso9660(require_iso=False):
return(False, None, None)
+
def transport_vmware_guestd():
- # http://blogs.vmware.com/vapp/2009/07/selfconfiguration-and-the-ovf-environment.html
+ # http://blogs.vmware.com/vapp/2009/07/ \
+ # selfconfiguration-and-the-ovf-environment.html
# try:
# cmd = ['vmware-guestd', '--cmd', 'info-get guestinfo.ovfEnv']
- # (out,err) = subp(cmd)
+ # (out, err) = subp(cmd)
# return(out, 'guestinfo.ovfEnv', 'vmware-guestd')
# except:
# # would need to error check here and see why this failed
@@ -248,13 +260,16 @@ def transport_vmware_guestd():
return(False, None, None)
-def findChild(node,filter_func):
+def findChild(node, filter_func):
ret = []
- if not node.hasChildNodes(): return ret
+ if not node.hasChildNodes():
+ return ret
for child in node.childNodes:
- if filter_func(child): ret.append(child)
+ if filter_func(child):
+ ret.append(child)
return(ret)
+
def getProperties(environString):
dom = minidom.parseString(environString)
if dom.documentElement.localName != "Environment":
@@ -265,7 +280,7 @@ def getProperties(environString):
envNsURI = "http://schemas.dmtf.org/ovf/environment/1"
- # could also check here that elem.namespaceURI ==
+ # could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
propSections = findChild(dom.documentElement,
lambda n: n.localName == "PropertySection")
@@ -273,37 +288,43 @@ def getProperties(environString):
if len(propSections) == 0:
raise Exception("No 'PropertySection's")
- props = { }
+ props = {}
propElems = findChild(propSections[0], lambda n: n.localName == "Property")
for elem in propElems:
- key = elem.attributes.getNamedItemNS(envNsURI,"key").value
- val = elem.attributes.getNamedItemNS(envNsURI,"value").value
+ key = elem.attributes.getNamedItemNS(envNsURI, "key").value
+ val = elem.attributes.getNamedItemNS(envNsURI, "value").value
props[key] = val
return(props)
+
datasources = (
- ( DataSourceOVF, ( DataSource.DEP_FILESYSTEM, ) ),
- ( DataSourceOVFNet,
- ( DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK ) ),
+ (DataSourceOVF, (DataSource.DEP_FILESYSTEM, )),
+ (DataSourceOVFNet,
+ (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
)
+
# return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return(DataSource.list_from_depends(depends, datasources))
+
if __name__ == "__main__":
- import sys
- envStr = open(sys.argv[1]).read()
- props = getProperties(envStr)
- import pprint
- pprint.pprint(props)
-
- md, ud, cfg = read_ovf_environment(envStr)
- print "=== md ==="
- pprint.pprint(md)
- print "=== ud ==="
- pprint.pprint(ud)
- print "=== cfg ==="
- pprint.pprint(cfg)
+ def main():
+ import sys
+ envStr = open(sys.argv[1]).read()
+ props = getProperties(envStr)
+ import pprint
+ pprint.pprint(props)
+
+ md, ud, cfg = read_ovf_environment(envStr)
+ print "=== md ==="
+ pprint.pprint(md)
+ print "=== ud ==="
+ pprint.pprint(ud)
+ print "=== cfg ==="
+ pprint.pprint(cfg)
+
+ main()
diff --git a/cloudinit/SshUtil.py b/cloudinit/SshUtil.py
index 125ca618..9db0c6b4 100644
--- a/cloudinit/SshUtil.py
+++ b/cloudinit/SshUtil.py
@@ -4,6 +4,7 @@ import os
import os.path
import cloudinit.util as util
+
class AuthKeyEntry():
# lines are options, keytype, base64-encoded key, comment
# man page says the following which I did not understand:
@@ -18,13 +19,13 @@ class AuthKeyEntry():
line_in = ""
def __init__(self, line, def_opt=None):
- line=line.rstrip("\n\r")
+ line = line.rstrip("\n\r")
self.line_in = line
if line.startswith("#") or line.strip() == "":
self.is_comment = True
else:
ent = line.strip()
- toks = ent.split(None,3)
+ toks = ent.split(None, 3)
if len(toks) == 1:
self.base64 = toks[0]
elif len(toks) == 2:
@@ -37,7 +38,7 @@ class AuthKeyEntry():
quoted = False
# taken from auth_rsa_key_allowed in auth-rsa.c
try:
- while (i < len(ent) and
+ while (i < len(ent) and
((quoted) or (ent[i] not in (" ", "\t")))):
curc = ent[i]
nextc = ent[i + 1]
@@ -48,12 +49,12 @@ class AuthKeyEntry():
i = i + 1
except IndexError:
self.is_comment = True
- return()
+ return
try:
self.options = ent[0:i]
(self.keytype, self.base64, self.comment) = \
- ent[i+1:].split(None,3)
+ ent[i + 1:].split(None, 3)
except ValueError:
# we did not understand this line
self.is_comment = True
@@ -64,30 +65,33 @@ class AuthKeyEntry():
return
def debug(self):
- print("line_in=%s\ncomment: %s\noptions=%s\nkeytype=%s\nbase64=%s\ncomment=%s\n" %
- (self.line_in, self.is_comment, self.options, self.keytype, self.base64, self.comment)),
+ print("line_in=%s\ncomment: %s\noptions=%s\nkeytype=%s\nbase64=%s\n"
+ "comment=%s\n" % (self.line_in, self.is_comment, self.options,
+ self.keytype, self.base64, self.comment)),
+
def __repr__(self):
if self.is_comment:
return(self.line_in)
else:
- toks = [ ]
+ toks = []
for e in (self.options, self.keytype, self.base64, self.comment):
if e:
toks.append(e)
-
+
return(' '.join(toks))
-
+
+
def update_authorized_keys(fname, keys):
# keys is a list of AuthKeyEntries
# key_prefix is the prefix (options) to prepend
try:
fp = open(fname, "r")
- lines = fp.readlines() # lines have carriage return
+ lines = fp.readlines() # lines have carriage return
fp.close()
except IOError:
- lines = [ ]
+ lines = []
- ka_stats = { } # keys_added status
+ ka_stats = {} # keys_added status
for k in keys:
ka_stats[k] = False
@@ -95,7 +99,7 @@ def update_authorized_keys(fname, keys):
for key in keys:
to_add.append(key)
- for i in range(0,len(lines)):
+ for i in range(0, len(lines)):
ent = AuthKeyEntry(lines[i])
for k in keys:
if k.base64 == ent.base64 and not k.is_comment:
@@ -115,7 +119,7 @@ def update_authorized_keys(fname, keys):
else:
return('\n'.join(lines) + "\n")
-
+
def setup_user_keys(keys, user, key_prefix, log=None):
import pwd
saved_umask = os.umask(077)
@@ -129,7 +133,7 @@ def setup_user_keys(keys, user, key_prefix, log=None):
try:
ssh_cfg = parse_ssh_config()
- akeys = ssh_cfg.get("AuthorizedKeysFile","%h/.ssh/authorized_keys")
+ akeys = ssh_cfg.get("AuthorizedKeysFile", "%h/.ssh/authorized_keys")
akeys = akeys.replace("%h", pwent.pw_dir)
akeys = akeys.replace("%u", user)
authorized_keys = akeys
@@ -151,46 +155,52 @@ def setup_user_keys(keys, user, key_prefix, log=None):
os.umask(saved_umask)
-if __name__ == "__main__":
- import sys
- # usage: orig_file, new_keys, [key_prefix]
- # prints out merged, where 'new_keys' will trump old
- ## example
- ## ### begin authorized_keys ###
- # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA28CDAGtxSucHezSKqwh1wAs39xdeZTSVmmyMcKDI5Njnd1d/Uhgj/awxP0Whep8eRSm6F+Xgwi0pH1KNPCszPvq+03K+yi3YkYkQIkVBhctK6AP/UmlVQTVmjJdEvgtrppFTjCzf16q0BT0mXX5YFV3csgm8cJn7UveKHkYjJp8= smoser-work
- # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
- # ### end authorized_keys ###
- #
- # ### begin new_keys ###
- # ssh-rsa nonmatch smoser@newhost
- # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA28CDAGtxSucHezSKqwh1wAs39xdeZTSVmmyMcKDI5Njnd1d/Uhgj/awxP0Whep8eRSm6F+Xgwi0pH1KNPCszPvq+03K+yi3YkYkQIkVBhctK6AP/UmlVQTVmjJdEvgtrppFTjCzf16q0BT0mXX5YFV3csgm8cJn7UveKHkYjJp8= new_comment
- # ### end new_keys ###
- #
- # Then run as:
- # program authorized_keys new_keys 'no-port-forwarding,command=\"echo hi world;\"'
- def_prefix = None
- orig_key_file = sys.argv[1]
- new_key_file = sys.argv[2]
- if len(sys.argv) > 3:
- def_prefix = sys.argv[3]
- fp = open(new_key_file)
-
- newkeys = [ ]
- for line in fp.readlines():
- newkeys.append(AuthKeyEntry(line, def_prefix))
-
- fp.close()
- print update_authorized_keys(orig_key_file, newkeys)
def parse_ssh_config(fname="/etc/ssh/sshd_config"):
- ret = { }
- fp=open(fname)
+ ret = {}
+ fp = open(fname)
for l in fp.readlines():
l = l.strip()
if not l or l.startswith("#"):
continue
- key,val = l.split(None,1)
- ret[key]=val
+ key, val = l.split(None, 1)
+ ret[key] = val
fp.close()
return(ret)
+if __name__ == "__main__":
+ def main():
+ import sys
+ # usage: orig_file, new_keys, [key_prefix]
+ # prints out merged, where 'new_keys' will trump old
+ ## example
+ ## ### begin auth_keys ###
+ # ssh-rsa AAAAB3NzaC1xxxxxxxxxV3csgm8cJn7UveKHkYjJp8= smoser-work
+ # ssh-rsa AAAAB3NzaC1xxxxxxxxxCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
+ # ### end authorized_keys ###
+ #
+ # ### begin new_keys ###
+ # ssh-rsa nonmatch smoser@newhost
+ # ssh-rsa AAAAB3NzaC1xxxxxxxxxV3csgm8cJn7UveKHkYjJp8= new_comment
+ # ### end new_keys ###
+ #
+ # Then run as:
+ # program auth_keys new_keys \
+ # 'no-port-forwarding,command=\"echo hi world;\"'
+ def_prefix = None
+ orig_key_file = sys.argv[1]
+ new_key_file = sys.argv[2]
+ if len(sys.argv) > 3:
+ def_prefix = sys.argv[3]
+ fp = open(new_key_file)
+
+ newkeys = []
+ for line in fp.readlines():
+ newkeys.append(AuthKeyEntry(line, def_prefix))
+
+ fp.close()
+ print update_authorized_keys(orig_key_file, newkeys)
+
+ main()
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/UserDataHandler.py b/cloudinit/UserDataHandler.py
index 14aea58b..9331aa17 100644
--- a/cloudinit/UserDataHandler.py
+++ b/cloudinit/UserDataHandler.py
@@ -26,34 +26,38 @@ import cloudinit.util as util
import hashlib
import urllib
-starts_with_mappings={
- '#include' : 'text/x-include-url',
- '#include-once' : 'text/x-include-once-url',
- '#!' : 'text/x-shellscript',
- '#cloud-config' : 'text/cloud-config',
- '#upstart-job' : 'text/upstart-job',
- '#part-handler' : 'text/part-handler',
- '#cloud-boothook' : 'text/cloud-boothook',
- '#cloud-config-archive' : 'text/cloud-config-archive',
+
+starts_with_mappings = {
+ '#include': 'text/x-include-url',
+ '#include-once': 'text/x-include-once-url',
+ '#!': 'text/x-shellscript',
+ '#cloud-config': 'text/cloud-config',
+ '#upstart-job': 'text/upstart-job',
+ '#part-handler': 'text/part-handler',
+ '#cloud-boothook': 'text/cloud-boothook',
+ '#cloud-config-archive': 'text/cloud-config-archive',
}
+
# if 'string' is compressed return decompressed otherwise return it
def decomp_str(string):
import StringIO
import gzip
try:
- uncomp = gzip.GzipFile(None,"rb",1,StringIO.StringIO(string)).read()
+ uncomp = gzip.GzipFile(None, "rb", 1, StringIO.StringIO(string)).read()
return(uncomp)
except:
return(string)
+
def do_include(content, appendmsg):
import os
# is just a list of urls, one per line
# also support '#include <url here>'
includeonce = False
for line in content.splitlines():
- if line == "#include": continue
+ if line == "#include":
+ continue
if line == "#include-once":
includeonce = True
continue
@@ -62,10 +66,11 @@ def do_include(content, appendmsg):
includeonce = True
elif line.startswith("#include"):
line = line[len("#include"):].lstrip()
- if line.startswith("#"): continue
+ if line.startswith("#"):
+ continue
# urls cannot not have leading or trailing white space
- msum = hashlib.md5()
+ msum = hashlib.md5() # pylint: disable=E1101
msum.update(line.strip())
includeonce_filename = "%s/urlcache/%s" % (
cloudinit.get_ipath_cur("data"), msum.hexdigest())
@@ -86,14 +91,14 @@ def do_include(content, appendmsg):
def explode_cc_archive(archive, appendmsg):
for ent in yaml.load(archive):
# ent can be one of:
- # dict { 'filename' : 'value' , 'content' : 'value', 'type' : 'value' }
+ # dict { 'filename' : 'value', 'content' : 'value', 'type' : 'value' }
# filename and type not be present
# or
# scalar(payload)
-
+
def_type = "text/cloud-config"
- if isinstance(ent,str):
- ent = { 'content': ent }
+ if isinstance(ent, str):
+ ent = {'content': ent}
content = ent.get('content', '')
mtype = ent.get('type', None)
@@ -116,7 +121,7 @@ def explode_cc_archive(archive, appendmsg):
continue
msg.add_header(header, ent['header'])
- _attach_part(appendmsg,msg)
+ _attach_part(appendmsg, msg)
def multi_part_count(outermsg, newcount=None):
@@ -133,6 +138,7 @@ def multi_part_count(outermsg, newcount=None):
return(int(outermsg.get('Number-Attachments', 0)))
+
def _attach_part(outermsg, part):
"""
Attach an part to an outer message. outermsg must be a MIMEMultipart.
@@ -141,18 +147,20 @@ def _attach_part(outermsg, part):
cur = multi_part_count(outermsg)
if not part.get_filename(None):
part.add_header('Content-Disposition', 'attachment',
- filename = 'part-%03d' % (cur+1))
+ filename='part-%03d' % (cur + 1))
outermsg.attach(part)
- multi_part_count(outermsg, cur+1)
-
+ multi_part_count(outermsg, cur + 1)
+
+
def type_from_startswith(payload, default=None):
# slist is sorted longest first
- slist = sorted(starts_with_mappings.keys(), key=lambda e: 0-len(e))
+ slist = sorted(starts_with_mappings.keys(), key=lambda e: 0 - len(e))
for sstr in slist:
if payload.startswith(sstr):
return(starts_with_mappings[sstr])
return default
+
def process_includes(msg, appendmsg=None):
if appendmsg == None:
appendmsg = MIMEMultipart()
@@ -188,32 +196,36 @@ def process_includes(msg, appendmsg=None):
_attach_part(appendmsg, part)
+
def message_from_string(data, headers=None):
if headers is None:
headers = {}
if "mime-version:" in data[0:4096].lower():
msg = email.message_from_string(data)
- for (key,val) in headers.items():
+ for (key, val) in headers.items():
if key in msg:
- msg.replace_header(key,val)
+ msg.replace_header(key, val)
else:
msg[key] = val
else:
- mtype = headers.get("Content-Type","text/plain")
+ mtype = headers.get("Content-Type", "text/plain")
maintype, subtype = mtype.split("/", 1)
msg = MIMEBase(maintype, subtype, *headers)
msg.set_payload(data)
return(msg)
+
# this is heavily wasteful, reads through userdata string input
def preprocess_userdata(data):
newmsg = MIMEMultipart()
process_includes(message_from_string(decomp_str(data)), newmsg)
return(newmsg.as_string())
-# callback is a function that will be called with (data, content_type, filename, payload)
-def walk_userdata(istr, callback, data = None):
+
+# callback is a function that will be called with (data, content_type,
+# filename, payload)
+def walk_userdata(istr, callback, data=None):
partnum = 0
for part in message_from_string(istr).walk():
# multipart/* are just containers
@@ -230,12 +242,16 @@ def walk_userdata(istr, callback, data = None):
callback(data, ctype, filename, part.get_payload(decode=True))
- partnum = partnum+1
+ partnum = partnum + 1
+
if __name__ == "__main__":
- import sys
- data = decomp_str(file(sys.argv[1]).read())
- newmsg = MIMEMultipart()
- process_includes(message_from_string(data), newmsg)
- print newmsg
- print "#found %s parts" % multi_part_count(newmsg)
+ def main():
+ import sys
+ data = decomp_str(file(sys.argv[1]).read())
+ newmsg = MIMEMultipart()
+ process_includes(message_from_string(data), newmsg)
+ print newmsg
+ print "#found %s parts" % multi_part_count(newmsg)
+
+ main()
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
index d74a0f1f..0bf2da02 100644
--- a/cloudinit/__init__.py
+++ b/cloudinit/__init__.py
@@ -26,34 +26,33 @@ seeddir = varlibdir + "/seed"
cfg_env_name = "CLOUD_CFG"
cfg_builtin = """
-log_cfgs: [ ]
-datasource_list: [ "NoCloud", "OVF", "Ec2" ]
+log_cfgs: []
+datasource_list: ["NoCloud", "OVF", "Ec2"]
def_log_file: /var/log/cloud-init.log
syslog_fix_perms: syslog:adm
"""
logger_name = "cloudinit"
pathmap = {
- "handlers" : "/handlers",
- "scripts" : "/scripts",
- "sem" : "/sem",
- "boothooks" : "/boothooks",
- "userdata_raw" : "/user-data.txt",
- "userdata" : "/user-data.txt.i",
- "obj_pkl" : "/obj.pkl",
- "cloud_config" : "/cloud-config.txt",
- "data" : "/data",
- None : "",
+ "handlers": "/handlers",
+ "scripts": "/scripts",
+ "sem": "/sem",
+ "boothooks": "/boothooks",
+ "userdata_raw": "/user-data.txt",
+ "userdata": "/user-data.txt.i",
+ "obj_pkl": "/obj.pkl",
+ "cloud_config": "/cloud-config.txt",
+ "data": "/data",
+ None: "",
}
-per_instance="once-per-instance"
-per_always="always"
-per_once="once"
+per_instance = "once-per-instance"
+per_always = "always"
+per_once = "once"
-parsed_cfgs = { }
+parsed_cfgs = {}
import os
-from configobj import ConfigObj
import cPickle
import sys
@@ -62,32 +61,38 @@ import errno
import pwd
import subprocess
import yaml
-import util
import logging
import logging.config
import StringIO
import glob
import traceback
+import cloudinit.util as util
+
+
class NullHandler(logging.Handler):
- def emit(self,record): pass
+ def emit(self, record):
+ pass
+
log = logging.getLogger(logger_name)
log.addHandler(NullHandler())
+
def logging_set_from_cfg_file(cfg_file=system_config):
- logging_set_from_cfg(util.get_base_cfg(cfg_file,cfg_builtin,parsed_cfgs))
+ logging_set_from_cfg(util.get_base_cfg(cfg_file, cfg_builtin, parsed_cfgs))
+
def logging_set_from_cfg(cfg):
log_cfgs = []
- logcfg=util.get_cfg_option_str(cfg, "log_cfg", False)
+ logcfg = util.get_cfg_option_str(cfg, "log_cfg", False)
if logcfg:
# if there is a 'logcfg' entry in the config, respect
# it, it is the old keyname
- log_cfgs = [ logcfg ]
+ log_cfgs = [logcfg]
elif "log_cfgs" in cfg:
for cfg in cfg['log_cfgs']:
- if isinstance(cfg,list):
+ if isinstance(cfg, list):
log_cfgs.append('\n'.join(cfg))
else:
log_cfgs.append()
@@ -106,39 +111,40 @@ def logging_set_from_cfg(cfg):
raise Exception("no valid logging found\n")
-import DataSource
-import UserDataHandler
+import cloudinit.DataSource as DataSource
+import cloudinit.UserDataHandler as UserDataHandler
+
class CloudInit:
cfg = None
- part_handlers = { }
+ part_handlers = {}
old_conffile = '/etc/ec2-init/ec2-config.cfg'
- ds_deps = [ DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK ]
+ ds_deps = [DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK]
datasource = None
cloud_config_str = ''
datasource_name = ''
- builtin_handlers = [ ]
+ builtin_handlers = []
- def __init__(self, ds_deps = None, sysconfig=system_config):
+ def __init__(self, ds_deps=None, sysconfig=system_config):
self.builtin_handlers = [
- [ 'text/x-shellscript', self.handle_user_script, per_always ],
- [ 'text/cloud-config', self.handle_cloud_config, per_always ],
- [ 'text/upstart-job', self.handle_upstart_job, per_instance ],
- [ 'text/cloud-boothook', self.handle_cloud_boothook, per_always ],
+ ['text/x-shellscript', self.handle_user_script, per_always],
+ ['text/cloud-config', self.handle_cloud_config, per_always],
+ ['text/upstart-job', self.handle_upstart_job, per_instance],
+ ['text/cloud-boothook', self.handle_cloud_boothook, per_always],
]
if ds_deps != None:
self.ds_deps = ds_deps
- self.sysconfig=sysconfig
- self.cfg=self.read_cfg()
+ self.sysconfig = sysconfig
+ self.cfg = self.read_cfg()
def read_cfg(self):
if self.cfg:
return(self.cfg)
try:
- conf = util.get_base_cfg(self.sysconfig,cfg_builtin, parsed_cfgs)
+ conf = util.get_base_cfg(self.sysconfig, cfg_builtin, parsed_cfgs)
except Exception:
conf = get_builtin_cfg()
@@ -147,8 +153,9 @@ class CloudInit:
try:
from configobj import ConfigObj
oldcfg = ConfigObj(self.old_conffile)
- if oldcfg is None: oldcfg = { }
- conf = util.mergedict(conf,oldcfg)
+ if oldcfg is None:
+ oldcfg = {}
+ conf = util.mergedict(conf, oldcfg)
except:
pass
@@ -160,7 +167,7 @@ class CloudInit:
# by using the instance link, if purge_cache was called
# the file wont exist
cache = get_ipath_cur('obj_pkl')
- f=open(cache, "rb")
+ f = open(cache, "rb")
data = cPickle.load(f)
f.close()
self.datasource = data
@@ -175,23 +182,24 @@ class CloudInit:
except OSError as e:
if e.errno != errno.EEXIST:
return False
-
+
try:
- f=open(cache, "wb")
- cPickle.dump(self.datasource,f)
+ f = open(cache, "wb")
+ cPickle.dump(self.datasource, f)
f.close()
- os.chmod(cache,0400)
+ os.chmod(cache, 0400)
except:
raise
-
+
def get_data_source(self):
- if self.datasource is not None: return True
+ if self.datasource is not None:
+ return True
if self.restore_from_cache():
log.debug("restored from cache type %s" % self.datasource)
return True
- cfglist=self.cfg['datasource_list']
+ cfglist = self.cfg['datasource_list']
dslist = list_sources(cfglist, self.ds_deps)
dsnames = [f.__name__ for f in dslist]
@@ -206,7 +214,7 @@ class CloudInit:
log.debug("found data source %s" % ds)
return True
except Exception as e:
- log.warn("get_data of %s raised %s" % (ds,e))
+ log.warn("get_data of %s raised %s" % (ds, e))
util.logexc(log)
msg = "Did not find data source. searched classes: %s" % dsnames
log.debug(msg)
@@ -216,18 +224,19 @@ class CloudInit:
try:
os.unlink(cur_instance_link)
except OSError as e:
- if e.errno != errno.ENOENT: raise
+ if e.errno != errno.ENOENT:
+ raise
iid = self.get_instance_id()
os.symlink("./instances/%s" % iid, cur_instance_link)
idir = self.get_ipath()
dlist = []
- for d in [ "handlers", "scripts", "sem" ]:
+ for d in ["handlers", "scripts", "sem"]:
dlist.append("%s/%s" % (idir, d))
-
+
util.ensure_dirs(dlist)
- ds = "%s: %s\n" % ( self.datasource.__class__, str(self.datasource) )
+ ds = "%s: %s\n" % (self.datasource.__class__, str(self.datasource))
dp = self.get_cpath('data')
util.write_file("%s/%s" % (idir, 'datasource'), ds)
util.write_file("%s/%s" % (dp, 'previous-datasource'), ds)
@@ -252,68 +261,69 @@ class CloudInit:
util.write_file(self.get_ipath('userdata'),
self.datasource.get_userdata(), 0600)
- def sem_getpath(self,name,freq):
+ def sem_getpath(self, name, freq):
if freq == 'once-per-instance':
- return("%s/%s" % (self.get_ipath("sem"),name))
+ return("%s/%s" % (self.get_ipath("sem"), name))
return("%s/%s.%s" % (get_cpath("sem"), name, freq))
-
- def sem_has_run(self,name,freq):
- if freq == per_always: return False
- semfile = self.sem_getpath(name,freq)
+
+ def sem_has_run(self, name, freq):
+ if freq == per_always:
+ return False
+ semfile = self.sem_getpath(name, freq)
if os.path.exists(semfile):
return True
return False
-
- def sem_acquire(self,name,freq):
+
+ def sem_acquire(self, name, freq):
from time import time
- semfile = self.sem_getpath(name,freq)
-
+ semfile = self.sem_getpath(name, freq)
+
try:
os.makedirs(os.path.dirname(semfile))
except OSError as e:
if e.errno != errno.EEXIST:
raise e
-
+
if os.path.exists(semfile) and freq != per_always:
return False
-
+
# race condition
try:
- f = open(semfile,"w")
+ f = open(semfile, "w")
f.write("%s\n" % str(time()))
f.close()
except:
return(False)
return(True)
-
- def sem_clear(self,name,freq):
- semfile = self.sem_getpath(name,freq)
+
+ def sem_clear(self, name, freq):
+ semfile = self.sem_getpath(name, freq)
try:
os.unlink(semfile)
except OSError as e:
if e.errno != errno.ENOENT:
return False
-
+
return True
# acquire lock on 'name' for given 'freq'
# if that does not exist, then call 'func' with given 'args'
# if 'clear_on_fail' is True and func throws an exception
# then remove the lock (so it would run again)
- def sem_and_run(self,semname,freq,func,args=None,clear_on_fail=False):
+ def sem_and_run(self, semname, freq, func, args=None, clear_on_fail=False):
if args is None:
args = []
- if self.sem_has_run(semname,freq):
+ if self.sem_has_run(semname, freq):
log.debug("%s already ran %s", semname, freq)
return False
try:
- if not self.sem_acquire(semname,freq):
+ if not self.sem_acquire(semname, freq):
raise Exception("Failed to acquire lock on %s" % semname)
func(*args)
except:
if clear_on_fail:
- self.sem_clear(semname,freq)
+ self.sem_clear(semname, freq)
raise
return True
@@ -321,8 +331,8 @@ class CloudInit:
# get_ipath : get the instance path for a name in pathmap
# (/var/lib/cloud/instances/<instance>/name)<name>)
def get_ipath(self, name=None):
- return("%s/instances/%s%s"
- % (varlibdir,self.get_instance_id(), pathmap[name]))
+ return("%s/instances/%s%s"
+ % (varlibdir, self.get_instance_id(), pathmap[name]))
def consume_userdata(self, frequency=per_instance):
self.get_userdata()
@@ -333,18 +343,20 @@ class CloudInit:
# add the path to the plugins dir to the top of our list for import
# instance dir should be read before cloud-dir
- sys.path.insert(0,cdir)
- sys.path.insert(0,idir)
+ sys.path.insert(0, cdir)
+ sys.path.insert(0, idir)
- part_handlers = { }
+ part_handlers = {}
# add handlers in cdir
for fname in glob.glob("%s/*.py" % cdir):
- if not os.path.isfile(fname): continue
+ if not os.path.isfile(fname):
+ continue
modname = os.path.basename(fname)[0:-3]
try:
mod = __import__(modname)
handler_register(mod, part_handlers, data, frequency)
- log.debug("added handler for [%s] from %s" % (mod.list_types(), fname))
+ log.debug("added handler for [%s] from %s" % (mod.list_types(),
+ fname))
except:
log.warn("failed to initialize handler in %s" % fname)
util.logexc(log)
@@ -357,44 +369,46 @@ class CloudInit:
part_handlers, data, frequency)
# walk the data
- pdata = { 'handlers': part_handlers, 'handlerdir': idir,
- 'data' : data, 'frequency': frequency }
+ pdata = {'handlers': part_handlers, 'handlerdir': idir,
+ 'data': data, 'frequency': frequency}
UserDataHandler.walk_userdata(self.get_userdata(),
- partwalker_callback, data = pdata)
+ partwalker_callback, data=pdata)
# give callbacks opportunity to finalize
- called = [ ]
+ called = []
for (_mtype, mod) in part_handlers.iteritems():
if mod in called:
continue
handler_call_end(mod, data, frequency)
- def handle_user_script(self,_data,ctype,filename,payload, _frequency):
- if ctype == "__end__": return
+ def handle_user_script(self, _data, ctype, filename, payload, _frequency):
+ if ctype == "__end__":
+ return
if ctype == "__begin__":
# maybe delete existing things here
return
- filename=filename.replace(os.sep,'_')
+ filename = filename.replace(os.sep, '_')
scriptsdir = get_ipath_cur('scripts')
- util.write_file("%s/%s" %
- (scriptsdir,filename), util.dos2unix(payload), 0700)
+ util.write_file("%s/%s" %
+ (scriptsdir, filename), util.dos2unix(payload), 0700)
- def handle_upstart_job(self,_data,ctype,filename,payload, frequency):
+ def handle_upstart_job(self, _data, ctype, filename, payload, frequency):
# upstart jobs are only written on the first boot
if frequency != per_instance:
return
- if ctype == "__end__" or ctype == "__begin__": return
+ if ctype == "__end__" or ctype == "__begin__":
+ return
if not filename.endswith(".conf"):
- filename=filename+".conf"
+ filename = filename + ".conf"
- util.write_file("%s/%s" % ("/etc/init",filename),
+ util.write_file("%s/%s" % ("/etc/init", filename),
util.dos2unix(payload), 0644)
- def handle_cloud_config(self,_data,ctype,filename,payload, _frequency):
+ def handle_cloud_config(self, _data, ctype, filename, payload, _frequency):
if ctype == "__begin__":
- self.cloud_config_str=""
+ self.cloud_config_str = ""
return
if ctype == "__end__":
cloud_config = self.get_ipath("cloud_config")
@@ -405,37 +419,40 @@ class CloudInit:
## as CloudConfig does that also, merging it with this cfg
##
# ccfg = yaml.load(self.cloud_config_str)
- # if ccfg is None: ccfg = { }
+ # if ccfg is None: ccfg = {}
# self.cfg = util.mergedict(ccfg, self.cfg)
return
- self.cloud_config_str+="\n#%s\n%s" % (filename,payload)
+ self.cloud_config_str += "\n#%s\n%s" % (filename, payload)
- def handle_cloud_boothook(self,_data,ctype,filename,payload, _frequency):
- if ctype == "__end__": return
- if ctype == "__begin__": return
+ def handle_cloud_boothook(self, _data, ctype, filename, payload,
+ _frequency):
+ if ctype == "__end__":
+ return
+ if ctype == "__begin__":
+ return
- filename=filename.replace(os.sep,'_')
+ filename = filename.replace(os.sep, '_')
payload = util.dos2unix(payload)
- prefix="#cloud-boothook"
+ prefix = "#cloud-boothook"
start = 0
if payload.startswith(prefix):
start = len(prefix) + 1
-
+
boothooks_dir = self.get_ipath("boothooks")
- filepath = "%s/%s" % (boothooks_dir,filename)
+ filepath = "%s/%s" % (boothooks_dir, filename)
util.write_file(filepath, payload[start:], 0700)
try:
- env=os.environ.copy()
- env['INSTANCE_ID']= self.datasource.get_instance_id()
+ env = os.environ.copy()
+ env['INSTANCE_ID'] = self.datasource.get_instance_id()
subprocess.check_call([filepath], env=env)
except subprocess.CalledProcessError as e:
log.error("boothooks script %s returned %i" %
- (filepath,e.returncode))
+ (filepath, e.returncode))
except Exception as e:
log.error("boothooks unknown exception %s when running %s" %
- (e,filepath))
+ (e, filepath))
def get_public_ssh_keys(self):
return(self.datasource.get_public_ssh_keys())
@@ -449,71 +466,84 @@ class CloudInit:
def get_hostname(self, fqdn=False):
return(self.datasource.get_hostname(fqdn=fqdn))
- def device_name_to_device(self,name):
+ def device_name_to_device(self, name):
return(self.datasource.device_name_to_device(name))
# I really don't know if this should be here or not, but
# I needed it in cc_update_hostname, where that code had a valid 'cloud'
# reference, but did not have a cloudinit handle
# (ie, no cloudinit.get_cpath())
- def get_cpath(self,name=None):
+ def get_cpath(self, name=None):
return(get_cpath(name))
def initfs():
- subds = [ 'scripts/per-instance', 'scripts/per-once', 'scripts/per-boot',
- 'seed', 'instances', 'handlers', 'sem', 'data' ]
- dlist = [ ]
+ subds = ['scripts/per-instance', 'scripts/per-once', 'scripts/per-boot',
+ 'seed', 'instances', 'handlers', 'sem', 'data']
+ dlist = []
for subd in subds:
dlist.append("%s/%s" % (varlibdir, subd))
util.ensure_dirs(dlist)
- cfg = util.get_base_cfg(system_config,cfg_builtin,parsed_cfgs)
+ cfg = util.get_base_cfg(system_config, cfg_builtin, parsed_cfgs)
log_file = util.get_cfg_option_str(cfg, 'def_log_file', None)
perms = util.get_cfg_option_str(cfg, 'syslog_fix_perms', None)
if log_file:
- fp = open(log_file,"ab")
+ fp = open(log_file, "ab")
fp.close()
if log_file and perms:
- (u,g) = perms.split(':',1)
- if u == "-1" or u == "None": u = None
- if g == "-1" or g == "None": g = None
+ (u, g) = perms.split(':', 1)
+ if u == "-1" or u == "None":
+ u = None
+ if g == "-1" or g == "None":
+ g = None
util.chownbyname(log_file, u, g)
+
def purge_cache(rmcur=True):
- rmlist = [ boot_finished ]
- if rmcur: rmlist.append(cur_instance_link)
+ rmlist = [boot_finished]
+ if rmcur:
+ rmlist.append(cur_instance_link)
for f in rmlist:
try:
os.unlink(f)
except OSError as e:
- if e.errno == errno.ENOENT: continue
+ if e.errno == errno.ENOENT:
+ continue
return(False)
except:
return(False)
return(True)
+
# get_ipath_cur: get the current instance path for an item
def get_ipath_cur(name=None):
return("%s/%s%s" % (varlibdir, "instance", pathmap[name]))
+
# get_cpath : get the "clouddir" (/var/lib/cloud/<name>)
# for a name in dirmap
def get_cpath(name=None):
return("%s%s" % (varlibdir, pathmap[name]))
+
def get_base_cfg(cfg_path=None):
- if cfg_path is None: cfg_path = system_config
- return(util.get_base_cfg(cfg_path,cfg_builtin,parsed_cfgs))
+ if cfg_path is None:
+ cfg_path = system_config
+ return(util.get_base_cfg(cfg_path, cfg_builtin, parsed_cfgs))
+
def get_builtin_cfg():
return(yaml.load(cfg_builtin))
+
class DataSourceNotFoundException(Exception):
pass
+
def list_sources(cfg_list, depends):
- return(DataSource.list_sources(cfg_list,depends, ["cloudinit", "" ]))
+ return(DataSource.list_sources(cfg_list, depends, ["cloudinit", ""]))
+
def handler_register(mod, part_handlers, data, frequency=per_instance):
if not hasattr(mod, "handler_version"):
@@ -525,27 +555,30 @@ def handler_register(mod, part_handlers, data, frequency=per_instance):
handler_call_begin(mod, data, frequency)
return(mod)
+
def handler_call_begin(mod, data, frequency):
handler_handle_part(mod, data, "__begin__", None, None, frequency)
+
def handler_call_end(mod, data, frequency):
handler_handle_part(mod, data, "__end__", None, None, frequency)
+
def handler_handle_part(mod, data, ctype, filename, payload, frequency):
# only add the handler if the module should run
modfreq = getattr(mod, "frequency", per_instance)
- if not ( modfreq == per_always or
- ( frequency == per_instance and modfreq == per_instance)):
+ if not (modfreq == per_always or
+ (frequency == per_instance and modfreq == per_instance)):
return
if mod.handler_version == 1:
mod.handle_part(data, ctype, filename, payload)
else:
mod.handle_part(data, ctype, filename, payload, frequency)
-def partwalker_handle_handler(pdata, _ctype, _filename, payload):
+def partwalker_handle_handler(pdata, _ctype, _filename, payload):
curcount = pdata['handlercount']
- modname = 'part-handler-%03d' % curcount
+ modname = 'part-handler-%03d' % curcount
frequency = pdata['frequency']
modfname = modname + ".py"
@@ -561,6 +594,7 @@ def partwalker_handle_handler(pdata, _ctype, _filename, payload):
traceback.print_exc(file=sys.stderr)
return
+
def partwalker_callback(pdata, ctype, filename, payload):
# data here is the part_handlers array and then the data to pass through
if ctype == "text/part-handler":
@@ -573,18 +607,20 @@ def partwalker_callback(pdata, ctype, filename, payload):
handler_handle_part(pdata['handlers'][ctype], pdata['data'],
ctype, filename, payload, pdata['frequency'])
+
class InternalPartHandler:
freq = per_instance
- mtypes = [ ]
+ mtypes = []
handler_version = 1
handler = None
- def __init__(self, handler, mtypes, frequency, version = 2):
+
+ def __init__(self, handler, mtypes, frequency, version=2):
self.handler = handler
self.mtypes = mtypes
self.frequency = frequency
self.handler_version = version
- def __repr__():
+ def __repr__(self):
return("InternalPartHandler: [%s]" % self.mtypes)
def list_types(self):
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index 9ca72b77..40bfa7ea 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -1,15 +1,16 @@
import subprocess
+
def netdev_info():
- fields = ( "hwaddr", "addr", "bcast", "mask" )
- ifcfg_out = subprocess.check_output(["ifconfig", "-a"])
- devs = { }
+ fields = ("hwaddr", "addr", "bcast", "mask")
+ ifcfg_out = str(subprocess.check_output(["ifconfig", "-a"]))
+ devs = {}
for line in ifcfg_out.splitlines():
if len(line) == 0:
continue
if line[0] not in ("\t", " "):
curdev = line.split()[0]
- devs[curdev] = { "up": False }
+ devs[curdev] = {"up": False}
for field in fields:
devs[curdev][field] = ""
toks = line.lower().strip().split()
@@ -23,25 +24,26 @@ def netdev_info():
for i in range(len(toks)):
if toks[i] == "hwaddr":
try:
- devs[curdev]["hwaddr"] = toks[i+1]
+ devs[curdev]["hwaddr"] = toks[i + 1]
except IndexError:
pass
for field in ("addr", "bcast", "mask"):
target = "%s%s" % (field, fieldpost)
- if devs[curdev].get(target,""):
+ if devs[curdev].get(target, ""):
continue
if toks[i] == "%s:" % field:
try:
- devs[curdev][target] = toks[i+1]
+ devs[curdev][target] = toks[i + 1]
except IndexError:
pass
elif toks[i].startswith("%s:" % field):
- devs[curdev][target] = toks[i][len(field)+1:]
+ devs[curdev][target] = toks[i][len(field) + 1:]
return(devs)
+
def route_info():
- route_out = subprocess.check_output(["route", "-n"])
- routes = [ ]
+ route_out = str(subprocess.check_output(["route", "-n"]))
+ routes = []
for line in route_out.splitlines()[1:]:
if not line:
continue
@@ -51,14 +53,16 @@ def route_info():
routes.append(toks)
return(routes)
+
def getgateway():
for r in route_info():
if r[3].find("G") >= 0:
- return("%s[%s]" % (r[1],r[7]))
+ return("%s[%s]" % (r[1], r[7]))
return(None)
+
def debug_info(pre="ci-info: "):
- lines = [ ]
+ lines = []
try:
netdev = netdev_info()
except Exception:
@@ -66,7 +70,7 @@ def debug_info(pre="ci-info: "):
netdev = {}
for (dev, d) in netdev.iteritems():
lines.append("%s%-6s: %i %-15s %-15s %s" %
- (pre, dev, d["up"], d["addr"],d["mask"], d["hwaddr"]))
+ (pre, dev, d["up"], d["addr"], d["mask"], d["hwaddr"]))
try:
routes = route_info()
except Exception:
@@ -76,8 +80,9 @@ def debug_info(pre="ci-info: "):
for r in routes:
lines.append("%sroute-%d: %-15s %-15s %-15s %-6s %s" %
(pre, n, r[0], r[1], r[2], r[7], r[3]))
- n = n+1
+ n = n + 1
return('\n'.join(lines))
+
if __name__ == '__main__':
print debug_info()
diff --git a/cloudinit/util.py b/cloudinit/util.py
index ba9becda..e4337e3a 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -38,20 +38,22 @@ try:
except ImportError:
HAVE_LIBSELINUX = False
+
def read_conf(fname):
try:
- stream = open(fname,"r")
+ stream = open(fname, "r")
conf = yaml.load(stream)
stream.close()
return conf
except IOError as e:
if e.errno == errno.ENOENT:
- return { }
+ return {}
raise
-def get_base_cfg(cfgfile,cfg_builtin="", parsed_cfgs=None):
- kerncfg = { }
- syscfg = { }
+
+def get_base_cfg(cfgfile, cfg_builtin="", parsed_cfgs=None):
+ kerncfg = {}
+ syscfg = {}
if parsed_cfgs and cfgfile in parsed_cfgs:
return(parsed_cfgs[cfgfile])
@@ -66,7 +68,7 @@ def get_base_cfg(cfgfile,cfg_builtin="", parsed_cfgs=None):
if cfg_builtin:
builtin = yaml.load(cfg_builtin)
- fin = mergedict(combined,builtin)
+ fin = mergedict(combined, builtin)
else:
fin = combined
@@ -74,18 +76,24 @@ def get_base_cfg(cfgfile,cfg_builtin="", parsed_cfgs=None):
parsed_cfgs[cfgfile] = fin
return(fin)
+
def get_cfg_option_bool(yobj, key, default=False):
- if not yobj.has_key(key): return default
+ if key not in yobj:
+ return default
val = yobj[key]
- if val is True: return True
- if str(val).lower() in [ 'true', '1', 'on', 'yes']:
+ if val is True:
+ return True
+ if str(val).lower() in ['true', '1', 'on', 'yes']:
return True
return False
+
def get_cfg_option_str(yobj, key, default=None):
- if not yobj.has_key(key): return default
+ if key not in yobj:
+ return default
return yobj[key]
+
def get_cfg_option_list_or_str(yobj, key, default=None):
"""
Gets the C{key} config option from C{yobj} as a list of strings. If the
@@ -106,15 +114,18 @@ def get_cfg_option_list_or_str(yobj, key, default=None):
return yobj[key]
return [yobj[key]]
+
# get a cfg entry by its path array
# for f['a']['b']: get_cfg_by_path(mycfg,('a','b'))
-def get_cfg_by_path(yobj,keyp,default=None):
+def get_cfg_by_path(yobj, keyp, default=None):
cur = yobj
for tok in keyp:
- if tok not in cur: return(default)
+ if tok not in cur:
+ return(default)
cur = cur[tok]
return(cur)
+
def mergedict(src, cand):
"""
Merge values from C{cand} into C{src}. If C{src} has a key C{cand} will
@@ -128,6 +139,7 @@ def mergedict(src, cand):
src[k] = mergedict(src[k], v)
return src
+
def delete_dir_contents(dirname):
"""
Deletes all contents of a directory without deleting the directory itself.
@@ -141,6 +153,7 @@ def delete_dir_contents(dirname):
else:
os.unlink(node_fullpath)
+
def write_file(filename, content, mode=0644, omode="wb"):
"""
Writes a file with the given content and sets the file mode as specified.
@@ -164,13 +177,15 @@ def write_file(filename, content, mode=0644, omode="wb"):
f.close()
restorecon_if_possible(filename)
+
def restorecon_if_possible(path, recursive=False):
if HAVE_LIBSELINUX and selinux.is_selinux_enabled():
selinux.restorecon(path, recursive=recursive)
+
# get keyid from keyserver
-def getkeybyid(keyid,keyserver):
- shcmd="""
+def getkeybyid(keyid, keyserver):
+ shcmd = """
k=${1} ks=${2};
exec 2>/dev/null
[ -n "$k" ] || exit 1;
@@ -182,36 +197,43 @@ def getkeybyid(keyid,keyserver):
fi
[ -n "${armour}" ] && echo "${armour}"
"""
- args=['sh', '-c', shcmd, "export-gpg-keyid", keyid, keyserver]
+ args = ['sh', '-c', shcmd, "export-gpg-keyid", keyid, keyserver]
return(subp(args)[0])
+
def runparts(dirp, skip_no_exist=True):
- if skip_no_exist and not os.path.isdir(dirp): return
-
+ if skip_no_exist and not os.path.isdir(dirp):
+ return
+
# per bug 857926, Fedora's run-parts will exit failure on empty dir
- if os.path.isdir(dirp) and os.listdir(dirp) == []: return
+ if os.path.isdir(dirp) and os.listdir(dirp) == []:
+ return
- cmd = [ 'run-parts', '--regex', '.*', dirp ]
+ cmd = ['run-parts', '--regex', '.*', dirp]
sp = subprocess.Popen(cmd)
sp.communicate()
if sp.returncode is not 0:
- raise subprocess.CalledProcessError(sp.returncode,cmd)
+ raise subprocess.CalledProcessError(sp.returncode, cmd)
return
+
def subp(args, input_=None):
sp = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
- out,err = sp.communicate(input_)
+ out, err = sp.communicate(input_)
if sp.returncode is not 0:
- raise subprocess.CalledProcessError(sp.returncode,args, (out,err))
- return(out,err)
+ raise subprocess.CalledProcessError(sp.returncode, args, (out, err))
+ return(out, err)
+
def render_to_file(template, outfile, searchList):
- t = Template(file='/etc/cloud/templates/%s.tmpl' % template, searchList=[searchList])
+ t = Template(file='/etc/cloud/templates/%s.tmpl' % template,
+ searchList=[searchList])
f = open(outfile, 'w')
f.write(t.respond())
f.close()
+
def render_string(template, searchList):
return(Template(template, searchList=[searchList]).respond())
@@ -220,22 +242,22 @@ def render_string(template, searchList):
# returns boolean indicating success or failure (presense of files)
# if files are present, populates 'fill' dictionary with 'user-data' and
# 'meta-data' entries
-def read_optional_seed(fill,base="",ext="", timeout=5):
+def read_optional_seed(fill, base="", ext="", timeout=5):
try:
- (md,ud) = read_seeded(base,ext,timeout)
- fill['user-data']= ud
- fill['meta-data']= md
+ (md, ud) = read_seeded(base, ext, timeout)
+ fill['user-data'] = ud
+ fill['meta-data'] = md
return True
except OSError, e:
if e.errno == errno.ENOENT:
return False
raise
-
+
# raise OSError with enoent if not found
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
if base.startswith("/"):
- base="file://%s" % base
+ base = "file://%s" % base
# default retries for file is 0. for network is 10
if base.startswith("file://"):
@@ -248,20 +270,22 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- raise_err = None
- for attempt in range(0,retries+1):
+ no_exc = object()
+ raise_err = no_exc
+ for attempt in range(0, retries + 1):
try:
md_str = readurl(md_url, timeout=timeout)
ud = readurl(ud_url, timeout=timeout)
md = yaml.load(md_str)
-
- return(md,ud)
+
+ return(md, ud)
except urllib2.HTTPError as e:
raise_err = e
except urllib2.URLError as e:
raise_err = e
- if isinstance(e.reason,OSError) and e.reason.errno == errno.ENOENT:
- raise_err = e.reason
+ if (isinstance(e.reason, OSError) and
+ e.reason.errno == errno.ENOENT):
+ raise_err = e.reason
if attempt == retries:
break
@@ -271,13 +295,16 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
raise(raise_err)
-def logexc(log,lvl=logging.DEBUG):
- log.log(lvl,traceback.format_exc())
+
+def logexc(log, lvl=logging.DEBUG):
+ log.log(lvl, traceback.format_exc())
+
class RecursiveInclude(Exception):
pass
-def read_file_with_includes(fname, rel = ".", stack=None, patt = None):
+
+def read_file_with_includes(fname, rel=".", stack=None, patt=None):
if stack is None:
stack = []
if not fname.startswith("/"):
@@ -292,7 +319,7 @@ def read_file_with_includes(fname, rel = ".", stack=None, patt = None):
(fname, len(stack))))
if patt == None:
- patt = re.compile("^#(opt_include|include)[ \t].*$",re.MULTILINE)
+ patt = re.compile("^#(opt_include|include)[ \t].*$", re.MULTILINE)
try:
fp = open(fname)
@@ -307,11 +334,12 @@ def read_file_with_includes(fname, rel = ".", stack=None, patt = None):
cur = 0
while True:
match = patt.search(contents[cur:])
- if not match: break
+ if not match:
+ break
loc = match.start() + cur
endl = match.end() + cur
- (key, cur_fname) = contents[loc:endl].split(None,2)
+ (key, cur_fname) = contents[loc:endl].split(None, 2)
cur_fname = cur_fname.strip()
try:
@@ -321,43 +349,47 @@ def read_file_with_includes(fname, rel = ".", stack=None, patt = None):
inc_contents = ""
else:
raise
- contents = contents[0:loc] + inc_contents + contents[endl+1:]
+ contents = contents[0:loc] + inc_contents + contents[endl + 1:]
cur = loc + len(inc_contents)
stack.pop()
return(contents)
+
def read_conf_d(confd):
# get reverse sorted list (later trumps newer)
- confs = sorted(os.listdir(confd),reverse=True)
-
+ confs = sorted(os.listdir(confd), reverse=True)
+
# remove anything not ending in '.cfg'
confs = [f for f in confs if f.endswith(".cfg")]
# remove anything not a file
- confs = [f for f in confs if os.path.isfile("%s/%s" % (confd,f))]
+ confs = [f for f in confs if os.path.isfile("%s/%s" % (confd, f))]
- cfg = { }
+ cfg = {}
for conf in confs:
- cfg = mergedict(cfg,read_conf("%s/%s" % (confd,conf)))
+ cfg = mergedict(cfg, read_conf("%s/%s" % (confd, conf)))
return(cfg)
+
def read_conf_with_confd(cfgfile):
cfg = read_conf(cfgfile)
confd = False
if "conf_d" in cfg:
if cfg['conf_d'] is not None:
confd = cfg['conf_d']
- if not isinstance(confd,str):
- raise Exception("cfgfile %s contains 'conf_d' with non-string" % cfgfile)
+ if not isinstance(confd, str):
+ raise Exception("cfgfile %s contains 'conf_d' "
+ "with non-string" % cfgfile)
elif os.path.isdir("%s.d" % cfgfile):
confd = "%s.d" % cfgfile
- if not confd: return(cfg)
+ if not confd:
+ return(cfg)
confd_cfg = read_conf_d(confd)
- return(mergedict(confd_cfg,cfg))
+ return(mergedict(confd_cfg, cfg))
def get_cmdline():
@@ -371,7 +403,8 @@ def get_cmdline():
except:
cmdline = ""
return(cmdline)
-
+
+
def read_cc_from_cmdline(cmdline=None):
# this should support reading cloud-config information from
# the kernel command line. It is intended to support content of the
@@ -384,23 +417,25 @@ def read_cc_from_cmdline(cmdline=None):
if cmdline is None:
cmdline = get_cmdline()
- tag_begin="cc:"
- tag_end="end_cc"
+ tag_begin = "cc:"
+ tag_end = "end_cc"
begin_l = len(tag_begin)
end_l = len(tag_end)
clen = len(cmdline)
- tokens = [ ]
+ tokens = []
begin = cmdline.find(tag_begin)
while begin >= 0:
end = cmdline.find(tag_end, begin + begin_l)
if end < 0:
end = clen
- tokens.append(cmdline[begin+begin_l:end].lstrip().replace("\\n","\n"))
-
+ tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n",
+ "\n"))
+
begin = cmdline.find(tag_begin, end + end_l)
return('\n'.join(tokens))
+
def ensure_dirs(dirlist, mode=0755):
fixmodes = []
for d in dirlist:
@@ -410,16 +445,20 @@ def ensure_dirs(dirlist, mode=0755):
else:
os.makedirs(d, mode)
except OSError as e:
- if e.errno != errno.EEXIST: raise
- if mode != None: fixmodes.append(d)
+ if e.errno != errno.EEXIST:
+ raise
+ if mode != None:
+ fixmodes.append(d)
for d in fixmodes:
os.chmod(d, mode)
-def chownbyname(fname,user=None,group=None):
+
+def chownbyname(fname, user=None, group=None):
uid = -1
gid = -1
- if user == None and group == None: return
+ if user == None and group == None:
+ return
if user:
import pwd
uid = pwd.getpwnam(user).pw_uid
@@ -427,10 +466,11 @@ def chownbyname(fname,user=None,group=None):
import grp
gid = grp.getgrnam(group).gr_gid
- os.chown(fname,uid,gid)
+ os.chown(fname, uid, gid)
+
def readurl(url, data=None, timeout=None):
- openargs = { }
+ openargs = {}
if timeout != None:
openargs['timeout'] = timeout
@@ -443,44 +483,48 @@ def readurl(url, data=None, timeout=None):
response = urllib2.urlopen(req, **openargs)
return(response.read())
+
# shellify, takes a list of commands
# for each entry in the list
# if it is an array, shell protect it (with single ticks)
# if it is a string, do nothing
def shellify(cmdlist):
- content="#!/bin/sh\n"
- escaped="%s%s%s%s" % ( "'", '\\', "'", "'" )
+ content = "#!/bin/sh\n"
+ escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
for args in cmdlist:
# if the item is a list, wrap all items in single tick
# if its not, then just write it directly
- if isinstance(args,list):
- fixed = [ ]
+ if isinstance(args, list):
+ fixed = []
for f in args:
- fixed.append("'%s'" % str(f).replace("'",escaped))
- content="%s%s\n" % ( content, ' '.join(fixed) )
+ fixed.append("'%s'" % str(f).replace("'", escaped))
+ content = "%s%s\n" % (content, ' '.join(fixed))
else:
- content="%s%s\n" % ( content, str(args) )
+ content = "%s%s\n" % (content, str(args))
return content
+
def dos2unix(string):
# find first end of line
pos = string.find('\n')
- if pos <= 0 or string[pos-1] != '\r': return(string)
- return(string.replace('\r\n','\n'))
+ if pos <= 0 or string[pos - 1] != '\r':
+ return(string)
+ return(string.replace('\r\n', '\n'))
+
def islxc():
# is this host running lxc?
try:
with open("/proc/1/cgroup") as f:
- if f.read() == "/":
+ if f.read() == "/":
return True
except IOError as e:
if e.errno != errno.ENOENT:
raise
try:
- # try to run a program named 'lxc-is-container'. if it returns true, then
- # we're inside a container. otherwise, no
+ # try to run a program named 'lxc-is-container'. if it returns true,
+ # then we're inside a container. otherwise, no
sp = subprocess.Popen(['lxc-is-container'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
sp.communicate(None)
@@ -491,13 +535,14 @@ def islxc():
return False
+
def get_hostname_fqdn(cfg, cloud):
# return the hostname and fqdn from 'cfg'. If not found in cfg,
# then fall back to data from cloud
if "fqdn" in cfg:
# user specified a fqdn. Default hostname then is based off that
fqdn = cfg['fqdn']
- hostname = get_cfg_option_str(cfg,"hostname",fqdn.split('.')[0])
+ hostname = get_cfg_option_str(cfg, "hostname", fqdn.split('.')[0])
else:
if "hostname" in cfg and cfg['hostname'].find('.') > 0:
# user specified hostname, and it had '.' in it
@@ -505,7 +550,7 @@ def get_hostname_fqdn(cfg, cloud):
fqdn = cfg['hostname']
hostname = cfg['hostname'][:fqdn.find('.')]
else:
- # no fqdn set, get fqdn from cloud.
+ # no fqdn set, get fqdn from cloud.
# get hostname from cfg if available otherwise cloud
fqdn = cloud.get_hostname(fqdn=True)
if "hostname" in cfg:
@@ -514,9 +559,10 @@ def get_hostname_fqdn(cfg, cloud):
hostname = cloud.get_hostname()
return(hostname, fqdn)
+
def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
# this parses /etc/hosts to get a fqdn. It should return the same
- # result as 'hostname -f <hostname>' if /etc/hosts.conf
+ # result as 'hostname -f <hostname>' if /etc/hosts.conf
# did not have did not have 'bind' in the order attribute
fqdn = None
try:
@@ -542,6 +588,7 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
return fqdn
+
def is_resolvable(name):
""" determine if a url is resolvable, return a boolean """
try:
@@ -550,10 +597,12 @@ def is_resolvable(name):
except socket.gaierror:
return False
+
def is_resolvable_url(url):
""" determine if this url is resolvable (existing or ip) """
return(is_resolvable(urlparse.urlparse(url).hostname))
+
def search_for_mirror(candidates):
""" Search through a list of mirror urls for one that works """
for cand in candidates:
@@ -565,6 +614,7 @@ def search_for_mirror(candidates):
return None
+
def close_stdin():
"""
reopen stdin as /dev/null so even subprocesses or other os level things get
diff --git a/tests/unittests/test_handler_ca_certs.py b/tests/unittests/test_handler_ca_certs.py
index c289a4f6..d6513b5b 100644
--- a/tests/unittests/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler_ca_certs.py
@@ -3,6 +3,7 @@ from mocker import MockerTestCase
from cloudinit.util import write_file, delete_dir_contents
from cloudinit.CloudConfig.cc_ca_certs import (
handle, update_ca_certs, add_ca_certs, remove_default_ca_certs)
+from logging import getLogger
class TestNoConfig(MockerTestCase):
@@ -10,7 +11,7 @@ class TestNoConfig(MockerTestCase):
super(TestNoConfig, self).setUp()
self.name = "ca-certs"
self.cloud_init = None
- self.log = None
+ self.log = getLogger("TestNoConfig")
self.args = []
def test_no_config(self):
@@ -31,7 +32,7 @@ class TestConfig(MockerTestCase):
super(TestConfig, self).setUp()
self.name = "ca-certs"
self.cloud_init = None
- self.log = None
+ self.log = getLogger("TestNoConfig")
self.args = []
# Mock out the functions that actually modify the system
diff --git a/tools/run-pylint b/tools/run-pylint
new file mode 100755
index 00000000..e271c3d5
--- /dev/null
+++ b/tools/run-pylint
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+def_files='cloud*.py cloudinit/*.py cloudinit/CloudConfig/*.py'
+
+if [ $# -eq 0 ]; then
+ files=( )
+ for f in $def_files; do
+ [ -f "$f" ] || { echo "failed, $f not a file" 1>&2; exit 1; }
+ files[${#files[@]}]=${f}
+ done
+else
+ files=( "$@" );
+fi
+
+cmd=(
+ pylint
+ --reports=n
+ --include-ids=y
+ --max-line-length=79
+
+ --disable=R
+ --disable=I
+
+ --disable=W0142 # Used * or ** magic
+ --disable=W0511 # TODO/FIXME note
+ --disable=W0702 # No exception type(s) specified
+ --disable=W0703 # Catch "Exception"
+
+ --disable=C0103 # Invalid name
+ --disable=C0111 # Missing docstring
+
+ "${files[@]}"
+)
+
+echo -e "\nRunning pylint:"
+echo "${cmd[@]}"
+"${cmd[@]}"
+
+cmd=(
+ pep8
+
+ --ignore=E501 # Line too long (these are caught by pylint above)
+
+ "${files[@]}"
+)
+
+echo -e "\nRunning pep8:"
+echo "${cmd[@]}"
+"${cmd[@]}"