43 files changed, 553 insertions, 398 deletions
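Most of the churn in the per-file diffs below is mechanical cleanup: the deprecated dict.has_key() calls are replaced with the in operator (has_key() no longer exists in Python 3), and whitespace is normalized in a PEP 8 direction (blank lines between functions, no spaces inside brackets, trailing whitespace dropped). A minimal, purely illustrative sketch of the membership-test rewrite that recurs throughout this change (the cfg dict here is hypothetical):

    # Illustrative only: the has_key() -> "in" rewrite applied across the modules below.
    cfg = {'apt_sources': []}

    # old style, removed in this change (dict.has_key() is gone in Python 3):
    #   if cfg.has_key('apt_sources'): ...

    # new style used throughout:
    if 'apt_sources' in cfg:
        print("apt_sources present")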
diff --git a/cloud-init-cfg.py b/cloud-init-cfg.py index 37fe6398..def9583a 100755 --- a/cloud-init-cfg.py +++ b/cloud-init-cfg.py @@ -24,9 +24,11 @@ import cloudinit.CloudConfig as CC import logging import os -def Usage(out = sys.stdout): + +def Usage(out=sys.stdout): out.write("Usage: %s name\n" % sys.argv[0]) - + + def main(): # expect to be called with # name [ freq [ args ] @@ -38,7 +40,7 @@ def main(): util.close_stdin() modename = "config" - + if len(sys.argv) < 2: Usage(sys.stderr) sys.exit(1) @@ -59,10 +61,10 @@ def main(): cfg_path = cloudinit.get_ipath_cur("cloud_config") cfg_env_name = cloudinit.cfg_env_name - if os.environ.has_key(cfg_env_name): + if cfg_env_name in os.environ: cfg_path = os.environ[cfg_env_name] - cloud = cloudinit.CloudInit(ds_deps=[]) # ds_deps=[], get only cached + cloud = cloudinit.CloudInit(ds_deps=[]) # ds_deps=[], get only cached try: cloud.get_data_source() except cloudinit.DataSourceNotFoundException as e: @@ -81,7 +83,7 @@ def main(): log = logging.getLogger() log.info("cloud-init-cfg %s" % sys.argv[1:]) - module_list = [ ] + module_list = [] if name == "all": modlist_cfg_name = "cloud_%s_modules" % modename module_list = CC.read_cc_modules(cc.cfg, modlist_cfg_name) @@ -89,18 +91,20 @@ def main(): err("no modules to run in cloud_config [%s]" % modename, log) sys.exit(0) else: - module_list.append( [ name, freq ] + run_args ) + module_list.append([name, freq] + run_args) failures = CC.run_cc_modules(cc, module_list, log) if len(failures): err("errors running cloud_config [%s]: %s" % (modename, failures), log) sys.exit(len(failures)) + def err(msg, log=None): if log: log.error(msg) sys.stderr.write(msg + "\n") + def fail(msg, log=None): err(msg, log) sys.exit(1) diff --git a/cloud-init-query.py b/cloud-init-query.py index a066cb73..71987174 100755 --- a/cloud-init-query.py +++ b/cloud-init-query.py @@ -21,20 +21,23 @@ import sys import cloudinit import cloudinit.CloudConfig -def Usage(out = sys.stdout): + +def Usage(out=sys.stdout): out.write("Usage: %s name\n" % sys.argv[0]) - + + def main(): # expect to be called with name of item to fetch if len(sys.argv) != 2: Usage(sys.stderr) sys.exit(1) - cc = cloudinit.CloudConfig.CloudConfig(cloudinit.cloud_config) + cfg_path = cloudinit.get_ipath_cur("cloud_config") + cc = cloudinit.CloudConfig.CloudConfig(cfg_path) data = { - 'user_data' : cc.cloud.get_userdata(), - 'user_data_raw' : cc.cloud.get_userdata_raw(), - 'instance_id' : cc.cloud.get_instance_id(), + 'user_data': cc.cloud.get_userdata(), + 'user_data_raw': cc.cloud.get_userdata_raw(), + 'instance_id': cc.cloud.get_instance_id(), } name = sys.argv[1].replace('-', '_') diff --git a/cloud-init.py b/cloud-init.py index 9806c22c..f9f71949 100755 --- a/cloud-init.py +++ b/cloud-init.py @@ -30,15 +30,17 @@ import logging import errno import os + def warn(wstr): sys.stderr.write("WARN:%s" % wstr) + def main(): util.close_stdin() - cmds = ( "start", "start-local" ) - deps = { "start" : ( ds.DEP_FILESYSTEM, ds.DEP_NETWORK ), - "start-local" : ( ds.DEP_FILESYSTEM, ) } + cmds = ("start", "start-local") + deps = {"start": (ds.DEP_FILESYSTEM, ds.DEP_NETWORK), + "start-local": (ds.DEP_FILESYSTEM, )} cmd = "" if len(sys.argv) > 1: @@ -92,7 +94,7 @@ def main(): if cmd == "start": print netinfo.debug_info() - stop_files = ( cloudinit.get_ipath_cur("obj_pkl"), nonet_path ) + stop_files = (cloudinit.get_ipath_cur("obj_pkl"), nonet_path) # if starting as the network start, there are cases # where everything is already done for us, and it makes # most sense to exit 
early and silently @@ -102,7 +104,7 @@ def main(): fp.close() except: continue - + log.debug("no need for cloud-init start to run (%s)\n", f) sys.exit(0) elif cmd == "start-local": @@ -172,10 +174,10 @@ def main(): cc_path = cloudinit.get_ipath_cur('cloud_config') cc_ready = cc.cfg.get("cc_ready_cmd", ['initctl', 'emit', 'cloud-config', - '%s=%s' % (cloudinit.cfg_env_name, cc_path) ]) + '%s=%s' % (cloudinit.cfg_env_name, cc_path)]) if cc_ready: if isinstance(cc_ready, str): - cc_ready = [ 'sh', '-c', cc_ready] + cc_ready = ['sh', '-c', cc_ready] subprocess.Popen(cc_ready).communicate() module_list = CC.read_cc_modules(cc.cfg, "cloud_init_modules") diff --git a/cloudinit/CloudConfig/__init__.py b/cloudinit/CloudConfig/__init__.py index 76cafebd..c9acfbf9 100644 --- a/cloudinit/CloudConfig/__init__.py +++ b/cloudinit/CloudConfig/__init__.py @@ -29,6 +29,7 @@ per_instance = cloudinit.per_instance per_always = cloudinit.per_always per_once = cloudinit.per_once + class CloudConfig(): cfgfile = None cfg = None @@ -51,12 +52,12 @@ class CloudConfig(): cloudinit.log.debug(traceback.format_exc() + "\n") cfg = None if cfg is None: - cfg = { } + cfg = {} try: ds_cfg = self.cloud.datasource.get_config_obj() except: - ds_cfg = { } + ds_cfg = {} cfg = util.mergedict(cfg, ds_cfg) return(util.mergedict(cfg, self.cloud.cfg)) @@ -71,10 +72,11 @@ class CloudConfig(): freq = def_freq self.cloud.sem_and_run("config-" + name, freq, handler, - [ name, self.cfg, self.cloud, cloudinit.log, args ]) + [name, self.cfg, self.cloud, cloudinit.log, args]) except: raise + # reads a cloudconfig module list, returns # a 2 dimensional array suitable to pass to run_cc_modules def read_cc_modules(cfg, name): @@ -93,13 +95,14 @@ def read_cc_modules(cfg, name): else: raise TypeError("failed to read '%s' item in config") return(module_list) - + + def run_cc_modules(cc, module_list, log): failures = [] for cfg_mod in module_list: name = cfg_mod[0] freq = None - run_args = [ ] + run_args = [] if len(cfg_mod) > 1: freq = cfg_mod[1] if len(cfg_mod) > 2: @@ -107,7 +110,7 @@ def run_cc_modules(cc, module_list, log): try: log.debug("handling %s with freq=%s and args=%s" % - (name, freq, run_args )) + (name, freq, run_args)) cc.handle(name, run_args, freq=freq) except: log.warn(traceback.format_exc()) @@ -117,7 +120,8 @@ def run_cc_modules(cc, module_list, log): return(failures) -# always returns well formated values + +# always returns well formated values # cfg is expected to have an entry 'output' in it, which is a dictionary # that includes entries for 'init', 'config', 'final' or 'all' # init: /var/log/cloud.out @@ -128,7 +132,7 @@ def run_cc_modules(cc, module_list, log): # this returns the specific 'mode' entry, cleanly formatted, with value # None if if none is given def get_output_cfg(cfg, mode="init"): - ret = [ None, None ] + ret = [None, None] if not 'output' in cfg: return ret @@ -144,7 +148,7 @@ def get_output_cfg(cfg, mode="init"): # if value is a string, it specifies stdout and stderr if isinstance(modecfg, str): - ret = [ modecfg, modecfg ] + ret = [modecfg, modecfg] # if its a list, then we expect (stdout, stderr) if isinstance(modecfg, list): @@ -166,7 +170,7 @@ def get_output_cfg(cfg, mode="init"): if ret[1] == "&1": ret[1] = ret[0] - swlist = [ ">>", ">", "|" ] + swlist = [">>", ">", "|"] for i in range(len(ret)): if not ret[i]: continue @@ -179,12 +183,12 @@ def get_output_cfg(cfg, mode="init"): break if not found: # default behavior is append - val = "%s %s" % ( ">>", val.strip()) + val = "%s %s" % (">>", 
val.strip()) ret[i] = val return(ret) - + # redirect_output(outfmt, errfmt, orig_out, orig_err) # replace orig_out and orig_err with filehandles specified in outfmt or errfmt # fmt can be: @@ -193,7 +197,7 @@ def get_output_cfg(cfg, mode="init"): # | program [ arg1 [ arg2 [ ... ] ] ] # # with a '|', arguments are passed to shell, so one level of -# shell escape is required. +# shell escape is required. def redirect_output(outfmt, errfmt, o_out=sys.stdout, o_err=sys.stderr): if outfmt: (mode, arg) = outfmt.split(" ", 1) @@ -231,6 +235,7 @@ def redirect_output(outfmt, errfmt, o_out=sys.stdout, o_err=sys.stderr): os.dup2(new_fp.fileno(), o_err.fileno()) return + def run_per_instance(name, func, args, clear_on_fail=False): semfile = "%s/%s" % (cloudinit.get_ipath_cur("data"), name) if os.path.exists(semfile): @@ -244,6 +249,7 @@ def run_per_instance(name, func, args, clear_on_fail=False): os.unlink(semfile) raise + # apt_get top level command (install, update...), and args to pass it def apt_get(tlc, args=None): if args is None: @@ -255,9 +261,11 @@ def apt_get(tlc, args=None): cmd.extend(args) subprocess.check_call(cmd, env=e) + def update_package_sources(): run_per_instance("update-sources", apt_get, ("update",)) + def install_packages(pkglist): update_package_sources() apt_get("install", pkglist) diff --git a/cloudinit/CloudConfig/cc_apt_update_upgrade.py b/cloudinit/CloudConfig/cc_apt_update_upgrade.py index dea89d25..8aaaa334 100644 --- a/cloudinit/CloudConfig/cc_apt_update_upgrade.py +++ b/cloudinit/CloudConfig/cc_apt_update_upgrade.py @@ -22,6 +22,7 @@ import os import glob import cloudinit.CloudConfig as cc + def handle(_name, cfg, cloud, log, _args): update = util.get_cfg_option_bool(cfg, 'apt_update', False) upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) @@ -39,7 +40,6 @@ def handle(_name, cfg, cloud, log, _args): "archive.ubuntu.com/ubuntu") rename_apt_lists(old_mir, mirror) - # set up proxy proxy = cfg.get("apt_proxy", None) proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy" @@ -54,9 +54,9 @@ def handle(_name, cfg, cloud, log, _args): os.unlink(proxy_filename) # process 'apt_sources' - if cfg.has_key('apt_sources'): + if 'apt_sources' in cfg: errors = add_sources(cfg['apt_sources'], - { 'MIRROR' : mirror, 'RELEASE' : release } ) + {'MIRROR': mirror, 'RELEASE': release}) for e in errors: log.warn("Source Error: %s\n" % ':'.join(e)) @@ -71,7 +71,7 @@ def handle(_name, cfg, cloud, log, _args): pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', []) - errors = [ ] + errors = [] if update or len(pkglist) or upgrade: try: cc.update_package_sources() @@ -101,6 +101,7 @@ def handle(_name, cfg, cloud, log, _args): return(True) + def mirror2lists_fileprefix(mirror): string = mirror # take of http:// or ftp:// @@ -108,12 +109,12 @@ def mirror2lists_fileprefix(mirror): string = string[0:-1] pos = string.find("://") if pos >= 0: - string = string[pos+3:] + string = string[pos + 3:] string = string.replace("/", "_") return string + def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): - oprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(omirror)) nprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(new_mirror)) if(oprefix == nprefix): @@ -122,26 +123,31 @@ def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): for filename in glob.glob("%s_*" % oprefix): os.rename(filename, "%s%s" % (nprefix, filename[olen:])) + def get_release(): stdout, _stderr = subprocess.Popen(['lsb_release', '-cs'], stdout=subprocess.PIPE).communicate() 
- return(stdout.strip()) + return(str(stdout).strip()) + def generate_sources_list(codename, mirror): util.render_to_file('sources.list', '/etc/apt/sources.list', \ - { 'mirror' : mirror, 'codename' : codename }) + {'mirror': mirror, 'codename': codename}) + -# srclist is a list of dictionaries, -# each entry must have: 'source' -# may have: key, ( keyid and keyserver) def add_sources(srclist, searchList=None): + """ + add entries in /etc/apt/sources.list.d for each abbreviated + sources.list entry in 'srclist'. When rendering template, also + include the values in dictionary searchList + """ if searchList is None: searchList = {} elst = [] for ent in srclist: - if not ent.has_key('source'): - elst.append([ "", "missing source" ]) + if 'source' not in ent: + elst.append(["", "missing source"]) continue source = ent['source'] @@ -154,16 +160,16 @@ def add_sources(srclist, searchList=None): source = util.render_string(source, searchList) - if not ent.has_key('filename'): + if 'filename' not in ent: ent['filename'] = 'cloud_config_sources.list' if not ent['filename'].startswith("/"): ent['filename'] = "%s/%s" % \ ("/etc/apt/sources.list.d/", ent['filename']) - if ( ent.has_key('keyid') and not ent.has_key('key') ): + if ('keyid' in ent and 'key' not in ent): ks = "keyserver.ubuntu.com" - if ent.has_key('keyserver'): + if 'keyserver' in ent: ks = ent['keyserver'] try: ent['key'] = util.getkeybyid(ent['keyid'], ks) @@ -171,7 +177,7 @@ def add_sources(srclist, searchList=None): elst.append([source, "failed to get key from %s" % ks]) continue - if ent.has_key('key'): + if 'key' in ent: try: util.subp(('apt-key', 'add', '-'), ent['key']) except: @@ -199,7 +205,7 @@ def find_apt_mirror(cloud, cfg): cfg_mirror = cfg.get("apt_mirror", None) if cfg_mirror: mirror = cfg["apt_mirror"] - elif cfg.has_key("apt_mirror_search"): + elif "apt_mirror_search" in cfg: mirror = util.search_for_mirror(cfg['apt_mirror_search']) else: if cloud: @@ -211,7 +217,7 @@ def find_apt_mirror(cloud, cfg): if not mirror and cloud: # if we have a fqdn, then search its domain portion first - ( _hostname, fqdn ) = util.get_hostname_fqdn(cfg, cloud) + (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) mydom = ".".join(fqdn.split(".")[1:]) if mydom: doms.append(".%s" % mydom) @@ -220,7 +226,7 @@ def find_apt_mirror(cloud, cfg): doms.extend((".localdomain", "",)) mirror_list = [] - mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro ) + mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) for post in doms: mirror_list.append(mirrorfmt % post) diff --git a/cloudinit/CloudConfig/cc_bootcmd.py b/cloudinit/CloudConfig/cc_bootcmd.py index 98a7a747..66c452e9 100644 --- a/cloudinit/CloudConfig/cc_bootcmd.py +++ b/cloudinit/CloudConfig/cc_bootcmd.py @@ -18,11 +18,13 @@ import cloudinit.util as util import subprocess import tempfile +import os from cloudinit.CloudConfig import per_always frequency = per_always + def handle(_name, cfg, cloud, log, _args): - if not cfg.has_key("bootcmd"): + if "bootcmd" not in cfg: return try: @@ -33,7 +35,7 @@ def handle(_name, cfg, cloud, log, _args): except: log.warn("failed to shellify bootcmd") raise - + try: env = os.environ.copy() env['INSTANCE_ID'] = cloud.get_instance_id() diff --git a/cloudinit/CloudConfig/cc_byobu.py b/cloudinit/CloudConfig/cc_byobu.py index 04825521..7e455a7a 100644 --- a/cloudinit/CloudConfig/cc_byobu.py +++ b/cloudinit/CloudConfig/cc_byobu.py @@ -19,6 +19,7 @@ import cloudinit.util as util import subprocess import traceback + def handle(_name, cfg, _cloud, 
log, args): if len(args) != 0: value = args[0] @@ -31,8 +32,8 @@ def handle(_name, cfg, _cloud, log, args): if value == "user" or value == "system": value = "enable-%s" % value - valid = ( "enable-user", "enable-system", "enable", - "disable-user", "disable-system", "disable" ) + valid = ("enable-user", "enable-system", "enable", + "disable-user", "disable-system", "disable") if not value in valid: log.warn("Unknown value %s for byobu_by_default" % value) @@ -59,7 +60,7 @@ def handle(_name, cfg, _cloud, log, args): shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive" shcmd += " || X=$(($X+1)); " - cmd = [ "/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X" ) ] + cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")] log.debug("setting byobu to %s" % value) @@ -67,7 +68,7 @@ def handle(_name, cfg, _cloud, log, args): subprocess.check_call(cmd) except subprocess.CalledProcessError as e: log.debug(traceback.format_exc(e)) - raise Exception("Cmd returned %s: %s" % ( e.returncode, cmd)) + raise Exception("Cmd returned %s: %s" % (e.returncode, cmd)) except OSError as e: log.debug(traceback.format_exc(e)) - raise Exception("Cmd failed to execute: %s" % ( cmd )) + raise Exception("Cmd failed to execute: %s" % (cmd)) diff --git a/cloudinit/CloudConfig/cc_chef.py b/cloudinit/CloudConfig/cc_chef.py index 4f740aff..c9b464b5 100644 --- a/cloudinit/CloudConfig/cc_chef.py +++ b/cloudinit/CloudConfig/cc_chef.py @@ -23,9 +23,10 @@ import cloudinit.util as util ruby_version_default = "1.8" + def handle(_name, cfg, cloud, log, _args): # If there isn't a chef key in the configuration don't do anything - if not cfg.has_key('chef'): + if 'chef' not in cfg: return chef_cfg = cfg['chef'] @@ -36,8 +37,7 @@ def handle(_name, cfg, cloud, log, _args): # set the validation key based on the presence of either 'validation_key' # or 'validation_cert'. 
In the case where both exist, 'validation_key' # takes precedence - if (chef_cfg.has_key('validation_key') or - chef_cfg.has_key('validation_cert')): + if ('validation_key' in chef_cfg or 'validation_cert' in chef_cfg): validation_key = util.get_cfg_option_str(chef_cfg, 'validation_key', chef_cfg['validation_cert']) with open('/etc/chef/validation.pem', 'w') as validation_key_fh: @@ -55,9 +55,9 @@ def handle(_name, cfg, cloud, log, _args): # set the firstboot json with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh: initial_json = {} - if chef_cfg.has_key('run_list'): + if 'run_list' in chef_cfg: initial_json['run_list'] = chef_cfg['run_list'] - if chef_cfg.has_key('initial_attributes'): + if 'initial_attributes' in chef_cfg: initial_attributes = chef_cfg['initial_attributes'] for k in initial_attributes.keys(): initial_json[k] = initial_attributes[k] @@ -81,14 +81,16 @@ def handle(_name, cfg, cloud, log, _args): # this will install and run the chef-client from packages cc.install_packages(('chef',)) + def get_ruby_packages(version): # return a list of packages needed to install ruby at version - pkgs = [ 'ruby%s' % version, 'ruby%s-dev' % version ] + pkgs = ['ruby%s' % version, 'ruby%s-dev' % version] if version == "1.8": pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8')) return(pkgs) -def install_chef_from_gems(ruby_version, chef_version = None): + +def install_chef_from_gems(ruby_version, chef_version=None): cc.install_packages(get_ruby_packages(ruby_version)) if not os.path.exists('/usr/bin/gem'): os.symlink('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem') @@ -103,10 +105,12 @@ def install_chef_from_gems(ruby_version, chef_version = None): '--no-ri', '--no-rdoc', '--bindir', '/usr/bin', '-q']) + def ensure_dir(d): if not os.path.exists(d): os.makedirs(d) + def mkdirs(dirs): for d in dirs: ensure_dir(d) diff --git a/cloudinit/CloudConfig/cc_disable_ec2_metadata.py b/cloudinit/CloudConfig/cc_disable_ec2_metadata.py index 383e3b0c..7deec324 100644 --- a/cloudinit/CloudConfig/cc_disable_ec2_metadata.py +++ b/cloudinit/CloudConfig/cc_disable_ec2_metadata.py @@ -21,6 +21,7 @@ from cloudinit.CloudConfig import per_always frequency = per_always + def handle(_name, cfg, _cloud, _log, _args): if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False): fwall = "route add -host 169.254.169.254 reject" diff --git a/cloudinit/CloudConfig/cc_final_message.py b/cloudinit/CloudConfig/cc_final_message.py index 7930bab5..63618fd2 100644 --- a/cloudinit/CloudConfig/cc_final_message.py +++ b/cloudinit/CloudConfig/cc_final_message.py @@ -24,6 +24,7 @@ frequency = per_always final_message = "cloud-init boot finished at $TIMESTAMP. 
Up $UPTIME seconds" + def handle(_name, cfg, _cloud, log, args): if len(args) != 0: msg_in = args[0] @@ -38,14 +39,13 @@ def handle(_name, cfg, _cloud, log, args): log.warn("unable to open /proc/uptime\n") uptime = "na" - try: ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime()) except: ts = "na" try: - subs = { 'UPTIME' : uptime, 'TIMESTAMP' : ts } + subs = {'UPTIME': uptime, 'TIMESTAMP': ts} sys.stdout.write("%s\n" % util.render_string(msg_in, subs)) except Exception as e: log.warn("failed to render string to stdout: %s" % e) diff --git a/cloudinit/CloudConfig/cc_foo.py b/cloudinit/CloudConfig/cc_foo.py index 82a44baf..98e2d648 100644 --- a/cloudinit/CloudConfig/cc_foo.py +++ b/cloudinit/CloudConfig/cc_foo.py @@ -22,5 +22,6 @@ from cloudinit.CloudConfig import per_instance frequency = per_instance + def handle(_name, _cfg, _cloud, _log, _args): print "hi" diff --git a/cloudinit/CloudConfig/cc_grub_dpkg.py b/cloudinit/CloudConfig/cc_grub_dpkg.py index 1437d481..69cc96b9 100644 --- a/cloudinit/CloudConfig/cc_grub_dpkg.py +++ b/cloudinit/CloudConfig/cc_grub_dpkg.py @@ -20,8 +20,8 @@ import cloudinit.util as util import traceback import os + def handle(_name, cfg, _cloud, log, _args): - idevs = None idevs_empty = None @@ -31,8 +31,8 @@ def handle(_name, cfg, _cloud, log, _args): idevs_empty = util.get_cfg_option_str(cfg["grub-dpkg"], "grub-pc/install_devices_empty", None) - if (( os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda") ) or - ( os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda") )): + if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or + (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))): if idevs == None: idevs = "" if idevs_empty == None: @@ -42,11 +42,11 @@ def handle(_name, cfg, _cloud, log, _args): idevs_empty = "false" if idevs == None: idevs = "/dev/sda" - for dev in ( "/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"): + for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"): if os.path.exists(dev): idevs = dev break - + # now idevs and idevs_empty are set to determined values # or, those set by user diff --git a/cloudinit/CloudConfig/cc_keys_to_console.py b/cloudinit/CloudConfig/cc_keys_to_console.py index d462a0a8..941c49de 100644 --- a/cloudinit/CloudConfig/cc_keys_to_console.py +++ b/cloudinit/CloudConfig/cc_keys_to_console.py @@ -21,8 +21,9 @@ import subprocess frequency = per_instance + def handle(_name, cfg, _cloud, log, _args): - cmd = [ '/usr/lib/cloud-init/write-ssh-key-fingerprints' ] + cmd = ['/usr/lib/cloud-init/write-ssh-key-fingerprints'] fp_blacklist = util.get_cfg_option_list_or_str(cfg, "ssh_fp_console_blacklist", []) key_blacklist = util.get_cfg_option_list_or_str(cfg, diff --git a/cloudinit/CloudConfig/cc_landscape.py b/cloudinit/CloudConfig/cc_landscape.py index d2d2bd19..f228d2cf 100644 --- a/cloudinit/CloudConfig/cc_landscape.py +++ b/cloudinit/CloudConfig/cc_landscape.py @@ -23,7 +23,7 @@ frequency = per_instance lsc_client_cfg_file = "/etc/landscape/client.conf" # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 -lsc_builtincfg = { +lsc_builtincfg = { 'client': { 'log_level': "info", 'url': "https://landscape.canonical.com/message-system", @@ -32,6 +32,7 @@ lsc_builtincfg = { } } + def handle(_name, cfg, _cloud, log, _args): """ Basically turn a top level 'landscape' entry with a 'client' dict @@ -39,7 +40,7 @@ def handle(_name, cfg, _cloud, log, _args): /etc/landscape/client.conf """ - ls_cloudcfg = cfg.get("landscape", { }) + ls_cloudcfg = 
cfg.get("landscape", {}) if not isinstance(ls_cloudcfg, dict): raise(Exception("'landscape' existed in config, but not a dict")) @@ -51,6 +52,7 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("updated %s" % lsc_client_cfg_file) + def mergeTogether(objs): """ merge together ConfigObj objects or things that ConfigObj() will take in diff --git a/cloudinit/CloudConfig/cc_locale.py b/cloudinit/CloudConfig/cc_locale.py index 991f5861..9129ca30 100644 --- a/cloudinit/CloudConfig/cc_locale.py +++ b/cloudinit/CloudConfig/cc_locale.py @@ -20,13 +20,15 @@ import os.path import subprocess import traceback + def apply_locale(locale, cfgfile): if os.path.exists('/usr/sbin/locale-gen'): subprocess.Popen(['locale-gen', locale]).communicate() if os.path.exists('/usr/sbin/update-locale'): subprocess.Popen(['update-locale', locale]).communicate() - util.render_to_file('default-locale', cfgfile, { 'locale' : locale }) + util.render_to_file('default-locale', cfgfile, {'locale': locale}) + def handle(_name, cfg, cloud, log, args): if len(args) != 0: diff --git a/cloudinit/CloudConfig/cc_mcollective.py b/cloudinit/CloudConfig/cc_mcollective.py index 8ad8caab..2b8b2f96 100644 --- a/cloudinit/CloudConfig/cc_mcollective.py +++ b/cloudinit/CloudConfig/cc_mcollective.py @@ -27,11 +27,13 @@ import cloudinit.util as util pubcert_file = "/etc/mcollective/ssl/server-public.pem" pricert_file = "/etc/mcollective/ssl/server-private.pem" + # Our fake header section class FakeSecHead(object): def __init__(self, fp): self.fp = fp self.sechead = '[nullsection]\n' + def readline(self): if self.sechead: try: @@ -41,16 +43,17 @@ class FakeSecHead(object): else: return self.fp.readline() + def handle(_name, cfg, _cloud, _log, _args): # If there isn't a mcollective key in the configuration don't do anything - if not cfg.has_key('mcollective'): + if 'mcollective' not in cfg: return mcollective_cfg = cfg['mcollective'] # Start by installing the mcollective package ... cc.install_packages(("mcollective",)) # ... 
and then update the mcollective configuration - if mcollective_cfg.has_key('conf'): + if 'conf' in mcollective_cfg: # Create object for reading server.cfg values mcollective_config = ConfigParser.ConfigParser() # Read server.cfg values from original file in order to be able to mix @@ -92,4 +95,3 @@ def handle(_name, cfg, _cloud, _log, _args): # Start mcollective subprocess.check_call(['service', 'mcollective', 'start']) - diff --git a/cloudinit/CloudConfig/cc_mounts.py b/cloudinit/CloudConfig/cc_mounts.py index f7d8c702..dbd9c454 100644 --- a/cloudinit/CloudConfig/cc_mounts.py +++ b/cloudinit/CloudConfig/cc_mounts.py @@ -20,28 +20,30 @@ import os import re import string + def is_mdname(name): # return true if this is a metadata service name - if name in [ "ami", "root", "swap" ]: + if name in ["ami", "root", "swap"]: return True # names 'ephemeral0' or 'ephemeral1' # 'ebs[0-9]' appears when '--block-device-mapping sdf=snap-d4d90bbc' - for enumname in ( "ephemeral", "ebs" ): + for enumname in ("ephemeral", "ebs"): if name.startswith(enumname) and name.find(":") == -1: return True return False + def handle(_name, cfg, cloud, log, _args): # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno - defvals = [ None, None, "auto", "defaults,nobootwait", "0", "2" ] + defvals = [None, None, "auto", "defaults,nobootwait", "0", "2"] defvals = cfg.get("mount_default_fields", defvals) # these are our default set of mounts - defmnts = [ [ "ephemeral0", "/mnt", "auto", defvals[3], "0", "2" ], - [ "swap", "none", "swap", "sw", "0", "0" ] ] + defmnts = [["ephemeral0", "/mnt", "auto", defvals[3], "0", "2"], + ["swap", "none", "swap", "sw", "0", "0"]] - cfgmnt = [ ] - if cfg.has_key("mounts"): + cfgmnt = [] + if "mounts" in cfg: cfgmnt = cfg["mounts"] # shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1 @@ -94,7 +96,6 @@ def handle(_name, cfg, cloud, log, _args): if cfgmnt[j][0] == cfgmnt[i][0]: cfgmnt[j][1] = None - # for each of the "default" mounts, add them only if no other # entry has the same device name for defmnt in defmnts: @@ -111,12 +112,11 @@ def handle(_name, cfg, cloud, log, _args): if cfgm[0] == defmnt[0]: cfgmnt_has = True break - + if cfgmnt_has: continue cfgmnt.append(defmnt) - # now, each entry in the cfgmnt list has all fstab values # if the second field is None (not the string, the value) we skip it actlist = [x for x in cfgmnt if x[1] is not None] @@ -125,9 +125,9 @@ def handle(_name, cfg, cloud, log, _args): return comment = "comment=cloudconfig" - cc_lines = [ ] + cc_lines = [] needswap = False - dirs = [ ] + dirs = [] for line in actlist: # write 'comment' in the fs_mntops, entry, claiming this line[3] = "%s,comment=cloudconfig" % line[3] @@ -137,7 +137,7 @@ def handle(_name, cfg, cloud, log, _args): dirs.append(line[1]) cc_lines.append('\t'.join(line)) - fstab_lines = [ ] + fstab_lines = [] fstab = open("/etc/fstab", "r+") ws = re.compile("[%s]+" % string.whitespace) for line in fstab.read().splitlines(): @@ -150,7 +150,7 @@ def handle(_name, cfg, cloud, log, _args): fstab_lines.append(line) fstab_lines.extend(cc_lines) - + fstab.seek(0) fstab.write("%s\n" % '\n'.join(fstab_lines)) fstab.truncate() diff --git a/cloudinit/CloudConfig/cc_phone_home.py b/cloudinit/CloudConfig/cc_phone_home.py index 05caf8eb..73066444 100644 --- a/cloudinit/CloudConfig/cc_phone_home.py +++ b/cloudinit/CloudConfig/cc_phone_home.py @@ -23,6 +23,7 @@ frequency = per_instance post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id', 'hostname'] + # phone_home: 
# url: http://my.foo.bar/$INSTANCE/ # post: all @@ -31,10 +32,10 @@ post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id', # phone_home: # url: http://my.foo.bar/$INSTANCE_ID/ # post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id -# +# def handle(_name, cfg, cloud, log, args): if len(args) != 0: - ph_cfg = util.readconf(args[0]) + ph_cfg = util.read_conf(args[0]) else: if not 'phone_home' in cfg: return @@ -56,7 +57,7 @@ def handle(_name, cfg, cloud, log, args): if post_list == "all": post_list = post_list_all - all_keys = { } + all_keys = {} all_keys['instance_id'] = cloud.get_instance_id() all_keys['hostname'] = cloud.get_hostname() @@ -74,7 +75,7 @@ def handle(_name, cfg, cloud, log, args): except: log.warn("%s: failed to open in phone_home" % path) - submit_keys = { } + submit_keys = {} for k in post_list: if k in all_keys: submit_keys[k] = all_keys[k] @@ -82,20 +83,22 @@ def handle(_name, cfg, cloud, log, args): submit_keys[k] = "N/A" log.warn("requested key %s from 'post' list not available") - url = util.render_string(url, { 'INSTANCE_ID' : all_keys['instance_id'] }) + url = util.render_string(url, {'INSTANCE_ID': all_keys['instance_id']}) - last_e = None + null_exc = object() + last_e = null_exc for i in range(0, tries): try: util.readurl(url, submit_keys) - log.debug("succeeded submit to %s on try %i" % (url, i+1)) + log.debug("succeeded submit to %s on try %i" % (url, i + 1)) return except Exception as e: - log.debug("failed to post to %s on try %i" % (url, i+1)) + log.debug("failed to post to %s on try %i" % (url, i + 1)) last_e = e sleep(3) log.warn("failed to post to %s in %i tries" % (url, tries)) - if last_e: raise(last_e) - + if last_e is not null_exc: + raise(last_e) + return diff --git a/cloudinit/CloudConfig/cc_puppet.py b/cloudinit/CloudConfig/cc_puppet.py index 5fb0c1ee..6db1ed5c 100644 --- a/cloudinit/CloudConfig/cc_puppet.py +++ b/cloudinit/CloudConfig/cc_puppet.py @@ -25,16 +25,17 @@ import ConfigParser import cloudinit.CloudConfig as cc import cloudinit.util as util + def handle(_name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything - if not cfg.has_key('puppet'): + if 'puppet' not in cfg: return puppet_cfg = cfg['puppet'] # Start by installing the puppet package ... cc.install_packages(("puppet",)) # ... 
and then update the puppet configuration - if puppet_cfg.has_key('conf'): + if 'conf' in puppet_cfg: # Add all sections from the conf object to puppet.conf puppet_conf_fh = open('/etc/puppet/puppet.conf', 'r') # Create object for reading puppet.conf values @@ -102,4 +103,3 @@ def handle(_name, cfg, cloud, log, _args): log.warn("Do not know how to enable puppet service on this system") # Start puppetd subprocess.check_call(['service', 'puppet', 'start']) - diff --git a/cloudinit/CloudConfig/cc_resizefs.py b/cloudinit/CloudConfig/cc_resizefs.py index d960afd5..f29f886d 100644 --- a/cloudinit/CloudConfig/cc_resizefs.py +++ b/cloudinit/CloudConfig/cc_resizefs.py @@ -25,10 +25,11 @@ from cloudinit.CloudConfig import per_always frequency = per_always + def handle(_name, cfg, _cloud, log, args): if len(args) != 0: resize_root = False - if str(args[0]).lower() in [ 'true', '1', 'on', 'yes']: + if str(args[0]).lower() in ['true', '1', 'on', 'yes']: resize_root = True else: resize_root = util.get_cfg_option_bool(cfg, "resize_rootfs", True) @@ -40,7 +41,7 @@ def handle(_name, cfg, _cloud, log, args): (fd, devpth) = tempfile.mkstemp() os.unlink(devpth) os.close(fd) - + try: st_dev = os.stat("/").st_dev dev = os.makedev(os.major(st_dev), os.minor(st_dev)) @@ -52,7 +53,7 @@ def handle(_name, cfg, _cloud, log, args): log.warn("Failed to make device node to resize /") raise - cmd = [ 'blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth ] + cmd = ['blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth] try: (fstype, _err) = util.subp(cmd) except subprocess.CalledProcessError as e: @@ -62,13 +63,13 @@ def handle(_name, cfg, _cloud, log, args): os.unlink(devpth) raise - log.debug("resizing root filesystem (type=%s, maj=%i, min=%i)" % + log.debug("resizing root filesystem (type=%s, maj=%i, min=%i)" % (str(fstype).rstrip("\n"), os.major(st_dev), os.minor(st_dev))) if str(fstype).startswith("ext"): - resize_cmd = [ 'resize2fs', devpth ] + resize_cmd = ['resize2fs', devpth] elif fstype == "xfs": - resize_cmd = [ 'xfs_growfs', devpth ] + resize_cmd = ['xfs_growfs', devpth] else: os.unlink(devpth) log.debug("not resizing unknown filesystem %s" % fstype) diff --git a/cloudinit/CloudConfig/cc_rightscale_userdata.py b/cloudinit/CloudConfig/cc_rightscale_userdata.py index 61aa89d1..d6e93aa3 100644 --- a/cloudinit/CloudConfig/cc_rightscale_userdata.py +++ b/cloudinit/CloudConfig/cc_rightscale_userdata.py @@ -24,9 +24,9 @@ ## for cloud-init support, there will be a key named ## 'CLOUD_INIT_REMOTE_HOOK'. ## -## This cloud-config module will +## This cloud-config module will ## - read the blob of data from raw user data, and parse it as key/value -## - for each key that is found, download the content to +## - for each key that is found, download the content to ## the local instance/scripts directory and set them executable. ## - the files in that directory will be run by the user-scripts module ## Therefore, this must run before that. 
@@ -42,6 +42,7 @@ frequency = per_instance my_name = "cc_rightscale_userdata" my_hookname = 'CLOUD_INIT_REMOTE_HOOK' + def handle(_name, _cfg, cloud, log, _args): try: ud = cloud.get_userdata_raw() @@ -62,7 +63,7 @@ def handle(_name, _cfg, cloud, log, _args): first_e = None for url in mdict[my_hookname]: fname = "%s/rightscale-%02i" % (scripts_d, i) - i = i +1 + i = i + 1 try: content = util.readurl(url) util.write_file(fname, content, mode=0700) @@ -70,6 +71,6 @@ def handle(_name, _cfg, cloud, log, _args): if not first_e: first_e = None log.warn("%s failed to read %s: %s" % (my_name, url, e)) - + if first_e: raise(e) diff --git a/cloudinit/CloudConfig/cc_rsyslog.py b/cloudinit/CloudConfig/cc_rsyslog.py index e5f38c36..552597a5 100644 --- a/cloudinit/CloudConfig/cc_rsyslog.py +++ b/cloudinit/CloudConfig/cc_rsyslog.py @@ -24,6 +24,7 @@ import traceback DEF_FILENAME = "20-cloud-config.conf" DEF_DIR = "/etc/rsyslog.d" + def handle(_name, cfg, _cloud, log, _args): # rsyslog: # - "*.* @@192.158.1.1" @@ -39,8 +40,8 @@ def handle(_name, cfg, _cloud, log, _args): def_dir = cfg.get('rsyslog_dir', DEF_DIR) def_fname = cfg.get('rsyslog_filename', DEF_FILENAME) - files = [ ] - elst = [ ] + files = [] + elst = [] for ent in cfg['rsyslog']: if isinstance(ent, dict): if not "content" in ent: @@ -70,7 +71,7 @@ def handle(_name, cfg, _cloud, log, _args): # need to restart syslogd restarted = False try: - # if this config module is running at cloud-init time + # if this config module is running at cloud-init time # (before rsyslog is running) we don't actually have to # restart syslog. # @@ -84,7 +85,7 @@ def handle(_name, cfg, _cloud, log, _args): except Exception as e: elst.append(("restart", str(e))) - + if restarted: # this only needs to run if we *actually* restarted # syslog above. 
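The cc_* modules touched above and below all share one shape: an optional module-level frequency (per_once, per_instance, or per_always, imported from cloudinit.CloudConfig) and a handle(name, cfg, cloud, log, args) entry point, which run_cc_modules() drives through the per-frequency semaphore in CloudConfig.handle() (sem_and_run). A rough sketch of a hypothetical module, cc_example.py, following that convention (the option name is made up for illustration):

    # hypothetical module, not part of this change; shows the shape the
    # cc_* handlers in this diff share
    import cloudinit.util as util
    from cloudinit.CloudConfig import per_instance

    # run once per instance; omit this to fall back to the default frequency
    frequency = per_instance

    def handle(_name, cfg, _cloud, log, _args):
        # read an option from the merged cloud-config dict, with a default
        value = util.get_cfg_option_str(cfg, 'example_option', 'default')
        log.debug("example module saw example_option=%s" % value)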
diff --git a/cloudinit/CloudConfig/cc_runcmd.py b/cloudinit/CloudConfig/cc_runcmd.py index 3c9baa6f..cb297568 100644 --- a/cloudinit/CloudConfig/cc_runcmd.py +++ b/cloudinit/CloudConfig/cc_runcmd.py @@ -18,8 +18,9 @@ import cloudinit.util as util + def handle(_name, cfg, cloud, log, _args): - if not cfg.has_key("runcmd"): + if "runcmd" not in cfg: return outfile = "%s/runcmd" % cloud.get_ipath('scripts') try: diff --git a/cloudinit/CloudConfig/cc_scripts_per_boot.py b/cloudinit/CloudConfig/cc_scripts_per_boot.py index ee79f0a3..2eb77c18 100644 --- a/cloudinit/CloudConfig/cc_scripts_per_boot.py +++ b/cloudinit/CloudConfig/cc_scripts_per_boot.py @@ -23,6 +23,7 @@ from cloudinit import get_cpath frequency = per_always runparts_path = "%s/%s" % (get_cpath(), "scripts/per-boot") + def handle(_name, _cfg, _cloud, log, _args): try: util.runparts(runparts_path) diff --git a/cloudinit/CloudConfig/cc_scripts_per_instance.py b/cloudinit/CloudConfig/cc_scripts_per_instance.py index 499829ec..0141c977 100644 --- a/cloudinit/CloudConfig/cc_scripts_per_instance.py +++ b/cloudinit/CloudConfig/cc_scripts_per_instance.py @@ -23,6 +23,7 @@ from cloudinit import get_cpath frequency = per_instance runparts_path = "%s/%s" % (get_cpath(), "scripts/per-instance") + def handle(_name, _cfg, _cloud, log, _args): try: util.runparts(runparts_path) diff --git a/cloudinit/CloudConfig/cc_scripts_per_once.py b/cloudinit/CloudConfig/cc_scripts_per_once.py index 6c43c6f0..bbf77dfb 100644 --- a/cloudinit/CloudConfig/cc_scripts_per_once.py +++ b/cloudinit/CloudConfig/cc_scripts_per_once.py @@ -23,6 +23,7 @@ from cloudinit import get_cpath frequency = per_once runparts_path = "%s/%s" % (get_cpath(), "scripts/per-once") + def handle(_name, _cfg, _cloud, log, _args): try: util.runparts(runparts_path) diff --git a/cloudinit/CloudConfig/cc_scripts_user.py b/cloudinit/CloudConfig/cc_scripts_user.py index 3db3c7a6..949b4198 100644 --- a/cloudinit/CloudConfig/cc_scripts_user.py +++ b/cloudinit/CloudConfig/cc_scripts_user.py @@ -23,6 +23,7 @@ from cloudinit import get_ipath_cur frequency = per_instance runparts_path = "%s/%s" % (get_ipath_cur(), "scripts") + def handle(_name, _cfg, _cloud, log, _args): try: util.runparts(runparts_path) diff --git a/cloudinit/CloudConfig/cc_set_hostname.py b/cloudinit/CloudConfig/cc_set_hostname.py index 18189ed0..0b1c8924 100644 --- a/cloudinit/CloudConfig/cc_set_hostname.py +++ b/cloudinit/CloudConfig/cc_set_hostname.py @@ -18,12 +18,13 @@ import cloudinit.util as util + def handle(_name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug("preserve_hostname is set. 
not setting hostname") return(True) - ( hostname, _fqdn ) = util.get_hostname_fqdn(cfg, cloud) + (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud) try: set_hostname(hostname, log) except Exception: @@ -32,6 +33,7 @@ def handle(_name, cfg, cloud, log, _args): return(True) + def set_hostname(hostname, log): util.subp(['hostname', hostname]) util.write_file("/etc/hostname", "%s\n" % hostname, 0644) diff --git a/cloudinit/CloudConfig/cc_set_passwords.py b/cloudinit/CloudConfig/cc_set_passwords.py index 15533460..05384f4f 100644 --- a/cloudinit/CloudConfig/cc_set_passwords.py +++ b/cloudinit/CloudConfig/cc_set_passwords.py @@ -21,6 +21,7 @@ import sys import random import string + def handle(_name, cfg, _cloud, log, args): if len(args) != 0: # if run from command line, and give args, wipe the chpasswd['list'] @@ -56,7 +57,7 @@ def handle(_name, cfg, _cloud, log, args): randlist.append("%s:%s" % (u, p)) plist_in.append("%s:%s" % (u, p)) users.append(u) - + ch_in = '\n'.join(plist_in) try: util.subp(['chpasswd'], ch_in) @@ -67,7 +68,7 @@ def handle(_name, cfg, _cloud, log, args): if len(randlist): sys.stdout.write("%s\n%s\n" % ("Set the following passwords\n", - '\n'.join(randlist) )) + '\n'.join(randlist))) if expire: enum = len(errors) @@ -76,27 +77,27 @@ def handle(_name, cfg, _cloud, log, args): util.subp(['passwd', '--expire', u]) except Exception as e: errors.append(e) - log.warn("failed to expire account for %s" % u ) + log.warn("failed to expire account for %s" % u) if enum == len(errors): log.debug("expired passwords for: %s" % u) if 'ssh_pwauth' in cfg: val = str(cfg['ssh_pwauth']).lower() - if val in ( "true", "1", "yes"): + if val in ("true", "1", "yes"): pw_auth = "yes" change_pwauth = True - elif val in ( "false", "0", "no"): + elif val in ("false", "0", "no"): pw_auth = "no" change_pwauth = True else: change_pwauth = False - + if change_pwauth: pa_s = "\(#*\)\(PasswordAuthentication[[:space:]]\+\)\(yes\|no\)" msg = "set PasswordAuthentication to '%s'" % pw_auth try: - cmd = [ 'sed', '-i', 's,%s,\\2%s,' % (pa_s, pw_auth), - '/etc/ssh/sshd_config' ] + cmd = ['sed', '-i', 's,%s,\\2%s,' % (pa_s, pw_auth), + '/etc/ssh/sshd_config'] util.subp(cmd) log.debug(msg) except Exception as e: @@ -104,7 +105,8 @@ def handle(_name, cfg, _cloud, log, args): errors.append(e) try: - p = util.subp(['service', cfg.get('ssh_svcname', 'ssh'), 'restart']) + p = util.subp(['service', cfg.get('ssh_svcname', 'ssh'), + 'restart']) log.debug("restarted sshd") except: log.warn("restart of ssh failed") @@ -114,11 +116,12 @@ def handle(_name, cfg, _cloud, log, args): return -def rand_str(strlen=32, select_from=string.letters+string.digits): + +def rand_str(strlen=32, select_from=string.letters + string.digits): return("".join([random.choice(select_from) for _x in range(0, strlen)])) + def rand_user_password(pwlen=9): selfrom = (string.letters.translate(None, 'loLOI') + string.digits.translate(None, '01')) return(rand_str(pwlen, select_from=selfrom)) - diff --git a/cloudinit/CloudConfig/cc_ssh.py b/cloudinit/CloudConfig/cc_ssh.py index b6ac1edb..39862117 100644 --- a/cloudinit/CloudConfig/cc_ssh.py +++ b/cloudinit/CloudConfig/cc_ssh.py @@ -25,9 +25,9 @@ DISABLE_ROOT_OPTS = "no-port-forwarding,no-agent-forwarding," \ "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " \ "rather than the user \\\"root\\\".\';echo;sleep 10\"" - global_log = None + def handle(_name, cfg, cloud, log, _args): global global_log global_log = log @@ -40,23 +40,23 @@ def handle(_name, cfg, cloud, log, _args): 
except: pass - if cfg.has_key("ssh_keys"): + if "ssh_keys" in cfg: # if there are keys in cloud-config, use them key2file = { - "rsa_private" : ("/etc/ssh/ssh_host_rsa_key", 0600), - "rsa_public" : ("/etc/ssh/ssh_host_rsa_key.pub", 0644), - "dsa_private" : ("/etc/ssh/ssh_host_dsa_key", 0600), - "dsa_public" : ("/etc/ssh/ssh_host_dsa_key.pub", 0644), - "ecdsa_private" : ("/etc/ssh/ssh_host_ecdsa_key", 0600), - "ecdsa_public" : ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644), + "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600), + "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644), + "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600), + "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644), + "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600), + "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644), } for key, val in cfg["ssh_keys"].items(): - if key2file.has_key(key): + if key in key2file: util.write_file(key2file[key][0], val, key2file[key][1]) - priv2pub = { 'rsa_private':'rsa_public', 'dsa_private':'dsa_public', - 'ecdsa_private': 'ecdsa_public', } + priv2pub = {'rsa_private': 'rsa_public', 'dsa_private': 'dsa_public', + 'ecdsa_private': 'ecdsa_public', } cmd = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"' for priv, pub in priv2pub.iteritems(): @@ -68,7 +68,7 @@ def handle(_name, cfg, cloud, log, _args): else: # if not, generate them for keytype in util.get_cfg_option_list_or_str(cfg, 'ssh_genkeytypes', - ['rsa', 'dsa', 'ecdsa']): + ['rsa', 'dsa', 'ecdsa']): keyfile = '/etc/ssh/ssh_host_%s_key' % keytype if not os.path.exists(keyfile): subprocess.call(['ssh-keygen', '-t', keytype, '-N', '', @@ -83,7 +83,7 @@ def handle(_name, cfg, cloud, log, _args): DISABLE_ROOT_OPTS) keys = cloud.get_public_ssh_keys() - if cfg.has_key("ssh_authorized_keys"): + if "ssh_authorized_keys" in cfg: cfgkeys = cfg["ssh_authorized_keys"] keys.extend(cfgkeys) @@ -92,16 +92,16 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log) log.warn("applying credentials failed!\n") + def apply_credentials(keys, user, disable_root, disable_root_opts=DISABLE_ROOT_OPTS, log=global_log): keys = set(keys) if user: sshutil.setup_user_keys(keys, user, '', log) - + if disable_root: key_prefix = disable_root_opts.replace('$USER', user) else: key_prefix = '' sshutil.setup_user_keys(keys, 'root', key_prefix, log) - diff --git a/cloudinit/CloudConfig/cc_ssh_import_id.py b/cloudinit/CloudConfig/cc_ssh_import_id.py index efcd4296..f14a99cd 100644 --- a/cloudinit/CloudConfig/cc_ssh_import_id.py +++ b/cloudinit/CloudConfig/cc_ssh_import_id.py @@ -19,10 +19,11 @@ import cloudinit.util as util import subprocess import traceback + def handle(_name, cfg, _cloud, log, args): if len(args) != 0: user = args[0] - ids = [ ] + ids = [] if len(args) > 1: ids = args[1:] else: @@ -32,7 +33,7 @@ def handle(_name, cfg, _cloud, log, args): if len(ids) == 0: return - cmd = [ "sudo", "-Hu", user, "ssh-import-id" ] + ids + cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids log.debug("importing ssh ids. 
cmd = %s" % cmd) @@ -40,7 +41,7 @@ def handle(_name, cfg, _cloud, log, args): subprocess.check_call(cmd) except subprocess.CalledProcessError as e: log.debug(traceback.format_exc(e)) - raise Exception("Cmd returned %s: %s" % ( e.returncode, cmd)) + raise Exception("Cmd returned %s: %s" % (e.returncode, cmd)) except OSError as e: log.debug(traceback.format_exc(e)) - raise Exception("Cmd failed to execute: %s" % ( cmd )) + raise Exception("Cmd failed to execute: %s" % (cmd)) diff --git a/cloudinit/CloudConfig/cc_timezone.py b/cloudinit/CloudConfig/cc_timezone.py index 87855503..6f0e8f6b 100644 --- a/cloudinit/CloudConfig/cc_timezone.py +++ b/cloudinit/CloudConfig/cc_timezone.py @@ -24,6 +24,7 @@ import shutil frequency = per_instance tz_base = "/usr/share/zoneinfo" + def handle(_name, cfg, _cloud, log, args): if len(args) != 0: timezone = args[0] @@ -33,7 +34,7 @@ def handle(_name, cfg, _cloud, log, args): if not timezone: return - tz_file = "%s/%s" % (tz_base , timezone) + tz_file = "%s/%s" % (tz_base, timezone) if not os.path.isfile(tz_file): log.debug("Invalid timezone %s" % tz_file) @@ -59,6 +60,6 @@ def handle(_name, cfg, _cloud, log, args): except: log.debug("failed to copy %s to /etc/localtime" % tz_file) raise - + log.debug("set timezone to %s" % timezone) return diff --git a/cloudinit/CloudConfig/cc_update_etc_hosts.py b/cloudinit/CloudConfig/cc_update_etc_hosts.py index 66f0537c..131e1a1e 100644 --- a/cloudinit/CloudConfig/cc_update_etc_hosts.py +++ b/cloudinit/CloudConfig/cc_update_etc_hosts.py @@ -21,8 +21,9 @@ import StringIO frequency = per_always + def handle(_name, cfg, cloud, log, _args): - ( hostname, fqdn ) = util.get_hostname_fqdn(cfg, cloud) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) manage_hosts = util.get_cfg_option_bool(cfg, "manage_etc_hosts", False) if manage_hosts in ("True", "true", True, "template"): @@ -32,8 +33,8 @@ def handle(_name, cfg, cloud, log, _args): log.info("manage_etc_hosts was set, but no hostname found") return - util.render_to_file('hosts', '/etc/hosts', \ - { 'hostname' : hostname, 'fqdn' : fqdn }) + util.render_to_file('hosts', '/etc/hosts', + {'hostname': hostname, 'fqdn': fqdn}) except Exception: log.warn("failed to update /etc/hosts") raise @@ -81,4 +82,3 @@ def update_etc_hosts(hostname, fqdn, _log): new_etcfile.close() new_etchosts.close() return - diff --git a/cloudinit/CloudConfig/cc_update_hostname.py b/cloudinit/CloudConfig/cc_update_hostname.py index 893c99e0..2387a8dc 100644 --- a/cloudinit/CloudConfig/cc_update_hostname.py +++ b/cloudinit/CloudConfig/cc_update_hostname.py @@ -22,19 +22,21 @@ from cloudinit.CloudConfig import per_always frequency = per_always + def handle(_name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug("preserve_hostname is set. not updating hostname") return - ( hostname, _fqdn ) = util.get_hostname_fqdn(cfg, cloud) + (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud) try: - prev ="%s/%s" % (cloud.get_cpath('data'), "previous-hostname") + prev = "%s/%s" % (cloud.get_cpath('data'), "previous-hostname") update_hostname(hostname, prev, log) except Exception: log.warn("failed to set hostname\n") raise + # read hostname from a 'hostname' file # allow for comments and stripping line endings. 
# if file doesn't exist, or no contents, return default @@ -54,7 +56,8 @@ def read_hostname(filename, default=None): if e.errno != errno.ENOENT: raise return default - + + def update_hostname(hostname, prev_file, log): etc_file = "/etc/hostname" @@ -75,7 +78,7 @@ def update_hostname(hostname, prev_file, log): if not hostname_prev or hostname_prev != hostname: update_files.append(prev_file) - if (not hostname_in_etc or + if (not hostname_in_etc or (hostname_in_etc == hostname_prev and hostname_in_etc != hostname)): update_files.append(etc_file) @@ -93,4 +96,3 @@ def update_hostname(hostname, prev_file, log): if etc_file in update_files: log.debug("setting hostname to %s" % hostname) subprocess.Popen(['hostname', hostname]).communicate() - diff --git a/cloudinit/DataSource.py b/cloudinit/DataSource.py index 7e539b67..0985c6b2 100644 --- a/cloudinit/DataSource.py +++ b/cloudinit/DataSource.py @@ -20,20 +20,21 @@ DEP_FILESYSTEM = "FILESYSTEM" DEP_NETWORK = "NETWORK" -import UserDataHandler as ud +import cloudinit.UserDataHandler as ud import cloudinit.util as util import socket + class DataSource: userdata = None metadata = None userdata_raw = None cfgname = "" - # system config (passed in from cloudinit, + # system config (passed in from cloudinit, # cloud-config before input from the DataSource) - sys_cfg = { } + sys_cfg = {} # datasource config, the cloud-config['datasource']['__name__'] - ds_cfg = { } # datasource config + ds_cfg = {} # datasource config def __init__(self, sys_cfg=None): if not self.cfgname: @@ -55,27 +56,26 @@ class DataSource: def get_userdata_raw(self): return(self.userdata_raw) - # the data sources' config_obj is a cloud-config formated # object that came to it from ways other than cloud-config # because cloud-config content would be handled elsewhere def get_config_obj(self): - return({ }) + return({}) def get_public_ssh_keys(self): keys = [] - if not self.metadata.has_key('public-keys'): + if 'public-keys' not in self.metadata: return([]) if isinstance(self.metadata['public-keys'], str): - return([self.metadata['public-keys'],]) - + return([self.metadata['public-keys'], ]) + for _keyname, klist in self.metadata['public-keys'].items(): # lp:506332 uec metadata service responds with # data that makes boto populate a string for 'klist' rather # than a list. if isinstance(klist, str): - klist = [ klist ] + klist = [klist] for pkey in klist: # there is an empty string at the end of the keylist, trim it if pkey: @@ -104,7 +104,7 @@ class DataSource: def get_hostname(self, fqdn=False): defdomain = "localdomain" - defhost = "localhost" + defhost = "localhost" domain = defdomain if not 'local-hostname' in self.metadata: @@ -120,12 +120,11 @@ class DataSource: fqdn = util.get_fqdn_from_hosts(hostname) if fqdn and fqdn.find(".") > 0: - toks = fqdn.split(".") + toks = str(fqdn).split(".") elif hostname: - toks = [ hostname, defdomain ] + toks = [hostname, defdomain] else: - toks = [ defhost, defdomain ] - + toks = [defhost, defdomain] else: # if there is an ipv4 address in 'local-hostname', then @@ -147,6 +146,7 @@ class DataSource: else: return hostname + # return a list of classes that have the same depends as 'depends' # iterate through cfg_list, loading "DataSourceCollections" modules # and calling their "get_datasource_list". 
@@ -177,15 +177,16 @@ def list_sources(cfg_list, depends, pkglist=None): raise return(retlist) + # depends is a list of dependencies (DEP_FILESYSTEM) # dslist is a list of 2 item lists -# dslist = [ +# dslist = [ # ( class, ( depends-that-this-class-needs ) ) # } # it returns a list of 'class' that matched these deps exactly # it is a helper function for DataSourceCollections def list_from_depends(depends, dslist): - retlist = [ ] + retlist = [] depset = set(depends) for elem in dslist: (cls, deps) = elem diff --git a/cloudinit/DataSourceEc2.py b/cloudinit/DataSourceEc2.py index 14484bdd..37dfcb5d 100644 --- a/cloudinit/DataSourceEc2.py +++ b/cloudinit/DataSourceEc2.py @@ -16,9 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -import DataSource +import cloudinit.DataSource as DataSource -from cloudinit import seeddir, log # pylint: disable=W0611 +from cloudinit import seeddir as base_seeddir +from cloudinit import log import cloudinit.util as util import socket import urllib2 @@ -26,22 +27,23 @@ import time import boto.utils as boto_utils import os.path + class DataSourceEc2(DataSource.DataSource): - api_ver = '2009-04-04' - seeddir = seeddir + '/ec2' + api_ver = '2009-04-04' + seeddir = base_seeddir + '/ec2' metadata_address = "http://169.254.169.254" def __str__(self): return("DataSourceEc2") def get_data(self): - seedret = { } - if util.read_optional_seed(seedret, base=self.seeddir+"/"): + seedret = {} + if util.read_optional_seed(seedret, base=self.seeddir + "/"): self.userdata_raw = seedret['user-data'] self.metadata = seedret['meta-data'] log.debug("using seeded ec2 data in %s" % self.seeddir) return True - + try: if not self.wait_for_metadata_service(): return False @@ -66,7 +68,7 @@ class DataSourceEc2(DataSource.DataSource): def get_local_mirror(self): return(self.get_mirror_from_availability_zone()) - def get_mirror_from_availability_zone(self, availability_zone = None): + def get_mirror_from_availability_zone(self, availability_zone=None): # availability is like 'us-west-1b' or 'eu-west-1a' if availability_zone == None: availability_zone = self.get_availability_zone() @@ -87,7 +89,7 @@ class DataSourceEc2(DataSource.DataSource): mcfg = self.ds_cfg if not hasattr(mcfg, "get"): - mcfg = {} + mcfg = {} max_wait = 120 try: @@ -122,8 +124,8 @@ class DataSourceEc2(DataSource.DataSource): log.warn("Empty metadata url list! 
using default list") mdurls = def_mdurls - urls = [ ] - url2base = { False: False } + urls = [] + url2base = {False: False} for url in mdurls: cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver) urls.append(cur) @@ -137,7 +139,7 @@ class DataSourceEc2(DataSource.DataSource): log.debug("Using metadata source: '%s'" % url2base[url]) else: log.critical("giving up on md after %i seconds\n" % - int(time.time()-starttime)) + int(time.time() - starttime)) self.metadata_address = url2base[url] return (bool(url)) @@ -146,7 +148,7 @@ class DataSourceEc2(DataSource.DataSource): # consult metadata service, that has # ephemeral0: sdb # and return 'sdb' for input 'ephemeral0' - if not self.metadata.has_key('block-device-mapping'): + if 'block-device-mapping' not in self.metadata: return(None) found = None @@ -166,10 +168,10 @@ class DataSourceEc2(DataSource.DataSource): # when the kernel named them 'vda' or 'xvda' # we want to return the correct value for what will actually # exist in this instance - mappings = { "sd": ("vd", "xvd") } + mappings = {"sd": ("vd", "xvd")} ofound = found short = os.path.basename(found) - + if not found.startswith("/"): found = "/dev/%s" % found @@ -213,7 +215,7 @@ def wait_for_metadata_service(urls, max_wait=None, timeout=None, be tried once and given the timeout provided. timeout: the timeout provided to urllib2.urlopen status_cb: call method with string message when a url is not available - + the idea of this routine is to wait for the EC2 metdata service to come up. On both Eucalyptus and EC2 we have seen the case where the instance hit the MD before the MD service was up. EC2 seems @@ -233,16 +235,19 @@ def wait_for_metadata_service(urls, max_wait=None, timeout=None, sleeptime = 1 + def nullstatus_cb(msg): + return + if status_cb == None: - def status_cb(msg): return + status_cb = nullstatus_cb def timeup(max_wait, starttime): return((max_wait <= 0 or max_wait == None) or - (time.time()-starttime > max_wait)) + (time.time() - starttime > max_wait)) loop_n = 0 while True: - sleeptime = int(loop_n/5)+1 + sleeptime = int(loop_n / 5) + 1 for url in urls: now = time.time() if loop_n != 0: @@ -270,7 +275,8 @@ def wait_for_metadata_service(urls, max_wait=None, timeout=None, if log: status_cb("'%s' failed [%s/%ss]: %s" % - (url, int(time.time()-starttime), max_wait, reason)) + (url, int(time.time() - starttime), max_wait, + reason)) if timeup(max_wait, starttime): break @@ -281,10 +287,11 @@ def wait_for_metadata_service(urls, max_wait=None, timeout=None, return False -datasources = [ - ( DataSourceEc2, ( DataSource.DEP_FILESYSTEM , DataSource.DEP_NETWORK ) ), +datasources = [ + (DataSourceEc2, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)), ] + # return a list of data sources that match this set of dependencies def get_datasource_list(depends): return(DataSource.list_from_depends(depends, datasources)) diff --git a/cloudinit/DataSourceNoCloud.py b/cloudinit/DataSourceNoCloud.py index d63cdc95..0d5f15b3 100644 --- a/cloudinit/DataSourceNoCloud.py +++ b/cloudinit/DataSourceNoCloud.py @@ -16,19 +16,21 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
-import DataSource +import cloudinit.DataSource as DataSource -from cloudinit import seeddir, log # pylint: disable=W0611 +from cloudinit import seeddir as base_seeddir +from cloudinit import log import cloudinit.util as util + class DataSourceNoCloud(DataSource.DataSource): metadata = None userdata = None userdata_raw = None - supported_seed_starts = ( "/" , "file://" ) + supported_seed_starts = ("/", "file://") seed = None cmdline_id = "ds=nocloud" - seeddir = seeddir + '/nocloud' + seeddir = base_seeddir + '/nocloud' def __str__(self): mstr = "DataSourceNoCloud" @@ -36,12 +38,12 @@ class DataSourceNoCloud(DataSource.DataSource): return(mstr) def get_data(self): - defaults = { - "instance-id" : "nocloud" + defaults = { + "instance-id": "nocloud" } - found = [ ] - md = { } + found = [] + md = {} ud = "" try: @@ -53,7 +55,7 @@ class DataSourceNoCloud(DataSource.DataSource): return False # check to see if the seeddir has data. - seedret = { } + seedret = {} if util.read_optional_seed(seedret, base=self.seeddir + "/"): md = util.mergedict(md, seedret['meta-data']) ud = seedret['user-data'] @@ -94,6 +96,7 @@ class DataSourceNoCloud(DataSource.DataSource): self.userdata_raw = ud return True + # returns true or false indicating if cmdline indicated # that this module should be used # example cmdline: @@ -103,7 +106,7 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): cmdline = util.get_cmdline() cmdline = " %s " % cmdline - if not ( " %s " % ds_id in cmdline or " %s;" % ds_id in cmdline ): + if not (" %s " % ds_id in cmdline or " %s;" % ds_id in cmdline): return False argline = "" @@ -112,7 +115,7 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): for tok in cmdline.split(): if tok.startswith(ds_id): argline = tok.split("=", 1) - + # argline array is now 'nocloud' followed optionally by # a ';' and then key=value pairs also terminated with ';' tmp = argline[1].split(";") @@ -122,7 +125,7 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): kvpairs = () # short2long mapping to save cmdline typing - s2l = { "h" : "local-hostname", "i" : "instance-id", "s" : "seedfrom" } + s2l = {"h": "local-hostname", "i": "instance-id", "s": "seedfrom"} for item in kvpairs: try: (k, v) = item.split("=", 1) @@ -135,17 +138,20 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): return(True) + class DataSourceNoCloudNet(DataSourceNoCloud): cmdline_id = "ds=nocloud-net" - supported_seed_starts = ( "http://", "https://", "ftp://" ) - seeddir = seeddir + '/nocloud-net' + supported_seed_starts = ("http://", "https://", "ftp://") + seeddir = base_seeddir + '/nocloud-net' + datasources = ( - ( DataSourceNoCloud, ( DataSource.DEP_FILESYSTEM, ) ), - ( DataSourceNoCloudNet, - ( DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK ) ), + (DataSourceNoCloud, (DataSource.DEP_FILESYSTEM, )), + (DataSourceNoCloudNet, + (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)), ) + # return a list of data sources that match this set of dependencies def get_datasource_list(depends): return(DataSource.list_from_depends(depends, datasources)) diff --git a/cloudinit/DataSourceOVF.py b/cloudinit/DataSourceOVF.py index 4e960ffa..372211f3 100644 --- a/cloudinit/DataSourceOVF.py +++ b/cloudinit/DataSourceOVF.py @@ -16,9 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
-import DataSource +import cloudinit.DataSource as DataSource -from cloudinit import seeddir, log +from cloudinit import seeddir as base_seeddir +from cloudinit import log import cloudinit.util as util import sys import os.path @@ -29,14 +30,15 @@ import re import tempfile import subprocess + class DataSourceOVF(DataSource.DataSource): seed = None - seeddir = seeddir + '/ovf' + seeddir = base_seeddir + '/ovf' environment = None - cfg = { } + cfg = {} userdata_raw = None metadata = None - supported_seed_starts = ( "/" , "file://" ) + supported_seed_starts = ("/", "file://") def __str__(self): mstr = "DataSourceOVF" @@ -44,25 +46,25 @@ class DataSourceOVF(DataSource.DataSource): return(mstr) def get_data(self): - found = [ ] - md = { } + found = [] + md = {} ud = "" - defaults = { - "instance-id" : "iid-dsovf" + defaults = { + "instance-id": "iid-dsovf" } - (seedfile, contents) = get_ovf_env(seeddir) + (seedfile, contents) = get_ovf_env(base_seeddir) if seedfile: # found a seed dir - seed = "%s/%s" % (seeddir, seedfile) + seed = "%s/%s" % (base_seeddir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(seed) else: - np = { 'iso' : transport_iso9660, - 'vmware-guestd' : transport_vmware_guestd, } + np = {'iso': transport_iso9660, + 'vmware-guestd': transport_vmware_guestd, } for name, transfunc in np.iteritems(): (contents, _dev, _fname) = transfunc() if contents: @@ -94,7 +96,6 @@ class DataSourceOVF(DataSource.DataSource): md = util.mergedict(md, md_seed) found.append(seedfrom) - md = util.mergedict(md, defaults) self.seed = ",".join(found) @@ -106,27 +107,29 @@ class DataSourceOVF(DataSource.DataSource): def get_public_ssh_keys(self): if not 'public-keys' in self.metadata: return([]) - return([self.metadata['public-keys'],]) - + return([self.metadata['public-keys'], ]) + # the data sources' config_obj is a cloud-config formated # object that came to it from ways other than cloud-config # because cloud-config content would be handled elsewhere def get_config_obj(self): return(self.cfg) + class DataSourceOVFNet(DataSourceOVF): - seeddir = seeddir + '/ovf-net' - supported_seed_starts = ( "http://", "https://", "ftp://" ) + seeddir = base_seeddir + '/ovf-net' + supported_seed_starts = ("http://", "https://", "ftp://") + # this will return a dict with some content # meta-data, user-data def read_ovf_environment(contents): props = getProperties(contents) - md = { } - cfg = { } + md = {} + cfg = {} ud = "" - cfg_props = [ 'password', ] - md_props = [ 'seedfrom', 'local-hostname', 'public-keys', 'instance-id' ] + cfg_props = ['password', ] + md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id'] for prop, val in props.iteritems(): if prop == 'hostname': prop = "local-hostname" @@ -140,12 +143,12 @@ def read_ovf_environment(contents): except: ud = val return(md, ud, cfg) - + # returns tuple of filename (in 'dirname', and the contents of the file) # on "not found", returns 'None' for filename and False for contents def get_ovf_env(dirname): - env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML" ) + env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML") for fname in env_names: if os.path.isfile("%s/%s" % (dirname, fname)): fp = open("%s/%s" % (dirname, fname)) @@ -154,11 +157,12 @@ def get_ovf_env(dirname): return(fname, contents) return(None, False) + # transport functions take no input and return # a 3 tuple of content, path, filename def transport_iso9660(require_iso=False): - # default_regex 
matches values in + # default_regex matches values in # /lib/udev/rules.d/60-cdrom_id.rules # KERNEL!="sr[0-9]*|hd[a-z]|xvd*", GOTO="cdrom_end" envname = "CLOUD_INIT_CDROM_DEV_REGEX" @@ -172,7 +176,7 @@ def transport_iso9660(require_iso=False): mounts = fp.readlines() fp.close() - mounted = { } + mounted = {} for mpline in mounts: (dev, mp, fstype, _opts, _freq, _passno) = mpline.split() mounted[dev] = (dev, fstype, mp, False) @@ -180,9 +184,9 @@ def transport_iso9660(require_iso=False): if fstype != "iso9660" and require_iso: continue - if cdmatch.match(dev[5:]) == None: # take off '/dev/' + if cdmatch.match(dev[5:]) == None: # take off '/dev/' continue - + (fname, contents) = get_ovf_env(mp) if contents is not False: return(contents, dev, fname) @@ -217,7 +221,7 @@ def transport_iso9660(require_iso=False): except: pass - cmd = [ "mount", "-o", "ro", fullp, tmpd ] + cmd = ["mount", "-o", "ro", fullp, tmpd] if require_iso: cmd.extend(('-t', 'iso9660')) @@ -241,6 +245,7 @@ def transport_iso9660(require_iso=False): return(False, None, None) + def transport_vmware_guestd(): # pylint: disable=C0301 # http://blogs.vmware.com/vapp/2009/07/selfconfiguration-and-the-ovf-environment.html @@ -265,6 +270,7 @@ def findChild(node, filter_func): ret.append(child) return(ret) + def getProperties(environString): dom = minidom.parseString(environString) if dom.documentElement.localName != "Environment": @@ -275,7 +281,7 @@ def getProperties(environString): envNsURI = "http://schemas.dmtf.org/ovf/environment/1" - # could also check here that elem.namespaceURI == + # could also check here that elem.namespaceURI == # "http://schemas.dmtf.org/ovf/environment/1" propSections = findChild(dom.documentElement, lambda n: n.localName == "PropertySection") @@ -283,7 +289,7 @@ def getProperties(environString): if len(propSections) == 0: raise Exception("No 'PropertySection's") - props = { } + props = {} propElems = findChild(propSections[0], lambda n: n.localName == "Property") for elem in propElems: @@ -293,23 +299,26 @@ def getProperties(environString): return(props) + datasources = ( - ( DataSourceOVF, ( DataSource.DEP_FILESYSTEM, ) ), - ( DataSourceOVFNet, - ( DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK ) ), + (DataSourceOVF, (DataSource.DEP_FILESYSTEM, )), + (DataSourceOVFNet, + (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)), ) + # return a list of data sources that match this set of dependencies def get_datasource_list(depends): return(DataSource.list_from_depends(depends, datasources)) + if __name__ == "__main__": import sys envStr = open(sys.argv[1]).read() props = getProperties(envStr) import pprint pprint.pprint(props) - + md, ud, cfg = read_ovf_environment(envStr) print "=== md ===" pprint.pprint(md) diff --git a/cloudinit/SshUtil.py b/cloudinit/SshUtil.py index 7ca60388..c6c30c5c 100644 --- a/cloudinit/SshUtil.py +++ b/cloudinit/SshUtil.py @@ -4,6 +4,7 @@ import os import os.path import cloudinit.util as util + class AuthKeyEntry(): # lines are options, keytype, base64-encoded key, comment # man page says the following which I did not understand: @@ -37,7 +38,7 @@ class AuthKeyEntry(): quoted = False # taken from auth_rsa_key_allowed in auth-rsa.c try: - while (i < len(ent) and + while (i < len(ent) and ((quoted) or (ent[i] not in (" ", "\t")))): curc = ent[i] nextc = ent[i + 1] @@ -48,12 +49,12 @@ class AuthKeyEntry(): i = i + 1 except IndexError: self.is_comment = True - return() + return try: self.options = ent[0:i] (self.keytype, self.base64, self.comment) = \ - ent[i+1:].split(None, 3) + 
ent[i + 1:].split(None, 3) except ValueError: # we did not understand this line self.is_comment = True @@ -67,28 +68,30 @@ class AuthKeyEntry(): print("line_in=%s\ncomment: %s\noptions=%s\nkeytype=%s\nbase64=%s\n" "comment=%s\n" % (self.line_in, self.is_comment, self.options, self.keytype, self.base64, self.comment)), + def __repr__(self): if self.is_comment: return(self.line_in) else: - toks = [ ] + toks = [] for e in (self.options, self.keytype, self.base64, self.comment): if e: toks.append(e) - + return(' '.join(toks)) - + + def update_authorized_keys(fname, keys): # keys is a list of AuthKeyEntries # key_prefix is the prefix (options) to prepend try: fp = open(fname, "r") - lines = fp.readlines() # lines have carriage return + lines = fp.readlines() # lines have carriage return fp.close() except IOError: - lines = [ ] + lines = [] - ka_stats = { } # keys_added status + ka_stats = {} # keys_added status for k in keys: ka_stats[k] = False @@ -116,7 +119,7 @@ def update_authorized_keys(fname, keys): else: return('\n'.join(lines) + "\n") - + def setup_user_keys(keys, user, key_prefix, log=None): import pwd saved_umask = os.umask(077) @@ -152,25 +155,25 @@ def setup_user_keys(keys, user, key_prefix, log=None): os.umask(saved_umask) + if __name__ == "__main__": import sys - # pylint: disable=C0301 # usage: orig_file, new_keys, [key_prefix] # prints out merged, where 'new_keys' will trump old ## example - ## ### begin authorized_keys ### - # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA28CDAGtxSucHezSKqwh1wAs39xdeZTSVmmyMcKDI5Njnd1d/Uhgj/awxP0Whep8eRSm6F+Xgwi0pH1KNPCszPvq+03K+yi3YkYkQIkVBhctK6AP/UmlVQTVmjJdEvgtrppFTjCzf16q0BT0mXX5YFV3csgm8cJn7UveKHkYjJp8= smoser-work - # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies + ## ### begin auth_keys ### + # ssh-rsa AAAAB3NzaC1xxxxxxxxxV3csgm8cJn7UveKHkYjJp8= smoser-work + # ssh-rsa AAAAB3NzaC1xxxxxxxxxCmXp5Kt5/82cD/VN3NtHw== smoser@brickies # ### end authorized_keys ### - # + # # ### begin new_keys ### # ssh-rsa nonmatch smoser@newhost - # ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEA28CDAGtxSucHezSKqwh1wAs39xdeZTSVmmyMcKDI5Njnd1d/Uhgj/awxP0Whep8eRSm6F+Xgwi0pH1KNPCszPvq+03K+yi3YkYkQIkVBhctK6AP/UmlVQTVmjJdEvgtrppFTjCzf16q0BT0mXX5YFV3csgm8cJn7UveKHkYjJp8= new_comment + # ssh-rsa AAAAB3NzaC1xxxxxxxxxV3csgm8cJn7UveKHkYjJp8= new_comment # ### end new_keys ### # # Then run as: - # program authorized_keys new_keys 'no-port-forwarding,command=\"echo hi world;\"' - # pylint: enable=C0301 + # program auth_keys new_keys \ + # 'no-port-forwarding,command=\"echo hi world;\"' def_prefix = None orig_key_file = sys.argv[1] new_key_file = sys.argv[2] @@ -178,15 +181,16 @@ if __name__ == "__main__": def_prefix = sys.argv[3] fp = open(new_key_file) - newkeys = [ ] + newkeys = [] for line in fp.readlines(): newkeys.append(AuthKeyEntry(line, def_prefix)) fp.close() print update_authorized_keys(orig_key_file, newkeys) + def parse_ssh_config(fname="/etc/ssh/sshd_config"): - ret = { } + ret = {} fp = open(fname) for l in fp.readlines(): l = l.strip() @@ -196,4 +200,3 @@ def parse_ssh_config(fname="/etc/ssh/sshd_config"): ret[key] = val fp.close() return(ret) - diff --git a/cloudinit/UserDataHandler.py b/cloudinit/UserDataHandler.py index 
19c331be..48bd9780 100644 --- a/cloudinit/UserDataHandler.py +++ b/cloudinit/UserDataHandler.py @@ -26,17 +26,19 @@ import cloudinit.util as util import hashlib import urllib + starts_with_mappings = { - '#include' : 'text/x-include-url', - '#include-once' : 'text/x-include-once-url', - '#!' : 'text/x-shellscript', - '#cloud-config' : 'text/cloud-config', - '#upstart-job' : 'text/upstart-job', - '#part-handler' : 'text/part-handler', - '#cloud-boothook' : 'text/cloud-boothook', - '#cloud-config-archive' : 'text/cloud-config-archive', + '#include': 'text/x-include-url', + '#include-once': 'text/x-include-once-url', + '#!': 'text/x-shellscript', + '#cloud-config': 'text/cloud-config', + '#upstart-job': 'text/upstart-job', + '#part-handler': 'text/part-handler', + '#cloud-boothook': 'text/cloud-boothook', + '#cloud-config-archive': 'text/cloud-config-archive', } + # if 'string' is compressed return decompressed otherwise return it def decomp_str(string): import StringIO @@ -47,6 +49,7 @@ def decomp_str(string): except: return(string) + def do_include(content, appendmsg): import os # is just a list of urls, one per line @@ -67,7 +70,7 @@ def do_include(content, appendmsg): continue # urls cannot not have leading or trailing white space - msum = hashlib.md5() + msum = hashlib.md5() # pylint: disable=E1101 msum.update(line.strip()) includeonce_filename = "%s/urlcache/%s" % ( cloudinit.get_ipath_cur("data"), msum.hexdigest()) @@ -88,14 +91,14 @@ def do_include(content, appendmsg): def explode_cc_archive(archive, appendmsg): for ent in yaml.load(archive): # ent can be one of: - # dict { 'filename' : 'value' , 'content' : 'value', 'type' : 'value' } + # dict { 'filename' : 'value', 'content' : 'value', 'type' : 'value' } # filename and type not be present # or # scalar(payload) - + def_type = "text/cloud-config" if isinstance(ent, str): - ent = { 'content': ent } + ent = {'content': ent} content = ent.get('content', '') mtype = ent.get('type', None) @@ -135,6 +138,7 @@ def multi_part_count(outermsg, newcount=None): return(int(outermsg.get('Number-Attachments', 0))) + def _attach_part(outermsg, part): """ Attach an part to an outer message. outermsg must be a MIMEMultipart. 
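The starts_with_mappings table above is how raw user-data parts get typed: the first characters of a part are matched against these prefixes, longest prefix first, so that '#include-once' is not mistaken for the shorter '#include'. A small sketch of that lookup using a subset of the same table (illustrative only; the helper name is invented):

    prefix_types = {
        '#include': 'text/x-include-url',
        '#include-once': 'text/x-include-once-url',
        '#!': 'text/x-shellscript',
        '#cloud-config': 'text/cloud-config',
    }

    def guess_type(payload, default=None):
        # longest prefixes first so '#include-once' wins over '#include'
        for prefix in sorted(prefix_types, key=len, reverse=True):
            if payload.startswith(prefix):
                return prefix_types[prefix]
        return default

    # guess_type("#include-once\nhttp://example.com/user-data")
    #   -> 'text/x-include-once-url'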
@@ -143,18 +147,20 @@ def _attach_part(outermsg, part): cur = multi_part_count(outermsg) if not part.get_filename(None): part.add_header('Content-Disposition', 'attachment', - filename = 'part-%03d' % (cur+1)) + filename='part-%03d' % (cur + 1)) outermsg.attach(part) - multi_part_count(outermsg, cur+1) - + multi_part_count(outermsg, cur + 1) + + def type_from_startswith(payload, default=None): # slist is sorted longest first - slist = sorted(starts_with_mappings.keys(), key=lambda e: 0-len(e)) + slist = sorted(starts_with_mappings.keys(), key=lambda e: 0 - len(e)) for sstr in slist: if payload.startswith(sstr): return(starts_with_mappings[sstr]) return default + def process_includes(msg, appendmsg=None): if appendmsg == None: appendmsg = MIMEMultipart() @@ -190,6 +196,7 @@ def process_includes(msg, appendmsg=None): _attach_part(appendmsg, part) + def message_from_string(data, headers=None): if headers is None: headers = {} @@ -208,15 +215,17 @@ def message_from_string(data, headers=None): return(msg) + # this is heavily wasteful, reads through userdata string input def preprocess_userdata(data): newmsg = MIMEMultipart() process_includes(message_from_string(decomp_str(data)), newmsg) return(newmsg.as_string()) + # callback is a function that will be called with (data, content_type, # filename, payload) -def walk_userdata(istr, callback, data = None): +def walk_userdata(istr, callback, data=None): partnum = 0 for part in message_from_string(istr).walk(): # multipart/* are just containers @@ -233,7 +242,8 @@ def walk_userdata(istr, callback, data = None): callback(data, ctype, filename, part.get_payload(decode=True)) - partnum = partnum+1 + partnum = partnum + 1 + if __name__ == "__main__": import sys diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py index 3bc6516a..90d6b77d 100644 --- a/cloudinit/__init__.py +++ b/cloudinit/__init__.py @@ -26,31 +26,31 @@ seeddir = varlibdir + "/seed" cfg_env_name = "CLOUD_CFG" cfg_builtin = """ -log_cfgs: [ ] -datasource_list: [ "NoCloud", "OVF", "Ec2" ] +log_cfgs: [] +datasource_list: ["NoCloud", "OVF", "Ec2"] def_log_file: /var/log/cloud-init.log syslog_fix_perms: syslog:adm """ logger_name = "cloudinit" pathmap = { - "handlers" : "/handlers", - "scripts" : "/scripts", - "sem" : "/sem", - "boothooks" : "/boothooks", - "userdata_raw" : "/user-data.txt", - "userdata" : "/user-data.txt.i", - "obj_pkl" : "/obj.pkl", - "cloud_config" : "/cloud-config.txt", - "data" : "/data", - None : "", + "handlers": "/handlers", + "scripts": "/scripts", + "sem": "/sem", + "boothooks": "/boothooks", + "userdata_raw": "/user-data.txt", + "userdata": "/user-data.txt.i", + "obj_pkl": "/obj.pkl", + "cloud_config": "/cloud-config.txt", + "data": "/data", + None: "", } per_instance = "once-per-instance" per_always = "always" per_once = "once" -parsed_cfgs = { } +parsed_cfgs = {} import os from configobj import ConfigObj @@ -62,30 +62,35 @@ import errno import pwd import subprocess import yaml -import util import logging import logging.config import StringIO import glob import traceback +import cloudinit.util as util + + class NullHandler(logging.Handler): def emit(self, record): pass + log = logging.getLogger(logger_name) log.addHandler(NullHandler()) + def logging_set_from_cfg_file(cfg_file=system_config): logging_set_from_cfg(util.get_base_cfg(cfg_file, cfg_builtin, parsed_cfgs)) + def logging_set_from_cfg(cfg): log_cfgs = [] logcfg = util.get_cfg_option_str(cfg, "log_cfg", False) if logcfg: # if there is a 'logcfg' entry in the config, respect # it, it is the old 
keyname - log_cfgs = [ logcfg ] + log_cfgs = [logcfg] elif "log_cfgs" in cfg: for cfg in cfg['log_cfgs']: if isinstance(cfg, list): @@ -107,26 +112,27 @@ def logging_set_from_cfg(cfg): raise Exception("no valid logging found\n") -import DataSource -import UserDataHandler +import cloudinit.DataSource as DataSource +import cloudinit.UserDataHandler as UserDataHandler + class CloudInit: cfg = None - part_handlers = { } + part_handlers = {} old_conffile = '/etc/ec2-init/ec2-config.cfg' - ds_deps = [ DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK ] + ds_deps = [DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK] datasource = None cloud_config_str = '' datasource_name = '' - builtin_handlers = [ ] + builtin_handlers = [] - def __init__(self, ds_deps = None, sysconfig=system_config): + def __init__(self, ds_deps=None, sysconfig=system_config): self.builtin_handlers = [ - [ 'text/x-shellscript', self.handle_user_script, per_always ], - [ 'text/cloud-config', self.handle_cloud_config, per_always ], - [ 'text/upstart-job', self.handle_upstart_job, per_instance ], - [ 'text/cloud-boothook', self.handle_cloud_boothook, per_always ], + ['text/x-shellscript', self.handle_user_script, per_always], + ['text/cloud-config', self.handle_cloud_config, per_always], + ['text/upstart-job', self.handle_upstart_job, per_instance], + ['text/cloud-boothook', self.handle_cloud_boothook, per_always], ] if ds_deps != None: @@ -149,7 +155,7 @@ class CloudInit: from configobj import ConfigObj oldcfg = ConfigObj(self.old_conffile) if oldcfg is None: - oldcfg = { } + oldcfg = {} conf = util.mergedict(conf, oldcfg) except: pass @@ -177,7 +183,7 @@ class CloudInit: except OSError as e: if e.errno != errno.EEXIST: return False - + try: f = open(cache, "wb") cPickle.dump(self.datasource, f) @@ -185,7 +191,7 @@ class CloudInit: os.chmod(cache, 0400) except: raise - + def get_data_source(self): if self.datasource is not None: return True @@ -226,12 +232,12 @@ class CloudInit: os.symlink("./instances/%s" % iid, cur_instance_link) idir = self.get_ipath() dlist = [] - for d in [ "handlers", "scripts", "sem" ]: + for d in ["handlers", "scripts", "sem"]: dlist.append("%s/%s" % (idir, d)) - + util.ensure_dirs(dlist) - ds = "%s: %s\n" % ( self.datasource.__class__, str(self.datasource) ) + ds = "%s: %s\n" % (self.datasource.__class__, str(self.datasource)) dp = self.get_cpath('data') util.write_file("%s/%s" % (idir, 'datasource'), ds) util.write_file("%s/%s" % (dp, 'previous-datasource'), ds) @@ -260,7 +266,7 @@ class CloudInit: if freq == 'once-per-instance': return("%s/%s" % (self.get_ipath("sem"), name)) return("%s/%s.%s" % (get_cpath("sem"), name, freq)) - + def sem_has_run(self, name, freq): if freq == per_always: return False @@ -268,20 +274,20 @@ class CloudInit: if os.path.exists(semfile): return True return False - + def sem_acquire(self, name, freq): from time import time semfile = self.sem_getpath(name, freq) - + try: os.makedirs(os.path.dirname(semfile)) except OSError as e: if e.errno != errno.EEXIST: raise e - + if os.path.exists(semfile) and freq != per_always: return False - + # race condition try: f = open(semfile, "w") @@ -290,7 +296,7 @@ class CloudInit: except: return(False) return(True) - + def sem_clear(self, name, freq): semfile = self.sem_getpath(name, freq) try: @@ -298,7 +304,7 @@ class CloudInit: except OSError as e: if e.errno != errno.ENOENT: return False - + return True # acquire lock on 'name' for given 'freq' @@ -326,7 +332,7 @@ class CloudInit: # get_ipath : get the instance path for a name in pathmap 
# (/var/lib/cloud/instances/<instance>/name)<name>) def get_ipath(self, name=None): - return("%s/instances/%s%s" + return("%s/instances/%s%s" % (varlibdir, self.get_instance_id(), pathmap[name])) def consume_userdata(self, frequency=per_instance): @@ -341,7 +347,7 @@ class CloudInit: sys.path.insert(0, cdir) sys.path.insert(0, idir) - part_handlers = { } + part_handlers = {} # add handlers in cdir for fname in glob.glob("%s/*.py" % cdir): if not os.path.isfile(fname): @@ -364,13 +370,13 @@ class CloudInit: part_handlers, data, frequency) # walk the data - pdata = { 'handlers': part_handlers, 'handlerdir': idir, - 'data' : data, 'frequency': frequency } + pdata = {'handlers': part_handlers, 'handlerdir': idir, + 'data': data, 'frequency': frequency} UserDataHandler.walk_userdata(self.get_userdata(), - partwalker_callback, data = pdata) + partwalker_callback, data=pdata) # give callbacks opportunity to finalize - called = [ ] + called = [] for (_mtype, mod) in part_handlers.iteritems(): if mod in called: continue @@ -385,7 +391,7 @@ class CloudInit: filename = filename.replace(os.sep, '_') scriptsdir = get_ipath_cur('scripts') - util.write_file("%s/%s" % + util.write_file("%s/%s" % (scriptsdir, filename), util.dos2unix(payload), 0700) def handle_upstart_job(self, _data, ctype, filename, payload, frequency): @@ -396,7 +402,7 @@ class CloudInit: if ctype == "__end__" or ctype == "__begin__": return if not filename.endswith(".conf"): - filename = filename+".conf" + filename = filename + ".conf" util.write_file("%s/%s" % ("/etc/init", filename), util.dos2unix(payload), 0644) @@ -414,7 +420,7 @@ class CloudInit: ## as CloudConfig does that also, merging it with this cfg ## # ccfg = yaml.load(self.cloud_config_str) - # if ccfg is None: ccfg = { } + # if ccfg is None: ccfg = {} # self.cfg = util.mergedict(ccfg, self.cfg) return @@ -434,7 +440,7 @@ class CloudInit: start = 0 if payload.startswith(prefix): start = len(prefix) + 1 - + boothooks_dir = self.get_ipath("boothooks") filepath = "%s/%s" % (boothooks_dir, filename) util.write_file(filepath, payload[start:], 0700) @@ -473,9 +479,9 @@ class CloudInit: def initfs(): - subds = [ 'scripts/per-instance', 'scripts/per-once', 'scripts/per-boot', - 'seed', 'instances', 'handlers', 'sem', 'data' ] - dlist = [ ] + subds = ['scripts/per-instance', 'scripts/per-once', 'scripts/per-boot', + 'seed', 'instances', 'handlers', 'sem', 'data'] + dlist = [] for subd in subds: dlist.append("%s/%s" % (varlibdir, subd)) util.ensure_dirs(dlist) @@ -494,8 +500,9 @@ def initfs(): g = None util.chownbyname(log_file, u, g) + def purge_cache(rmcur=True): - rmlist = [ boot_finished ] + rmlist = [boot_finished] if rmcur: rmlist.append(cur_instance_link) for f in rmlist: @@ -509,28 +516,35 @@ def purge_cache(rmcur=True): return(False) return(True) + # get_ipath_cur: get the current instance path for an item def get_ipath_cur(name=None): return("%s/%s%s" % (varlibdir, "instance", pathmap[name])) + # get_cpath : get the "clouddir" (/var/lib/cloud/<name>) # for a name in dirmap def get_cpath(name=None): return("%s%s" % (varlibdir, pathmap[name])) + def get_base_cfg(cfg_path=None): if cfg_path is None: cfg_path = system_config return(util.get_base_cfg(cfg_path, cfg_builtin, parsed_cfgs)) + def get_builtin_cfg(): return(yaml.load(cfg_builtin)) + class DataSourceNotFoundException(Exception): pass + def list_sources(cfg_list, depends): - return(DataSource.list_sources(cfg_list, depends, ["cloudinit", "" ])) + return(DataSource.list_sources(cfg_list, depends, ["cloudinit", ""])) + 
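consume_userdata() above loads handler modules from the handler directories and registers them before walking the user-data parts; a handler module only needs to expose a handler_version, a list_types() and a matching handle_part(). A minimal sketch of such a module under that interface (the MIME type and behaviour here are made up):

    # example part-handler; handler_version 2 receives the frequency argument
    handler_version = 2

    def list_types():
        # MIME types this handler wants to be called for
        return ['text/x-example-data']

    def handle_part(data, ctype, filename, payload, frequency):
        if ctype in ('__begin__', '__end__'):
            return  # called once before and once after the real parts
        # a real handler would act on the payload here
        print "example handler: %s (%d bytes)" % (filename, len(payload))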
def handler_register(mod, part_handlers, data, frequency=per_instance): if not hasattr(mod, "handler_version"): @@ -542,27 +556,30 @@ def handler_register(mod, part_handlers, data, frequency=per_instance): handler_call_begin(mod, data, frequency) return(mod) + def handler_call_begin(mod, data, frequency): handler_handle_part(mod, data, "__begin__", None, None, frequency) + def handler_call_end(mod, data, frequency): handler_handle_part(mod, data, "__end__", None, None, frequency) + def handler_handle_part(mod, data, ctype, filename, payload, frequency): # only add the handler if the module should run modfreq = getattr(mod, "frequency", per_instance) - if not ( modfreq == per_always or - ( frequency == per_instance and modfreq == per_instance)): + if not (modfreq == per_always or + (frequency == per_instance and modfreq == per_instance)): return if mod.handler_version == 1: mod.handle_part(data, ctype, filename, payload) else: mod.handle_part(data, ctype, filename, payload, frequency) -def partwalker_handle_handler(pdata, _ctype, _filename, payload): +def partwalker_handle_handler(pdata, _ctype, _filename, payload): curcount = pdata['handlercount'] - modname = 'part-handler-%03d' % curcount + modname = 'part-handler-%03d' % curcount frequency = pdata['frequency'] modfname = modname + ".py" @@ -578,6 +595,7 @@ def partwalker_handle_handler(pdata, _ctype, _filename, payload): traceback.print_exc(file=sys.stderr) return + def partwalker_callback(pdata, ctype, filename, payload): # data here is the part_handlers array and then the data to pass through if ctype == "text/part-handler": @@ -590,18 +608,20 @@ def partwalker_callback(pdata, ctype, filename, payload): handler_handle_part(pdata['handlers'][ctype], pdata['data'], ctype, filename, payload, pdata['frequency']) + class InternalPartHandler: freq = per_instance - mtypes = [ ] + mtypes = [] handler_version = 1 handler = None - def __init__(self, handler, mtypes, frequency, version = 2): + + def __init__(self, handler, mtypes, frequency, version=2): self.handler = handler self.mtypes = mtypes self.frequency = frequency self.handler_version = version - def __repr__(): + def __repr__(self): return("InternalPartHandler: [%s]" % self.mtypes) def list_types(self): diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 9cfd5ba3..40bfa7ea 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -1,15 +1,16 @@ import subprocess + def netdev_info(): - fields = ( "hwaddr", "addr", "bcast", "mask" ) - ifcfg_out = subprocess.check_output(["ifconfig", "-a"]) - devs = { } + fields = ("hwaddr", "addr", "bcast", "mask") + ifcfg_out = str(subprocess.check_output(["ifconfig", "-a"])) + devs = {} for line in ifcfg_out.splitlines(): if len(line) == 0: continue if line[0] not in ("\t", " "): curdev = line.split()[0] - devs[curdev] = { "up": False } + devs[curdev] = {"up": False} for field in fields: devs[curdev][field] = "" toks = line.lower().strip().split() @@ -23,7 +24,7 @@ def netdev_info(): for i in range(len(toks)): if toks[i] == "hwaddr": try: - devs[curdev]["hwaddr"] = toks[i+1] + devs[curdev]["hwaddr"] = toks[i + 1] except IndexError: pass for field in ("addr", "bcast", "mask"): @@ -32,16 +33,17 @@ def netdev_info(): continue if toks[i] == "%s:" % field: try: - devs[curdev][target] = toks[i+1] + devs[curdev][target] = toks[i + 1] except IndexError: pass elif toks[i].startswith("%s:" % field): - devs[curdev][target] = toks[i][len(field)+1:] + devs[curdev][target] = toks[i][len(field) + 1:] return(devs) + def route_info(): - route_out = 
subprocess.check_output(["route", "-n"]) - routes = [ ] + route_out = str(subprocess.check_output(["route", "-n"])) + routes = [] for line in route_out.splitlines()[1:]: if not line: continue @@ -51,14 +53,16 @@ def route_info(): routes.append(toks) return(routes) + def getgateway(): for r in route_info(): if r[3].find("G") >= 0: return("%s[%s]" % (r[1], r[7])) return(None) + def debug_info(pre="ci-info: "): - lines = [ ] + lines = [] try: netdev = netdev_info() except Exception: @@ -76,8 +80,9 @@ def debug_info(pre="ci-info: "): for r in routes: lines.append("%sroute-%d: %-15s %-15s %-15s %-6s %s" % (pre, n, r[0], r[1], r[2], r[7], r[3])) - n = n+1 + n = n + 1 return('\n'.join(lines)) + if __name__ == '__main__': print debug_info() diff --git a/cloudinit/util.py b/cloudinit/util.py index 7df773ce..d8d735cc 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -37,6 +37,7 @@ try: except ImportError: HAVE_LIBSELINUX = False + def read_conf(fname): try: stream = open(fname, "r") @@ -45,12 +46,13 @@ def read_conf(fname): return conf except IOError as e: if e.errno == errno.ENOENT: - return { } + return {} raise + def get_base_cfg(cfgfile, cfg_builtin="", parsed_cfgs=None): - kerncfg = { } - syscfg = { } + kerncfg = {} + syscfg = {} if parsed_cfgs and cfgfile in parsed_cfgs: return(parsed_cfgs[cfgfile]) @@ -73,23 +75,26 @@ def get_base_cfg(cfgfile, cfg_builtin="", parsed_cfgs=None): parsed_cfgs[cfgfile] = fin return(fin) + def get_cfg_option_bool(yobj, key, default=False): - if not yobj.has_key(key): + if key not in yobj: return default val = yobj[key] if val is True: return True - if str(val).lower() in [ 'true', '1', 'on', 'yes']: + if str(val).lower() in ['true', '1', 'on', 'yes']: return True return False + def get_cfg_option_str(yobj, key, default=None): - if not yobj.has_key(key): + if key not in yobj: return default return yobj[key] + def get_cfg_option_list_or_str(yobj, key, default=None): - if not yobj.has_key(key): + if key not in yobj: return default if yobj[key] is None: return [] @@ -97,6 +102,7 @@ def get_cfg_option_list_or_str(yobj, key, default=None): return yobj[key] return([yobj[key]]) + # get a cfg entry by its path array # for f['a']['b']: get_cfg_by_path(mycfg,('a','b')) def get_cfg_by_path(yobj, keyp, default=None): @@ -107,6 +113,7 @@ def get_cfg_by_path(yobj, keyp, default=None): cur = cur[tok] return(cur) + # merge values from cand into source # if src has a key, cand will not override def mergedict(src, cand): @@ -118,6 +125,7 @@ def mergedict(src, cand): src[k] = mergedict(src[k], v) return src + def write_file(filename, content, mode=0644, omode="wb"): try: os.makedirs(os.path.dirname(filename)) @@ -132,10 +140,12 @@ def write_file(filename, content, mode=0644, omode="wb"): f.close() restorecon_if_possible(filename) + def restorecon_if_possible(path, recursive=False): if HAVE_LIBSELINUX and selinux.is_selinux_enabled(): selinux.restorecon(path, recursive=recursive) + # get keyid from keyserver def getkeybyid(keyid, keyserver): shcmd = """ @@ -153,21 +163,23 @@ def getkeybyid(keyid, keyserver): args = ['sh', '-c', shcmd, "export-gpg-keyid", keyid, keyserver] return(subp(args)[0]) + def runparts(dirp, skip_no_exist=True): if skip_no_exist and not os.path.isdir(dirp): return - + # per bug 857926, Fedora's run-parts will exit failure on empty dir if os.path.isdir(dirp) and os.listdir(dirp) == []: return - cmd = [ 'run-parts', '--regex', '.*', dirp ] + cmd = ['run-parts', '--regex', '.*', dirp] sp = subprocess.Popen(cmd) sp.communicate() if sp.returncode is not 0: 
raise subprocess.CalledProcessError(sp.returncode, cmd) return + def subp(args, input_=None): sp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) @@ -176,6 +188,7 @@ def subp(args, input_=None): raise subprocess.CalledProcessError(sp.returncode, args, (out, err)) return(out, err) + def render_to_file(template, outfile, searchList): t = Template(file='/etc/cloud/templates/%s.tmpl' % template, searchList=[searchList]) @@ -183,6 +196,7 @@ def render_to_file(template, outfile, searchList): f.write(t.respond()) f.close() + def render_string(template, searchList): return(Template(template, searchList=[searchList]).respond()) @@ -201,7 +215,7 @@ def read_optional_seed(fill, base="", ext="", timeout=5): if e.errno == errno.ENOENT: return False raise - + # raise OSError with enoent if not found def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): @@ -219,20 +233,22 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): ud_url = "%s%s%s" % (base, "user-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) - raise_err = None - for attempt in range(0, retries+1): + no_exc = object() + raise_err = no_exc + for attempt in range(0, retries + 1): try: md_str = readurl(md_url, timeout=timeout) ud = readurl(ud_url, timeout=timeout) md = yaml.load(md_str) - + return(md, ud) except urllib2.HTTPError as e: raise_err = e except urllib2.URLError as e: raise_err = e - if isinstance(e.reason, OSError) and e.reason.errno == errno.ENOENT: - raise_err = e.reason + if (isinstance(e.reason, OSError) and + e.reason.errno == errno.ENOENT): + raise_err = e.reason if attempt == retries: break @@ -242,13 +258,16 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): raise(raise_err) + def logexc(log, lvl=logging.DEBUG): log.log(lvl, traceback.format_exc()) + class RecursiveInclude(Exception): pass -def read_file_with_includes(fname, rel = ".", stack=None, patt = None): + +def read_file_with_includes(fname, rel=".", stack=None, patt=None): if stack is None: stack = [] if not fname.startswith("/"): @@ -293,27 +312,29 @@ def read_file_with_includes(fname, rel = ".", stack=None, patt = None): inc_contents = "" else: raise - contents = contents[0:loc] + inc_contents + contents[endl+1:] + contents = contents[0:loc] + inc_contents + contents[endl + 1:] cur = loc + len(inc_contents) stack.pop() return(contents) + def read_conf_d(confd): # get reverse sorted list (later trumps newer) confs = sorted(os.listdir(confd), reverse=True) - + # remove anything not ending in '.cfg' confs = [f for f in confs if f.endswith(".cfg")] # remove anything not a file confs = [f for f in confs if os.path.isfile("%s/%s" % (confd, f))] - cfg = { } + cfg = {} for conf in confs: cfg = mergedict(cfg, read_conf("%s/%s" % (confd, conf))) return(cfg) + def read_conf_with_confd(cfgfile): cfg = read_conf(cfgfile) confd = False @@ -345,7 +366,8 @@ def get_cmdline(): except: cmdline = "" return(cmdline) - + + def read_cc_from_cmdline(cmdline=None): # this should support reading cloud-config information from # the kernel command line. 
It is intended to support content of the @@ -363,18 +385,20 @@ def read_cc_from_cmdline(cmdline=None): begin_l = len(tag_begin) end_l = len(tag_end) clen = len(cmdline) - tokens = [ ] + tokens = [] begin = cmdline.find(tag_begin) while begin >= 0: end = cmdline.find(tag_end, begin + begin_l) if end < 0: end = clen - tokens.append(cmdline[begin+begin_l:end].lstrip().replace("\\n", "\n")) - + tokens.append(cmdline[begin + begin_l:end].lstrip().replace("\\n", + "\n")) + begin = cmdline.find(tag_begin, end + end_l) return('\n'.join(tokens)) + def ensure_dirs(dirlist, mode=0755): fixmodes = [] for d in dirlist: @@ -392,6 +416,7 @@ def ensure_dirs(dirlist, mode=0755): for d in fixmodes: os.chmod(d, mode) + def chownbyname(fname, user=None, group=None): uid = -1 gid = -1 @@ -406,8 +431,9 @@ def chownbyname(fname, user=None, group=None): os.chown(fname, uid, gid) + def readurl(url, data=None, timeout=None): - openargs = { } + openargs = {} if timeout != None: openargs['timeout'] = timeout @@ -420,37 +446,40 @@ def readurl(url, data=None, timeout=None): response = urllib2.urlopen(req, **openargs) return(response.read()) + # shellify, takes a list of commands # for each entry in the list # if it is an array, shell protect it (with single ticks) # if it is a string, do nothing def shellify(cmdlist): content = "#!/bin/sh\n" - escaped = "%s%s%s%s" % ( "'", '\\', "'", "'" ) + escaped = "%s%s%s%s" % ("'", '\\', "'", "'") for args in cmdlist: # if the item is a list, wrap all items in single tick # if its not, then just write it directly if isinstance(args, list): - fixed = [ ] + fixed = [] for f in args: fixed.append("'%s'" % str(f).replace("'", escaped)) - content = "%s%s\n" % ( content, ' '.join(fixed) ) + content = "%s%s\n" % (content, ' '.join(fixed)) else: - content = "%s%s\n" % ( content, str(args) ) + content = "%s%s\n" % (content, str(args)) return content + def dos2unix(string): # find first end of line pos = string.find('\n') - if pos <= 0 or string[pos-1] != '\r': + if pos <= 0 or string[pos - 1] != '\r': return(string) return(string.replace('\r\n', '\n')) + def islxc(): # is this host running lxc? try: with open("/proc/1/cgroup") as f: - if f.read() == "/": + if f.read() == "/": return True except IOError as e: if e.errno != errno.ENOENT: @@ -469,6 +498,7 @@ def islxc(): return False + def get_hostname_fqdn(cfg, cloud): # return the hostname and fqdn from 'cfg'. If not found in cfg, # then fall back to data from cloud @@ -483,7 +513,7 @@ def get_hostname_fqdn(cfg, cloud): fqdn = cfg['hostname'] hostname = cfg['hostname'][:fqdn.find('.')] else: - # no fqdn set, get fqdn from cloud. + # no fqdn set, get fqdn from cloud. # get hostname from cfg if available otherwise cloud fqdn = cloud.get_hostname(fqdn=True) if "hostname" in cfg: @@ -492,9 +522,10 @@ def get_hostname_fqdn(cfg, cloud): hostname = cloud.get_hostname() return(hostname, fqdn) + def get_fqdn_from_hosts(hostname, filename="/etc/hosts"): # this parses /etc/hosts to get a fqdn. 
It should return the same - # result as 'hostname -f <hostname>' if /etc/hosts.conf + # result as 'hostname -f <hostname>' if /etc/hosts.conf # did not have did not have 'bind' in the order attribute fqdn = None try: @@ -520,6 +551,7 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"): return fqdn + def is_resolvable(name): """ determine if a url is resolvable, return a boolean """ try: @@ -528,10 +560,12 @@ def is_resolvable(name): except socket.gaierror: return False + def is_resolvable_url(url): """ determine if this url is resolvable (existing or ip) """ return(is_resolvable(urlparse.urlparse(url).hostname)) + def search_for_mirror(candidates): """ Search through a list of mirror urls for one that works """ for cand in candidates: @@ -543,6 +577,7 @@ def search_for_mirror(candidates): return None + def close_stdin(): """ reopen stdin as /dev/null so even subprocesses or other os level things get |