Diffstat (limited to 'cloudinit')
26 files changed, 224 insertions(+), 144 deletions(-)
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 22d9167e..620b3c07 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -82,9 +82,6 @@ class Cloud(object): def get_locale(self): return self.datasource.get_locale() - def get_local_mirror(self): - return self.datasource.get_local_mirror() - def get_hostname(self, fqdn=False): return self.datasource.get_hostname(fqdn=fqdn) diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index 3426099e..02056ee0 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -16,8 +16,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -from cloudinit import util from cloudinit.settings import PER_INSTANCE +from cloudinit import util frequency = PER_INSTANCE @@ -50,7 +50,7 @@ def handle(_name, cfg, cloud, log, _args): def write_apt_snippet(cloud, setting, log, f_name): - """ Writes f_name with apt pipeline depth 'setting' """ + """Writes f_name with apt pipeline depth 'setting'.""" file_contents = APT_PIPE_TPL % (setting) diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 1bffa47d..356bb98d 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -20,6 +20,7 @@ import glob import os +import time from cloudinit import templater from cloudinit import util @@ -50,20 +51,25 @@ def handle(name, cfg, cloud, log, _args): upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) release = get_release() - mirror = find_apt_mirror(cloud, cfg) - if not mirror: + mirrors = find_apt_mirror_info(cloud, cfg) + if not mirrors or "primary" not in mirrors: log.debug(("Skipping module named %s," " no package 'mirror' located"), name) return - log.debug("Selected mirror at: %s" % mirror) + # backwards compatibility + mirror = mirrors["primary"] + mirrors["mirror"] = mirror + + log.debug("mirror info: %s" % mirrors) if not util.get_cfg_option_bool(cfg, 'apt_preserve_sources_list', False): - generate_sources_list(release, mirror, cloud, log) - old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', - "archive.ubuntu.com/ubuntu") - rename_apt_lists(old_mir, mirror) + generate_sources_list(release, mirrors, cloud, log) + old_mirrors = cfg.get('apt_old_mirrors', + {"primary": "archive.ubuntu.com/ubuntu", + "security": "security.ubuntu.com/ubuntu"}) + rename_apt_lists(old_mirrors, mirrors) # Set up any apt proxy proxy = cfg.get("apt_proxy", None) @@ -81,8 +87,10 @@ def handle(name, cfg, cloud, log, _args): # Process 'apt_sources' if 'apt_sources' in cfg: - errors = add_sources(cloud, cfg['apt_sources'], - {'MIRROR': mirror, 'RELEASE': release}) + params = mirrors + params['RELEASE'] = release + params['MIRROR'] = mirror + errors = add_sources(cloud, cfg['apt_sources'], params) for e in errors: log.warn("Source Error: %s", ':'.join(e)) @@ -118,6 +126,20 @@ def handle(name, cfg, cloud, log, _args): util.logexc(log, "Failed to install packages: %s ", pkglist) errors.append(e) + # kernel and openssl (possibly some other packages) + # write a file /var/run/reboot-required after upgrading. 
+ # if that file exists and configured, then just stop right now and reboot + # TODO(smoser): handle this less voilently + reboot_file = "/var/run/reboot-required" + if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and + os.path.isfile(reboot_file)): + log.warn("rebooting after upgrade or install per %s" % reboot_file) + time.sleep(1) # give the warning time to get out + util.subp(["/sbin/reboot"]) + time.sleep(60) + log.warn("requested reboot did not happen!") + errors.append(Exception("requested reboot did not happen!")) + if len(errors): log.warn("%s failed with exceptions, re-raising the last one", len(errors)) @@ -146,15 +168,18 @@ def mirror2lists_fileprefix(mirror): return string -def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"): - oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) - nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror)) - if oprefix == nprefix: - return - olen = len(oprefix) - for filename in glob.glob("%s_*" % oprefix): - # TODO use the cloud.paths.join... - util.rename(filename, "%s%s" % (nprefix, filename[olen:])) +def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): + for (name, omirror) in old_mirrors.iteritems(): + nmirror = new_mirrors.get(name) + if not nmirror: + continue + oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) + nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) + if oprefix == nprefix: + continue + olen = len(oprefix) + for filename in glob.glob("%s_*" % oprefix): + util.rename(filename, "%s%s" % (nprefix, filename[olen:])) def get_release(): @@ -162,14 +187,17 @@ def get_release(): return stdout.strip() -def generate_sources_list(codename, mirror, cloud, log): +def generate_sources_list(codename, mirrors, cloud, log): template_fn = cloud.get_template_filename('sources.list') - if template_fn: - params = {'mirror': mirror, 'codename': codename} - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) - else: + if not template_fn: log.warn("No template found, not rendering /etc/apt/sources.list") + return + + params = {'codename': codename} + for k in mirrors: + params[k] = mirrors[k] + out_fn = cloud.paths.join(False, '/etc/apt/sources.list') + templater.render_to_file(template_fn, out_fn, params) def add_sources(cloud, srclist, template_params=None): @@ -231,43 +259,47 @@ def add_sources(cloud, srclist, template_params=None): return errorlist -def find_apt_mirror(cloud, cfg): - """ find an apt_mirror given the cloud and cfg provided """ +def find_apt_mirror_info(cloud, cfg): + """find an apt_mirror given the cloud and cfg provided.""" mirror = None - cfg_mirror = cfg.get("apt_mirror", None) - if cfg_mirror: - mirror = cfg["apt_mirror"] - elif "apt_mirror_search" in cfg: - mirror = util.search_for_mirror(cfg['apt_mirror_search']) - else: - mirror = cloud.get_local_mirror() + # this is less preferred way of specifying mirror preferred would be to + # use the distro's search or package_mirror. 
+ mirror = cfg.get("apt_mirror", None) - mydom = "" + search = cfg.get("apt_mirror_search", None) + if not mirror and search: + mirror = util.search_for_mirror(search) + if (not mirror and + util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): + mydom = "" doms = [] - if not mirror: - # if we have a fqdn, then search its domain portion first - (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) - mydom = ".".join(fqdn.split(".")[1:]) - if mydom: - doms.append(".%s" % mydom) + # if we have a fqdn, then search its domain portion first + (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) + mydom = ".".join(fqdn.split(".")[1:]) + if mydom: + doms.append(".%s" % mydom) + + doms.extend((".localdomain", "",)) - if (not mirror and - util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): - doms.extend((".localdomain", "",)) + mirror_list = [] + distro = cloud.distro.name + mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) + for post in doms: + mirror_list.append(mirrorfmt % (post)) - mirror_list = [] - distro = cloud.distro.name - mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) - for post in doms: - mirror_list.append(mirrorfmt % (post)) + mirror = util.search_for_mirror(mirror_list) - mirror = util.search_for_mirror(mirror_list) + mirror_info = cloud.datasource.get_package_mirror_info() - if not mirror: - mirror = cloud.distro.get_package_mirror() + # this is a bit strange. + # if mirror is set, then one of the legacy options above set it + # but they do not cover security. so we need to get that from + # get_package_mirror_info + if mirror: + mirror_info.update({'primary': mirror}) - return mirror + return mirror_info diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py index bae1ea54..896cb4d0 100644 --- a/cloudinit/config/cc_bootcmd.py +++ b/cloudinit/config/cc_bootcmd.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 68b86ff6..6d376184 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS @@ -44,5 +44,5 @@ def handle(name, _cfg, cloud, log, args): try: util.subp(cmd) except Exception as e: - # TODO, use log exception from utils?? + # TODO(harlowja), use log exception from utils?? log.warn("Emission of upstart event %s failed due to: %s", n, e) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 467c1496..74ee18e1 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -48,7 +48,8 @@ def handle(name, cfg, cloud, log, _args): # Create object for reading puppet.conf values puppet_config = helpers.DefaultingConfigParser() # Read puppet.conf values from original file in order to be able to - # mix the rest up. First clean them up (TODO is this really needed??) + # mix the rest up. First clean them up + # (TODO(harlowja) is this really needed??) cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) puppet_config.readfp(StringIO(cleaned_contents), @@ -80,7 +81,7 @@ def handle(name, cfg, cloud, log, _args): for (o, v) in cfg.iteritems(): if o == 'certname': # Expand %f as the fqdn - # TODO should this use the cloud fqdn?? 
+ # TODO(harlowja) should this use the cloud fqdn?? v = v.replace("%f", socket.getfqdn()) # Expand %i as the instance id v = v.replace("%i", cloud.get_instance_id()) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 256a194f..e7f27944 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -22,8 +22,8 @@ import os import stat import time -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS @@ -72,12 +72,12 @@ def handle(name, cfg, cloud, log, args): log.debug("Skipping module named %s, resizing disabled", name) return - # TODO is the directory ok to be used?? + # TODO(harlowja) is the directory ok to be used?? resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") resize_root_d = cloud.paths.join(False, resize_root_d) util.ensure_dir(resize_root_d) - # TODO: allow what is to be resized to be configurable?? + # TODO(harlowja): allow what is to be resized to be configurable?? resize_what = cloud.paths.join(False, "/") with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", dir=resize_root_d, delete=True) as tfh: @@ -136,5 +136,5 @@ def do_resize(resize_cmd, log): raise tot_time = time.time() - start log.debug("Resizing took %.3f seconds", tot_time) - # TODO: Should we add a fsck check after this to make + # TODO(harlowja): Should we add a fsck check after this to make # sure we didn't corrupt anything? diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py index 45d41b3f..4bf18516 100644 --- a/cloudinit/config/cc_rightscale_userdata.py +++ b/cloudinit/config/cc_rightscale_userdata.py @@ -37,9 +37,9 @@ import os +from cloudinit.settings import PER_INSTANCE from cloudinit import url_helper as uhelp from cloudinit import util -from cloudinit.settings import PER_INSTANCE from urlparse import parse_qs @@ -72,7 +72,7 @@ def handle(name, _cfg, cloud, log, _args): captured_excps = [] # These will eventually be then ran by the cc_scripts_user - # TODO: maybe this should just be a new user data handler?? + # TODO(harlowja): maybe this should just be a new user data handler?? # Instead of a late module that acts like a user data handler? scripts_d = cloud.get_ipath_cur('scripts') urls = mdict[MY_HOOKNAME] diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 4019ae90..3431bd2a 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -18,11 +18,11 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -import os import glob +import os -from cloudinit import util from cloudinit import ssh_util +from cloudinit import util DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding," "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " @@ -76,7 +76,7 @@ def handle(_name, cfg, cloud, log, _args): pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0]) cmd = ['sh', '-xc', KEY_GEN_TPL % pair] try: - # TODO: Is this guard needed? + # TODO(harlowja): Is this guard needed? with util.SeLinuxGuard("/etc/ssh", recursive=True): util.subp(cmd, capture=False) log.debug("Generated a key for %s from %s", pair[0], pair[1]) @@ -94,7 +94,7 @@ def handle(_name, cfg, cloud, log, _args): if not os.path.exists(keyfile): cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] try: - # TODO: Is this guard needed? + # TODO(harlowja): Is this guard needed? 
with util.SeLinuxGuard("/etc/ssh", recursive=True): util.subp(cmd, capture=False) except: diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 81d6e89e..23f5755a 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -28,7 +28,7 @@ from cloudinit import util def _split_hash(bin_hash): split_up = [] for i in xrange(0, len(bin_hash), 2): - split_up.append(bin_hash[i:i+2]) + split_up.append(bin_hash[i:i + 2]) return split_up diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 38108da7..4d75000f 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -18,8 +18,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. -from cloudinit import util from cloudinit import templater +from cloudinit import util from cloudinit.settings import PER_ALWAYS diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index b84a1a06..1d6679ea 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -20,8 +20,8 @@ import os -from cloudinit import util from cloudinit.settings import PER_ALWAYS +from cloudinit import util frequency = PER_ALWAYS diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 1bfa4c25..a73d6f4e 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -19,8 +19,8 @@ import base64 import os -from cloudinit import util from cloudinit.settings import PER_INSTANCE +from cloudinit import util frequency = PER_INSTANCE @@ -46,7 +46,7 @@ def canonicalize_extraction(encoding_type, log): return ['application/x-gzip'] if encoding_type in ['gz+base64', 'gzip+base64', 'gz+b64', 'gzip+b64']: return ['application/base64', 'application/x-gzip'] - # Yaml already encodes binary data as base64 if it is given to the + # Yaml already encodes binary data as base64 if it is given to the # yaml file as binary, so those will be automatically decoded for you. # But the above b64 is just for people that are more 'comfortable' # specifing it manually (which might be a possiblity) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index da4d0180..357209a4 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -23,12 +23,14 @@ from StringIO import StringIO import abc +import os +import re from cloudinit import importer from cloudinit import log as logging from cloudinit import util -# TODO: Make this via config?? +# TODO(harlowja): Make this via config?? 
IFACE_ACTIONS = { 'up': ['ifup', '--all'], 'down': ['ifdown', '--all'], @@ -75,8 +77,26 @@ class Distro(object): def update_package_sources(self): raise NotImplementedError() - def get_package_mirror(self): - return self.get_option('package_mirror') + def get_primary_arch(self): + arch = os.uname[4] + if arch in ("i386", "i486", "i586", "i686"): + return "i386" + return arch + + def _get_arch_package_mirror_info(self, arch=None): + mirror_info = self.get_option("package_mirrors", None) + if arch == None: + arch = self.get_primary_arch() + return _get_arch_package_mirror_info(mirror_info, arch) + + def get_package_mirror_info(self, arch=None, + availability_zone=None): + # this resolves the package_mirrors config option + # down to a single dict of {mirror_name: mirror_url} + arch_info = self._get_arch_package_mirror_info(arch) + + return _get_package_mirror_info(availability_zone=availability_zone, + mirror_info=arch_info) def apply_network(self, settings, bring_up=True): # Write it out @@ -151,6 +171,55 @@ class Distro(object): return False +def _get_package_mirror_info(mirror_info, availability_zone=None, + mirror_filter=util.search_for_mirror): + # given a arch specific 'mirror_info' entry (from package_mirrors) + # search through the 'search' entries, and fallback appropriately + # return a dict with only {name: mirror} entries. + + ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % + "north|northeast|east|southeast|south|southwest|west|northwest") + + subst = {} + if availability_zone: + subst['availability_zone'] = availability_zone + + if availability_zone and re.match(ec2_az_re, availability_zone): + subst['ec2_region'] = "%s" % availability_zone[0:-1] + + results = {} + for (name, mirror) in mirror_info.get('failsafe', {}).iteritems(): + results[name] = mirror + + for (name, searchlist) in mirror_info.get('search', {}).iteritems(): + mirrors = [] + for tmpl in searchlist: + try: + mirrors.append(tmpl % subst) + except KeyError: + pass + + found = mirror_filter(mirrors) + if found: + results[name] = found + + LOG.debug("filtered distro mirror info: %s" % results) + + return results + + +def _get_arch_package_mirror_info(package_mirrors, arch): + # pull out the specific arch from a 'package_mirrors' config option + default = None + for item in package_mirrors: + arches = item.get("arches") + if arch in arches: + return item + if "default" in arches: + default = item + return default + + def fetch(name): locs = importer.find_module(name, ['', __name__], diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 3247d7ce..da8c1a5b 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -147,3 +147,7 @@ class Distro(distros.Distro): def update_package_sources(self): self._runner.run("update-sources", self.package_command, ["update"], freq=PER_INSTANCE) + + def get_primary_arch(self): + (arch, _err) = util.subp(['dpkg', '--print-architecture']) + return str(arch).strip() diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 7fa69f03..d81ee5fb 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -69,7 +69,7 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _write_network(self, settings): - # TODO fix this... since this is the ubuntu format + # TODO(harlowja) fix this... 
since this is the ubuntu format entries = translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", settings, entries) @@ -258,7 +258,7 @@ class QuotingConfigObj(ConfigObj): # This is a util function to translate a ubuntu /etc/network/interfaces 'blob' # to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/ -# TODO remove when we have python-netcf active... +# TODO(harlowja) remove when we have python-netcf active... def translate_network(settings): # Get the standard cmd, args from the ubuntu format entries = [] diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 6d1502f4..99caed1f 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -133,7 +133,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload): modfname = os.path.join(pdata['handlerdir'], "%s" % (modname)) if not modfname.endswith(".py"): modfname = "%s.py" % (modfname) - # TODO: Check if path exists?? + # TODO(harlowja): Check if path exists?? util.write_file(modfname, payload, 0600) handlers = pdata['handlers'] try: diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py index a9d8e544..6c5c11ca 100644 --- a/cloudinit/handlers/shell_script.py +++ b/cloudinit/handlers/shell_script.py @@ -43,7 +43,7 @@ class ShellScriptPartHandler(handlers.Handler): def _handle_part(self, _data, ctype, filename, payload, _frequency): if ctype in handlers.CONTENT_SIGNALS: - # TODO: maybe delete existing things here + # TODO(harlowja): maybe delete existing things here return filename = util.clean_filename(filename) diff --git a/cloudinit/log.py b/cloudinit/log.py index 819c85b6..2333e5ee 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -21,8 +21,8 @@ # along with this program. If not, see <http://www.gnu.org/licenses/>. import logging -import logging.handlers import logging.config +import logging.handlers import collections import os diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 751bef4f..f7ffa7cb 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -49,8 +49,7 @@ class DataSourceCloudStack(sources.DataSource): self.metadata_address = "http://%s/" % (gw_addr) def get_default_gateway(self): - """ Returns the default gateway ip address in the dotted format - """ + """Returns the default gateway ip address in the dotted format.""" lines = util.load_file("/proc/net/route").splitlines() for line in lines: items = line.split("\t") @@ -132,7 +131,8 @@ class DataSourceCloudStack(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def get_availability_zone(self): + @property + def availability_zone(self): return self.metadata['availability-zone'] diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 320dd1d1..850b281c 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -124,12 +124,12 @@ class NonConfigDriveDir(Exception): def find_cfg_drive_device(): - """ Get the config drive device. Return a string like '/dev/vdb' - or None (if there is no non-root device attached). This does not - check the contents, only reports that if there *were* a config_drive - attached, it would be this device. - Note: per config_drive documentation, this is - "associated as the last available disk on the instance" + """Get the config drive device. 
Return a string like '/dev/vdb' + or None (if there is no non-root device attached). This does not + check the contents, only reports that if there *were* a config_drive + attached, it would be this device. + Note: per config_drive documentation, this is + "associated as the last available disk on the instance" """ # This seems to be for debugging?? @@ -160,7 +160,7 @@ def read_config_drive_dir(source_dir): string populated. If not a valid dir, raise a NonConfigDriveDir """ - # TODO: fix this for other operating systems... + # TODO(harlowja): fix this for other operating systems... # Ie: this is where https://fedorahosted.org/netcf/ or similar should # be hooked in... (or could be) found = {} diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index d9eb8f17..556dcafb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -83,40 +83,6 @@ class DataSourceEc2(sources.DataSource): def get_availability_zone(self): return self.metadata['placement']['availability-zone'] - def get_local_mirror(self): - return self.get_mirror_from_availability_zone() - - def get_mirror_from_availability_zone(self, availability_zone=None): - # Return type None indicates there is no cloud specific mirror - # Availability is like 'us-west-1b' or 'eu-west-1a' - if availability_zone is None: - availability_zone = self.get_availability_zone() - - if self.is_vpc(): - return None - - if not availability_zone: - return None - - mirror_tpl = self.distro.get_option('package_mirror_ec2_template', - None) - - if mirror_tpl is None: - return None - - # in EC2, the 'region' is 'us-east-1' if 'zone' is 'us-east-1a' - tpl_params = { - 'zone': availability_zone.strip(), - 'region': availability_zone[:-1] - } - mirror_url = mirror_tpl % (tpl_params) - - found = util.search_for_mirror([mirror_url]) - if found is not None: - return mirror_url - - return None - def _get_url_settings(self): mcfg = self.ds_cfg if not mcfg: @@ -255,6 +221,12 @@ class DataSourceEc2(sources.DataSource): return True return False + @property + def availability_zone(self): + try: + return self.metadata['placement']['availability-zone'] + except KeyError: + return None # Used to match classes to dependencies datasources = [ diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index ca9f58e5..4719d254 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -117,9 +117,9 @@ class DataSource(object): def get_locale(self): return 'en_US.UTF-8' - def get_local_mirror(self): - # ?? - return None + @property + def availability_zone(self): + return self.metadata.get('availability-zone') def get_instance_id(self): if not self.metadata or 'instance-id' not in self.metadata: @@ -166,6 +166,10 @@ class DataSource(object): else: return hostname + def get_package_mirror_info(self): + return self.distro.get_package_mirror_info( + availability_zone=self.availability_zone) + def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): ds_list = list_sources(cfg_list, ds_deps, pkg_list) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 2f6a566c..c9634a90 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -326,7 +326,7 @@ class Init(object): 'paths': self.paths, 'datasource': self.datasource, } - # TODO Hmmm, should we dynamically import these?? + # TODO(harlowja) Hmmm, should we dynamically import these?? 
def_handlers = [ cc_part.CloudConfigPartHandler(**opts), ss_part.ShellScriptPartHandler(**opts), @@ -519,7 +519,7 @@ class Modules(object): " but not on %s distro. It may or may not work" " correctly."), name, worked_distros, d_name) # Use the configs logger and not our own - # TODO: possibly check the module + # TODO(harlowja): possibly check the module # for having a LOG attr and just give it back # its own logger? func_args = [name, self.cfg, diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index f5d01818..af98b488 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -23,9 +23,9 @@ import os import email +from email.mime.base import MIMEBase from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText -from email.mime.base import MIMEBase from cloudinit import handlers from cloudinit import log as logging @@ -159,7 +159,7 @@ class UserDataProcessor(object): if isinstance(ent, (str, basestring)): ent = {'content': ent} if not isinstance(ent, (dict)): - # TODO raise? + # TODO(harlowja) raise? continue content = ent.get('content', '') diff --git a/cloudinit/util.py b/cloudinit/util.py index a8c0cceb..825867a7 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -24,8 +24,8 @@ from StringIO import StringIO -import copy as obj_copy import contextlib +import copy as obj_copy import errno import glob import grp @@ -317,8 +317,9 @@ def multi_log(text, console=True, stderr=True, else: log.log(log_level, text) + def is_ipv4(instr): - """ determine if input string is a ipv4 address. return boolean""" + """determine if input string is a ipv4 address. return boolean.""" toks = instr.split('.') if len(toks) != 4: return False @@ -826,12 +827,12 @@ def get_cmdline_url(names=('cloud-config-url', 'url'), def is_resolvable(name): - """ determine if a url is resolvable, return a boolean + """determine if a url is resolvable, return a boolean This also attempts to be resilent against dns redirection. Note, that normal nsswitch resolution is used here. So in order to avoid any utilization of 'search' entries in /etc/resolv.conf - we have to append '.'. + we have to append '.'. The top level 'invalid' domain is invalid per RFC. And example.com should also not exist. The random entry will be resolved inside @@ -847,7 +848,7 @@ def is_resolvable(name): try: result = socket.getaddrinfo(iname, None, 0, 0, socket.SOCK_STREAM, socket.AI_CANONNAME) - badresults[iname] = [] + badresults[iname] = [] for (_fam, _stype, _proto, cname, sockaddr) in result: badresults[iname].append("%s: %s" % (cname, sockaddr[0])) badips.add(sockaddr[0]) @@ -856,7 +857,7 @@ def is_resolvable(name): _DNS_REDIRECT_IP = badips if badresults: LOG.debug("detected dns redirection: %s" % badresults) - + try: result = socket.getaddrinfo(name, None) # check first result's sockaddr field @@ -874,7 +875,7 @@ def get_hostname(): def is_resolvable_url(url): - """ determine if this url is resolvable (existing or ip) """ + """determine if this url is resolvable (existing or ip).""" return (is_resolvable(urlparse.urlparse(url).hostname)) @@ -1105,7 +1106,7 @@ def hash_blob(blob, routine, mlen=None): def rename(src, dest): LOG.debug("Renaming %s to %s", src, dest) - # TODO use a se guard here?? + # TODO(harlowja) use a se guard here?? os.rename(src, dest) |
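The substantive change in this diff is the move from a single mirror string (get_local_mirror / get_package_mirror) to named mirror resolution: the datasource now exposes an availability_zone property, the distro selects a per-architecture entry from its package_mirrors config, and _get_package_mirror_info substitutes the zone (and, for EC2-style zone names, the derived region) into each 'search' template, keeping the 'failsafe' entries when nothing resolves. Below is a minimal standalone sketch of that resolution step, assuming an illustrative SAMPLE_MIRROR_INFO dict and a stub mirror_filter; in the commit itself the filter is util.search_for_mirror and the mirror data comes from the distro's package_mirrors option, so the URLs here are placeholders rather than values taken from this change.

import re

# The zone pattern and the mirror templates below are illustrative assumptions;
# the real pattern lives in distros/__init__.py and the real templates come
# from the distro's "package_mirrors" configuration.
EC2_AZ_RE = re.compile(
    r"^[a-z][a-z]-(north|northeast|east|southeast|south|"
    r"southwest|west|northwest)-[1-9][0-9]*[a-z]$")

SAMPLE_MIRROR_INFO = {
    "failsafe": {
        "primary": "http://archive.ubuntu.com/ubuntu",
        "security": "http://security.ubuntu.com/ubuntu",
    },
    "search": {
        "primary": [
            "http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/",
            "http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/",
        ],
        "security": ["http://security.ubuntu.com/ubuntu/"],
    },
}


def resolve_mirrors(mirror_info, availability_zone=None,
                    mirror_filter=lambda candidates: None):
    """Reduce a per-arch mirror_info entry to a {name: mirror_url} dict.

    Start from the 'failsafe' entries, then let the first reachable
    'search' candidate (with %(availability_zone)s / %(ec2_region)s
    substituted) override each name.
    """
    subst = {}
    if availability_zone:
        subst["availability_zone"] = availability_zone
        if EC2_AZ_RE.match(availability_zone):
            # an EC2 zone like 'us-east-1a' implies region 'us-east-1'
            subst["ec2_region"] = availability_zone[:-1]

    results = dict(mirror_info.get("failsafe", {}))
    for name, searchlist in mirror_info.get("search", {}).items():
        candidates = []
        for tmpl in searchlist:
            try:
                candidates.append(tmpl % subst)
            except KeyError:
                # the template needs a key (e.g. ec2_region) we could not derive
                continue
        found = mirror_filter(candidates)
        if found:
            results[name] = found
    return results


# With the stub filter nothing "resolves", so the failsafe mirrors win:
print(resolve_mirrors(SAMPLE_MIRROR_INFO, availability_zone="us-east-1a"))

find_apt_mirror_info then layers the legacy apt_mirror / apt_mirror_search options on top of this result by overriding only the 'primary' entry, which is why the security mirror still comes from get_package_mirror_info even when one of the legacy options is set.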