Diffstat (limited to 'cloudinit'): 57 files changed, 1530 insertions, 336 deletions
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 9e9e9e26..702977cb 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -91,7 +91,8 @@ def handle(name, cfg, cloud, log, _args): if matchcfg: matcher = re.compile(matchcfg).search else: - matcher = lambda f: False + def matcher(x): + return False errors = add_sources(cfg['apt_sources'], params, aa_repo_match=matcher) @@ -173,7 +174,8 @@ def add_sources(srclist, template_params=None, aa_repo_match=None): template_params = {} if aa_repo_match is None: - aa_repo_match = lambda f: False + def aa_repo_match(x): + return False errorlist = [] for ent in srclist: diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index d5b0d1d7..0ecc2e4c 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -167,11 +167,12 @@ def enumerate_disk(device, nodeps=False): parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0] for part in parts: - d = {'name': None, - 'type': None, - 'fstype': None, - 'label': None, - } + d = { + 'name': None, + 'type': None, + 'fstype': None, + 'label': None, + } for key, value in value_splitter(part): d[key.lower()] = value @@ -701,11 +702,12 @@ def lookup_force_flag(fs): """ A force flag might be -F or -F, this look it up """ - flags = {'ext': '-F', - 'btrfs': '-f', - 'xfs': '-f', - 'reiserfs': '-f', - } + flags = { + 'ext': '-F', + 'btrfs': '-f', + 'xfs': '-f', + 'reiserfs': '-f', + } if 'ext' in fs.lower(): fs = 'ext' @@ -824,10 +826,11 @@ def mkfs(fs_cfg): # Create the commands if fs_cmd: - fs_cmd = fs_cfg['cmd'] % {'label': label, - 'filesystem': fs_type, - 'device': device, - } + fs_cmd = fs_cfg['cmd'] % { + 'label': label, + 'filesystem': fs_type, + 'device': device, + } else: # Find the mkfs command mkfs_cmd = util.which("mkfs.%s" % fs_type) diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index ad957e12..4a51476f 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -28,9 +28,9 @@ frequency = PER_ALWAYS # Jinja formated default message FINAL_MESSAGE_DEF = ( - "## template: jinja\n" - "Cloud-init v. {{version}} finished at {{timestamp}}." - " Datasource {{datasource}}. Up {{uptime}} seconds" + "## template: jinja\n" + "Cloud-init v. {{version}} finished at {{timestamp}}." + " Datasource {{datasource}}. 
Up {{uptime}} seconds" ) diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index 456597af..3c2d9985 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -37,12 +37,11 @@ def handle(name, cfg, _cloud, log, _args): return idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None) - idevs_empty = util.get_cfg_option_str(mycfg, - "grub-pc/install_devices_empty", None) + idevs_empty = util.get_cfg_option_str( + mycfg, "grub-pc/install_devices_empty", None) if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or - (os.path.exists("/dev/xvda1") - and not os.path.exists("/dev/xvda"))): + (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))): if idevs is None: idevs = "" if idevs_empty is None: @@ -66,7 +65,7 @@ def handle(name, cfg, _cloud, log, _args): (idevs, idevs_empty)) log.debug("Setting grub debconf-set-selections with '%s','%s'" % - (idevs, idevs_empty)) + (idevs, idevs_empty)) try: util.subp(['debconf-set-selections'], dconf_sel) diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index f1c1adff..aa844ee9 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -48,7 +48,7 @@ def handle(name, cfg, cloud, log, _args): "ssh_fp_console_blacklist", []) key_blacklist = util.get_cfg_option_list(cfg, "ssh_key_console_blacklist", - ["ssh-dss"]) + ["ssh-dss"]) try: cmd = [helper_path] diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 0b9d846e..68fcb27f 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -38,12 +38,12 @@ distros = ['ubuntu'] # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 LSC_BUILTIN_CFG = { - 'client': { - 'log_level': "info", - 'url': "https://landscape.canonical.com/message-system", - 'ping_url': "http://landscape.canonical.com/ping", - 'data_path': "/var/lib/landscape/client", - } + 'client': { + 'log_level': "info", + 'url': "https://landscape.canonical.com/message-system", + 'ping_url': "http://landscape.canonical.com/ping", + 'data_path': "/var/lib/landscape/client", + } } diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py new file mode 100644 index 00000000..63b8fb63 --- /dev/null +++ b/cloudinit/config/cc_lxd.py @@ -0,0 +1,85 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2016 Canonical Ltd. +# +# Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +""" +This module initializes lxd using 'lxd init' + +Example config: + #cloud-config + lxd: + init: + network_address: <ip addr> + network_port: <port> + storage_backend: <zfs/dir> + storage_create_device: <dev> + storage_create_loop: <size> + storage_pool: <name> + trust_password: <password> +""" + +from cloudinit import util + + +def handle(name, cfg, cloud, log, args): + # Get config + lxd_cfg = cfg.get('lxd') + if not lxd_cfg: + log.debug("Skipping module named %s, not present or disabled by cfg") + return + if not isinstance(lxd_cfg, dict): + log.warn("lxd config must be a dictionary. found a '%s'", + type(lxd_cfg)) + return + + init_cfg = lxd_cfg.get('init') + if not isinstance(init_cfg, dict): + log.warn("lxd/init config must be a dictionary. found a '%s'", + type(init_cfg)) + init_cfg = {} + + if not init_cfg: + log.debug("no lxd/init config. disabled.") + return + + packages = [] + # Ensure lxd is installed + if not util.which("lxd"): + packages.append('lxd') + + # if using zfs, get the utils + if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'): + packages.append('zfs') + + if len(packages): + try: + cloud.distro.install_packages(packages) + except util.ProcessExecutionError as exc: + log.warn("failed to install packages %s: %s", packages, exc) + return + + # Set up lxd if init config is given + init_keys = ( + 'network_address', 'network_port', 'storage_backend', + 'storage_create_device', 'storage_create_loop', + 'storage_pool', 'trust_password') + cmd = ['lxd', 'init', '--auto'] + for k in init_keys: + if init_cfg.get(k): + cmd.extend(["--%s=%s" % + (k.replace('_', '-'), str(init_cfg[k]))]) + util.subp(cmd) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 11089d8d..4fe3ee21 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -204,12 +204,12 @@ def setup_swapfile(fname, size=None, maxsize=None): try: util.ensure_dir(tdir) util.log_time(LOG.debug, msg, func=util.subp, - args=[['sh', '-c', - ('rm -f "$1" && umask 0066 && ' - '{ fallocate -l "${2}M" "$1" || ' - ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' - 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), - 'setup_swap', fname, mbsize]]) + args=[['sh', '-c', + ('rm -f "$1" && umask 0066 && ' + '{ fallocate -l "${2}M" "$1" || ' + ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' + 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), + 'setup_swap', fname, mbsize]]) except Exception as e: raise IOError("Failed %s: %s" % (msg, e)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 7d9567e3..cc3f7f70 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -105,7 +105,7 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, + util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, condition, execmd, [args, devnull_fp]) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 4501598e..774d3322 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -36,8 +36,8 @@ def _autostart_puppet(log): # Set puppet to automatically start if os.path.exists('/etc/default/puppet'): util.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) + '-e', 's/^START=.*/START=yes/', + '/etc/default/puppet'], capture=False) elif 
os.path.exists('/bin/systemctl'): util.subp(['/bin/systemctl', 'enable', 'puppet.service'], capture=False) @@ -65,7 +65,7 @@ def handle(name, cfg, cloud, log, _args): " doing nothing.")) elif install: log.debug(("Attempting to install puppet %s,"), - version if version else 'latest') + version if version else 'latest') cloud.distro.install_packages(('puppet', version)) # ... and then update the puppet configuration diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index cbc07853..2a2a9f59 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -166,7 +166,7 @@ def handle(name, cfg, _cloud, log, args): func=do_resize, args=(resize_cmd, log)) else: util.log_time(logfunc=log.debug, msg="Resizing", - func=do_resize, args=(resize_cmd, log)) + func=do_resize, args=(resize_cmd, log)) action = 'Resized' if resize_root == NOBLOCK: diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 3b30c47e..6087c45c 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -126,10 +126,8 @@ class SubscriptionManager(object): "(True/False " return False, not_bool - if (self.servicelevel is not None) and \ - ((not self.auto_attach) - or (util.is_false(str(self.auto_attach)))): - + if (self.servicelevel is not None) and ((not self.auto_attach) or + (util.is_false(str(self.auto_attach)))): no_auto = ("The service-level key must be used in conjunction " "with the auto-attach key. Please re-run with " "auto-attach: True") diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 3288a853..1b011216 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -83,7 +83,7 @@ def handle(name, cfg, cloud, log, _args): len(seed_data), seed_path) util.append_file(seed_path, seed_data) - command = mycfg.get('command', ['pollinate', '-q']) + command = mycfg.get('command', None) req = mycfg.get('command_required', False) try: env = os.environ.copy() diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 5d7f4331..f43d8d5a 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -24,7 +24,7 @@ from cloudinit import util def handle(name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug(("Configuration option 'preserve_hostname' is set," - " not setting the hostname in module %s"), name) + " not setting the hostname in module %s"), name) return (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 5bd2dec6..d24e43c0 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -30,9 +30,10 @@ from cloudinit import distros as ds from cloudinit import ssh_util from cloudinit import util -DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding," -"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " -"rather than the user \\\"root\\\".\';echo;sleep 10\"") +DISABLE_ROOT_OPTS = ( + "no-port-forwarding,no-agent-forwarding," + "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\"" + " rather than the user \\\"root\\\".\';echo;sleep 10\"") GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index d3dd1f32..15703efe 100644 --- 
a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -41,10 +41,10 @@ def handle(name, cfg, cloud, log, _args): if not tpl_fn_name: raise RuntimeError(("No hosts template could be" " found for distro %s") % - (cloud.distro.osfamily)) + (cloud.distro.osfamily)) templater.render_to_file(tpl_fn_name, '/etc/hosts', - {'hostname': hostname, 'fqdn': fqdn}) + {'hostname': hostname, 'fqdn': fqdn}) elif manage_hosts == "localhost": (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) @@ -57,4 +57,4 @@ def handle(name, cfg, cloud, log, _args): cloud.distro.update_etc_hosts(hostname, fqdn) else: log.debug(("Configuration option 'manage_etc_hosts' is not set," - " not managing /etc/hosts in module %s"), name) + " not managing /etc/hosts in module %s"), name) diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index e396ba13..5b78afe1 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -29,7 +29,7 @@ frequency = PER_ALWAYS def handle(name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug(("Configuration option 'preserve_hostname' is set," - " not updating the hostname in module %s"), name) + " not updating the hostname in module %s"), name) return (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 3b821af9..64fba869 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -92,7 +92,7 @@ def handle(name, cfg, _cloud, log, _args): for req_field in ['baseurl']: if req_field not in repo_config: log.warn(("Repository %s does not contain a %s" - " configuration 'required' entry"), + " configuration 'required' entry"), repo_id, req_field) missing_required += 1 if not missing_required: diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 71884b32..e8220985 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -211,8 +211,8 @@ class Distro(object): # If the system hostname is different than the previous # one or the desired one lets update it as well - if (not sys_hostname) or (sys_hostname == prev_hostname - and sys_hostname != hostname): + if ((not sys_hostname) or (sys_hostname == prev_hostname and + sys_hostname != hostname)): update_files.append(sys_fn) # If something else has changed the hostname after we set it @@ -221,7 +221,7 @@ class Distro(object): if (sys_hostname and prev_hostname and sys_hostname != prev_hostname): LOG.info("%s differs from %s, assuming user maintained hostname.", - prev_hostname_fn, sys_fn) + prev_hostname_fn, sys_fn) return # Remove duplicates (incase the previous config filename) @@ -289,7 +289,7 @@ class Distro(object): def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): @@ -319,6 +319,11 @@ class Distro(object): LOG.info("User %s already exists, skipping." 
% name) return + if 'create_groups' in kwargs: + create_groups = kwargs.pop('create_groups') + else: + create_groups = True + adduser_cmd = ['useradd', name] log_adduser_cmd = ['useradd', name] @@ -346,6 +351,19 @@ class Distro(object): redact_opts = ['passwd'] + groups = kwargs.get('groups') + if groups: + if isinstance(groups, (list, tuple)): + kwargs['groups'] = ",".join(groups) + else: + groups = groups.split(",") + + if create_groups: + for group in kwargs.get('groups').split(","): + if not util.is_group(group): + self.create_group(group) + LOG.debug("created group %s for user %s", name, group) + # Check the values and create the command for key, val in kwargs.items(): @@ -393,6 +411,10 @@ class Distro(object): if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']: self.set_passwd(name, kwargs['plain_text_passwd']) + # Set password if hashed password is provided and non-empty + if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']: + self.set_passwd(name, kwargs['hashed_passwd'], hashed=True) + # Default locking down the account. 'lock_passwd' defaults to True. # lock account unless lock_password is False. if kwargs.get('lock_passwd', True): @@ -530,8 +552,10 @@ class Distro(object): util.logexc(LOG, "Failed to append sudoers file %s", sudo_file) raise e - def create_group(self, name, members): + def create_group(self, name, members=None): group_add_cmd = ['groupadd', name] + if not members: + members = [] # Check if group exists, and then add it doesn't if util.is_group(name): @@ -541,14 +565,14 @@ class Distro(object): util.subp(group_add_cmd) LOG.info("Created new group %s" % name) except Exception: - util.logexc("Failed to create group %s", name) + util.logexc(LOG, "Failed to create group %s", name) # Add members to the group, if so defined if len(members) > 0: for member in members: if not util.is_user(member): LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) + "; user does not exist.", member, name) continue util.subp(['usermod', '-a', '-G', name, member]) @@ -886,7 +910,7 @@ def fetch(name): locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro']) if not locs: raise ImportError("No distribution found for distro %s (searched %s)" - % (name, looked_locs)) + % (name, looked_locs)) mod = importer.import_module(locs[0]) cls = getattr(mod, 'Distro') return cls @@ -897,5 +921,9 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone", util.write_file(tz_conf, str(tz).rstrip() + "\n") # This ensures that the correct tz will be used for the system if tz_local and tz_file: - util.copy(tz_file, tz_local) + # use a symlink if there exists a symlink or tz_local is not present + if os.path.islink(tz_local) or not os.path.exists(tz_local): + os.symlink(tz_file, tz_local) + else: + util.copy(tz_file, tz_local) return diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 45fcf26f..93a2e008 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -74,7 +74,7 @@ class Distro(distros.Distro): 'Interface': dev, 'IP': info.get('bootproto'), 'Address': "('%s/%s')" % (info.get('address'), - info.get('netmask')), + info.get('netmask')), 'Gateway': info.get('gateway'), 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '') } @@ -86,7 +86,7 @@ class Distro(distros.Distro): if nameservers: util.write_file(self.resolve_conf_fn, - convert_resolv_conf(nameservers)) + convert_resolv_conf(nameservers)) return dev_names @@ -102,7 +102,7 @@ class Distro(distros.Distro): def 
_bring_up_interface(self, device_name): cmd = ['netctl', 'restart', device_name] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 6d3a82bf..db5890b1 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -159,8 +159,9 @@ class Distro(distros.Distro): # Allow the output of this to flow outwards (ie not be captured) util.log_time(logfunc=LOG.debug, - msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp, - args=(cmd,), kwargs={'env': e, 'capture': False}) + msg="apt-%s [%s]" % (command, ' '.join(cmd)), + func=util.subp, + args=(cmd,), kwargs={'env': e, 'capture': False}) def update_package_sources(self): self._runner.run("update-sources", self.package_command, diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 4c484639..72012056 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -205,8 +205,8 @@ class Distro(distros.Distro): redact_opts = ['passwd'] for key, val in kwargs.items(): - if (key in adduser_opts and val - and isinstance(val, six.string_types)): + if (key in adduser_opts and val and + isinstance(val, six.string_types)): adduser_cmd.extend([adduser_opts[key], val]) # Redact certain fields from the logs diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index 9e80583c..6267dd6e 100644 --- a/cloudinit/distros/gentoo.py +++ b/cloudinit/distros/gentoo.py @@ -66,7 +66,7 @@ class Distro(distros.Distro): def _bring_up_interface(self, device_name): cmd = ['/etc/init.d/net.%s' % device_name, 'restart'] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): @@ -88,7 +88,7 @@ class Distro(distros.Distro): (_out, err) = util.subp(cmd) if len(err): LOG.warn("Running %s resulted in stderr output: %s", cmd, - err) + err) except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) return False diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index 84a1de42..efb185d4 100644 --- a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -84,5 +84,5 @@ class HostnameConf(object): hostnames_found.add(head) if len(hostnames_found) > 1: raise IOError("Multiple hostnames (%s) found!" 
- % (hostnames_found)) + % (hostnames_found)) return entries diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 8aee03a4..2ed13d9c 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -132,7 +132,7 @@ class ResolvConf(object): # Some hard limit on 256 chars total raise ValueError(("Adding %r would go beyond the " "256 maximum search list character limit") - % (search_domain)) + % (search_domain)) self._remove_option('search') self._contents.append(('option', ['search', s_list, ''])) return flat_sds diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index d795e12f..6157cf32 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -77,8 +77,7 @@ class SysConf(configobj.ConfigObj): quot_func = None if value[0] in ['"', "'"] and value[-1] in ['"', "'"]: if len(value) == 1: - quot_func = (lambda x: - self._get_single_quote(x) % x) + quot_func = (lambda x: self._get_single_quote(x) % x) else: # Quote whitespace if it isn't the start + end of a shell command if value.strip().startswith("$(") and value.strip().endswith(")"): @@ -91,10 +90,10 @@ class SysConf(configobj.ConfigObj): # to use single quotes which won't get expanded... if re.search(r"[\n\"']", value): quot_func = (lambda x: - self._get_triple_quote(x) % x) + self._get_triple_quote(x) % x) else: quot_func = (lambda x: - self._get_single_quote(x) % x) + self._get_single_quote(x) % x) else: quot_func = pipes.quote if not quot_func: diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py index 5bebd318..baecdac9 100644 --- a/cloudinit/filters/launch_index.py +++ b/cloudinit/filters/launch_index.py @@ -61,7 +61,7 @@ class Filter(object): discarded += 1 LOG.debug(("Discarding %s multipart messages " "which do not match launch index %s"), - discarded, self.wanted_idx) + discarded, self.wanted_idx) new_message = copy.copy(message) new_message.set_payload(new_msgs) new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs)) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 5e99d185..0cf982f3 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -139,9 +139,10 @@ class FileSemaphores(object): # but the item had run before we did canon_sem_name. if cname != name and os.path.exists(self._get_path(name, freq)): LOG.warn("%s has run without canonicalized name [%s].\n" - "likely the migrator has not yet run. It will run next boot.\n" - "run manually with: cloud-init single --name=migrator" - % (name, cname)) + "likely the migrator has not yet run. 
" + "It will run next boot.\n" + "run manually with: cloud-init single --name=migrator" + % (name, cname)) return True return False @@ -335,19 +336,19 @@ class Paths(object): template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/') self.template_tpl = os.path.join(template_dir, '%s.tmpl') self.lookups = { - "handlers": "handlers", - "scripts": "scripts", - "vendor_scripts": "scripts/vendor", - "sem": "sem", - "boothooks": "boothooks", - "userdata_raw": "user-data.txt", - "userdata": "user-data.txt.i", - "obj_pkl": "obj.pkl", - "cloud_config": "cloud-config.txt", - "vendor_cloud_config": "vendor-cloud-config.txt", - "data": "data", - "vendordata_raw": "vendor-data.txt", - "vendordata": "vendor-data.txt.i", + "handlers": "handlers", + "scripts": "scripts", + "vendor_scripts": "scripts/vendor", + "sem": "sem", + "boothooks": "boothooks", + "userdata_raw": "user-data.txt", + "userdata": "user-data.txt.i", + "obj_pkl": "obj.pkl", + "cloud_config": "cloud-config.txt", + "vendor_cloud_config": "vendor-cloud-config.txt", + "data": "data", + "vendordata_raw": "vendor-data.txt", + "vendordata": "vendor-data.txt.i", } # Set when a datasource becomes active self.datasource = ds diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b61e5613..8c258ea1 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -42,6 +42,7 @@ CFG_BUILTIN = { 'CloudSigma', 'CloudStack', 'SmartOS', + 'Bigstep', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 60d58d6d..cd61df31 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -284,7 +284,7 @@ class DataSourceAltCloud(sources.DataSource): # In the future 'dsmode' like behavior can be added to offer user # the ability to run before networking. 
datasources = [ - (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index c6228e6c..2af0ad9b 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -31,17 +31,16 @@ from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import sources from cloudinit import util -from cloudinit.sources.helpers.azure import ( - get_metadata_from_fabric, iid_from_shared_config_content) +from cloudinit.sources.helpers.azure import get_metadata_from_fabric LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] -BOUNCE_COMMAND = ['sh', '-xc', +BOUNCE_COMMAND = [ + 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] -DATA_DIR_CLEAN_LIST = ['SharedConfig.xml'] BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START, @@ -93,9 +92,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ policy = cfg['hostname_bounce']['policy'] previous_hostname = get_hostname(hostname_command) - if (not util.is_true(cfg.get('set_hostname')) - or util.is_false(policy) - or (previous_hostname == temp_hostname and policy != 'force')): + if (not util.is_true(cfg.get('set_hostname')) or + util.is_false(policy) or + (previous_hostname == temp_hostname and policy != 'force')): yield None return set_hostname(temp_hostname, hostname_command) @@ -125,8 +124,8 @@ class DataSourceAzureNet(sources.DataSource): with temporary_hostname(temp_hostname, self.ds_cfg, hostname_command=hostname_command) \ as previous_hostname: - if (previous_hostname is not None - and util.is_true(self.ds_cfg.get('set_hostname'))): + if (previous_hostname is not None and + util.is_true(self.ds_cfg.get('set_hostname'))): cfg = self.ds_cfg['hostname_bounce'] try: perform_hostname_bounce(hostname=temp_hostname, @@ -144,8 +143,6 @@ class DataSourceAzureNet(sources.DataSource): self.ds_cfg['agent_command']) ddir = self.ds_cfg['data_dir'] - shcfgxml = os.path.join(ddir, "SharedConfig.xml") - wait_for = [shcfgxml] fp_files = [] key_value = None @@ -156,23 +153,16 @@ class DataSourceAzureNet(sources.DataSource): else: bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] - LOG.debug("ssh authentication: using fingerprint from fabirc") + LOG.debug("ssh authentication: " + "using fingerprint from fabirc") missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", func=wait_for_files, - args=(wait_for + fp_files,)) + args=(fp_files,)) if len(missing): LOG.warn("Did not find files, but going on: %s", missing) metadata = {} - if shcfgxml in missing: - LOG.warn("SharedConfig.xml missing, using static instance-id") - else: - try: - metadata['instance-id'] = iid_from_shared_config(shcfgxml) - except ValueError as e: - LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) - metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata @@ -229,21 +219,6 @@ class DataSourceAzureNet(sources.DataSource): user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) - if found != ddir: - cached_ovfenv = util.load_file( - os.path.join(ddir, 'ovf-env.xml'), quiet=True, decode=False) - if cached_ovfenv != files['ovf-env.xml']: - # source was not walinux-agent's 
datadir, so we have to clean - # up so 'wait_for_files' doesn't return early due to stale data - cleaned = [] - for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]: - if os.path.exists(f): - util.del_file(f) - cleaned.append(f) - if cleaned: - LOG.info("removed stale file(s) in '%s': %s", - ddir, str(cleaned)) - # walinux agent writes files world readable, but expects # the directory to be protected. write_files(ddir, files, dirmode=0o700) @@ -259,6 +234,7 @@ class DataSourceAzureNet(sources.DataSource): " on Azure.", exc_info=True) return False + self.metadata['instance-id'] = util.read_dmi_data('system-uuid') self.metadata.update(fabric_data) found_ephemeral = find_fabric_formatted_ephemeral_disk() @@ -532,7 +508,7 @@ def read_azure_ovf(contents): raise BrokenAzureDataSource("invalid xml: %s" % e) results = find_child(dom.documentElement, - lambda n: n.localName == "ProvisioningSection") + lambda n: n.localName == "ProvisioningSection") if len(results) == 0: raise NonAzureDataSource("No ProvisioningSection") @@ -542,7 +518,8 @@ def read_azure_ovf(contents): provSection = results[0] lpcs_nodes = find_child(provSection, - lambda n: n.localName == "LinuxProvisioningConfigurationSet") + lambda n: + n.localName == "LinuxProvisioningConfigurationSet") if len(results) == 0: raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") @@ -649,12 +626,6 @@ def load_azure_ds_dir(source_dir): return (md, ud, cfg, {'ovf-env.xml': contents}) -def iid_from_shared_config(path): - with open(path, "rb") as fp: - content = fp.read() - return iid_from_shared_config_content(content) - - class BrokenAzureDataSource(Exception): pass @@ -665,7 +636,7 @@ class NonAzureDataSource(Exception): # Used to match classes to dependencies datasources = [ - (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py new file mode 100644 index 00000000..b5ee4129 --- /dev/null +++ b/cloudinit/sources/DataSourceBigstep.py @@ -0,0 +1,57 @@ +# +# Copyright (C) 2015-2016 Bigstep Cloud Ltd. 
+# +# Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com> +# + +import json +import errno + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit import url_helper + +LOG = logging.getLogger(__name__) + + +class DataSourceBigstep(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.metadata = {} + self.vendordata_raw = "" + self.userdata_raw = "" + + def get_data(self, apply_filter=False): + url = get_url_from_file() + if url is None: + return False + response = url_helper.readurl(url) + decoded = json.loads(response.contents) + self.metadata = decoded["metadata"] + self.vendordata_raw = decoded["vendordata_raw"] + self.userdata_raw = decoded["userdata_raw"] + return True + + +def get_url_from_file(): + try: + content = util.load_file("/var/lib/cloud/data/seed/bigstep/url") + except IOError as e: + # If the file doesn't exist, then the server probably isn't a Bigstep + # instance; otherwise, another problem exists which needs investigation + if e.errno == errno.ENOENT: + return None + else: + raise + return content + +# Used to match classes to dependencies +datasources = [ + (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index eb474079..e3916208 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -39,7 +39,7 @@ FS_TYPES = ('vfat', 'iso9660') LABEL_TYPES = ('config-2',) POSSIBLE_MOUNTS = ('sr', 'cd') OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS - for i in range(0, 2))) + for i in range(0, 2))) class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5d47564d..12e863d2 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -101,8 +101,8 @@ class DataSourceDigitalOcean(sources.DataSource): # Used to match classes to dependencies datasources = [ - (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), - ] + (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] # Return a list of data sources that match this set of dependencies diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0032d06c..3ef2c6af 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - self.userdata_raw = ec2.get_instance_userdata(self.api_ver, - self.metadata_address) + self.userdata_raw = \ + ec2.get_instance_userdata(self.api_ver, self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", - int(time.time() - start_time)) + int(time.time() - start_time)) return True except Exception: util.logexc(LOG, "Failed reading from metadata address %s", @@ -132,13 +132,13 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - 
timeout=timeout, status_cb=LOG.warn) + timeout=timeout, status_cb=LOG.warn) if url: LOG.debug("Using metadata source: '%s'", url2base[url]) else: LOG.critical("Giving up on md from %s after %s seconds", - urls, int(time.time() - start_time)) + urls, int(time.time() - start_time)) self.metadata_address = url2base.get(url) return bool(url) @@ -206,7 +206,7 @@ class DataSourceEc2(sources.DataSource): # Used to match classes to dependencies datasources = [ - (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index cfc59ca5..d828f078 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -254,7 +254,7 @@ class MAASSeedDirMalformed(Exception): # Used to match classes to dependencies datasources = [ - (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] @@ -275,17 +275,18 @@ if __name__ == "__main__": parser = argparse.ArgumentParser(description='Interact with MAAS DS') parser.add_argument("--config", metavar="file", - help="specify DS config file", default=None) + help="specify DS config file", default=None) parser.add_argument("--ckey", metavar="key", - help="the consumer key to auth with", default=None) + help="the consumer key to auth with", default=None) parser.add_argument("--tkey", metavar="key", - help="the token key to auth with", default=None) + help="the token key to auth with", default=None) parser.add_argument("--csec", metavar="secret", - help="the consumer secret (likely '')", default="") + help="the consumer secret (likely '')", default="") parser.add_argument("--tsec", metavar="secret", - help="the token secret to auth with", default=None) + help="the token secret to auth with", default=None) parser.add_argument("--apiver", metavar="version", - help="the apiver to use ("" can be used)", default=MD_VERSION) + help="the apiver to use ("" can be used)", + default=MD_VERSION) subcmds = parser.add_subparsers(title="subcommands", dest="subcmd") subcmds.add_parser('crawl', help="crawl the datasource") @@ -297,7 +298,7 @@ if __name__ == "__main__": args = parser.parse_args() creds = {'consumer_key': args.ckey, 'token_key': args.tkey, - 'token_secret': args.tsec, 'consumer_secret': args.csec} + 'token_secret': args.tsec, 'consumer_secret': args.csec} if args.config: cfg = util.read_conf(args.config) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 4dffe6e6..4cad6877 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -263,8 +263,8 @@ class DataSourceNoCloudNet(DataSourceNoCloud): # Used to match classes to dependencies datasources = [ - (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )), - (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )), + (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index 12a8a992..d1a62b2a 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -47,8 +47,8 @@ class DataSourceNone(sources.DataSource): # Used to match classes to dependencies datasources = [ - (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), - (DataSourceNone, []), + (DataSourceNone, 
(sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceNone, []), ] diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 58a4b2a2..23996b4a 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -24,11 +24,16 @@ from xml.dom import minidom import base64 import os +import shutil import re +import time from cloudinit import log as logging from cloudinit import sources from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.config import Config +from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile +from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator LOG = logging.getLogger(__name__) @@ -50,13 +55,53 @@ class DataSourceOVF(sources.DataSource): found = [] md = {} ud = "" + vmwarePlatformFound = False + vmwareImcConfigFilePath = '' defaults = { "instance-id": "iid-dsovf", } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) - if seedfile: + + system_type = util.read_dmi_data("system-product-name") + if system_type is None: + LOG.debug("No system-product-name found") + elif 'vmware' in system_type.lower(): + LOG.debug("VMware Virtualization Platform found") + if not util.get_cfg_option_bool( + self.sys_cfg, "disable_vmware_customization", True): + deployPkgPluginPath = search_file("/usr/lib/vmware-tools", + "libdeployPkgPlugin.so") + if deployPkgPluginPath: + vmwareImcConfigFilePath = util.log_time( + logfunc=LOG.debug, + msg="waiting for configuration file", + func=wait_for_imc_cfg_file, args=("/tmp", "cust.cfg")) + + if vmwareImcConfigFilePath: + LOG.debug("Found VMware DeployPkg Config File at %s" % + vmwareImcConfigFilePath) + else: + LOG.debug("Did not find VMware DeployPkg Config File Path") + else: + LOG.debug("Customization for VMware platform is disabled.") + + if vmwareImcConfigFilePath: + try: + cf = ConfigFile(vmwareImcConfigFilePath) + conf = Config(cf) + (md, ud, cfg) = read_vmware_imc(conf) + nicConfigurator = NicConfigurator(conf.nics) + nicConfigurator.configure() + vmwarePlatformFound = True + except Exception as inst: + LOG.debug("Error while parsing the Customization " + "Config File: %s", inst) + finally: + dirPath = os.path.dirname(vmwareImcConfigFilePath) + shutil.rmtree(dirPath) + elif seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) @@ -76,7 +121,7 @@ class DataSourceOVF(sources.DataSource): found.append(name) # There was no OVF transports found - if len(found) == 0: + if len(found) == 0 and not vmwarePlatformFound: return False if 'seedfrom' in md and md['seedfrom']: @@ -129,6 +174,33 @@ class DataSourceOVFNet(DataSourceOVF): self.supported_seed_starts = ("http://", "https://", "ftp://") +def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): + waited = 0 + + while waited < maxwait: + fileFullPath = search_file(dirpath, filename) + if fileFullPath: + return fileFullPath + time.sleep(naplen) + waited += naplen + return None + + +# This will return a dict with some content +# meta-data, user-data, some config +def read_vmware_imc(config): + md = {} + cfg = {} + ud = "" + if config.host_name: + if config.domain_name: + md['local-hostname'] = config.host_name + "." 
+ config.domain_name + else: + md['local-hostname'] = config.host_name + + return (md, ud, cfg) + + # This will return a dict with some content # meta-data, user-data, some config def read_ovf_environment(contents): @@ -264,14 +336,14 @@ def get_properties(contents): # could also check here that elem.namespaceURI == # "http://schemas.dmtf.org/ovf/environment/1" propSections = find_child(dom.documentElement, - lambda n: n.localName == "PropertySection") + lambda n: n.localName == "PropertySection") if len(propSections) == 0: raise XmlError("No 'PropertySection's") props = {} propElems = find_child(propSections[0], - (lambda n: n.localName == "Property")) + (lambda n: n.localName == "Property")) for elem in propElems: key = elem.attributes.getNamedItemNS(envNsURI, "key").value @@ -281,14 +353,25 @@ def get_properties(contents): return props +def search_file(dirpath, filename): + if not dirpath or not filename: + return None + + for root, dirs, files in os.walk(dirpath): + if filename in files: + return os.path.join(root, filename) + + return None + + class XmlError(Exception): pass # Used to match classes to dependencies datasources = ( - (DataSourceOVF, (sources.DEP_FILESYSTEM, )), - (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceOVF, (sources.DEP_FILESYSTEM, )), + (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ac2c3b45..681f3a96 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -149,8 +149,8 @@ class BrokenContextDiskDir(Exception): class OpenNebulaNetwork(object): REG_DEV_MAC = re.compile( - r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?', - re.MULTILINE | re.DOTALL) + r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?', + re.MULTILINE | re.DOTALL) def __init__(self, ip, context): self.ip = ip @@ -404,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None): if ssh_key_var: lines = context.get(ssh_key_var).splitlines() results['metadata']['public-keys'] = [l for l in lines - if len(l) and not l.startswith("#")] + if len(l) and not + l.startswith("#")] # custom hostname -- try hostname or leave cloud-init # itself create hostname from IP address later diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index c9b497df..5edab152 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -20,10 +20,13 @@ # Datasource for provisioning on SmartOS. This works on Joyent # and public/private Clouds using SmartOS. # -# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests # The meta-data is transmitted via key/value pairs made by # requests on the console. For example, to get the hostname, you # would send "GET hostname" on /dev/ttyS1. +# For Linux Guests running in LX-Brand Zones on SmartOS hosts +# a socket (/native/.zonecontrol/metadata.sock) is used instead +# of a serial console. 
# # Certain behavior is defined by the DataDictionary # http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html @@ -34,6 +37,8 @@ import contextlib import os import random import re +import socket +import stat import serial @@ -46,6 +51,7 @@ LOG = logging.getLogger(__name__) SMARTOS_ATTRIB_MAP = { # Cloud-init Key : (SmartOS Key, Strip line endings) + 'instance-id': ('sdc:uuid', True), 'local-hostname': ('hostname', True), 'public-keys': ('root_authorized_keys', True), 'user-script': ('user-script', False), @@ -76,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME] # BUILTIN_DS_CONFIG = { 'serial_device': '/dev/ttyS1', + 'metadata_sockfile': '/native/.zonecontrol/metadata.sock', 'seed_timeout': 60, 'no_base64_decode': ['root_authorized_keys', 'motd_sys_info', @@ -83,7 +90,7 @@ BUILTIN_DS_CONFIG = { 'user-data', 'user-script', 'sdc:datacenter_name', - ], + 'sdc:uuid'], 'base64_keys': [], 'base64_all': False, 'disk_aliases': {'ephemeral0': '/dev/vdb'}, @@ -94,7 +101,7 @@ BUILTIN_CLOUD_CONFIG = { 'ephemeral0': {'table_type': 'mbr', 'layout': False, 'overwrite': False} - }, + }, 'fs_setup': [{'label': 'ephemeral0', 'filesystem': 'ext3', 'device': 'ephemeral0'}], @@ -150,17 +157,27 @@ class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.is_smartdc = None - self.ds_cfg = util.mergemanydict([ self.ds_cfg, util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) self.metadata = {} - self.cfg = BUILTIN_CLOUD_CONFIG - self.seed = self.ds_cfg.get("serial_device") - self.seed_timeout = self.ds_cfg.get("serial_timeout") + # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but + # report 'BrandZ virtual linux' as the kernel version + if os.uname()[3].lower() == 'brandz virtual linux': + LOG.debug("Host is SmartOS, guest in Zone") + self.is_smartdc = True + self.smartos_type = 'lx-brand' + self.cfg = {} + self.seed = self.ds_cfg.get("metadata_sockfile") + else: + self.is_smartdc = True + self.smartos_type = 'kvm' + self.seed = self.ds_cfg.get("serial_device") + self.cfg = BUILTIN_CLOUD_CONFIG + self.seed_timeout = self.ds_cfg.get("serial_timeout") self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode') self.b64_keys = self.ds_cfg.get('base64_keys') self.b64_all = self.ds_cfg.get('base64_all') @@ -170,12 +187,49 @@ class DataSourceSmartOS(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + def _get_seed_file_object(self): + if not self.seed: + raise AttributeError("seed device is not set") + + if self.smartos_type == 'lx-brand': + if not stat.S_ISSOCK(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a socket", self.seed) + return None + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(self.seed) + return sock.makefile('rwb') + else: + if not stat.S_ISCHR(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a character device") + return None + ser = serial.Serial(self.seed, timeout=self.seed_timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % self.seed) + return ser + return None + + def _set_provisioned(self): + '''Mark the instance provisioning state as successful. + + When run in a zone, the host OS will look for /var/svc/provisioning + to be renamed as /var/svc/provision_success. This should be done + after meta-data is successfully retrieved and from this point + the host considers the provision of the zone to be a success and + keeps the zone running. 
+ ''' + + LOG.debug('Instance provisioning state set as successful') + svc_path = '/var/svc' + if os.path.exists('/'.join([svc_path, 'provisioning'])): + os.rename('/'.join([svc_path, 'provisioning']), + '/'.join([svc_path, 'provision_success'])) + def get_data(self): md = {} ud = "" if not device_exists(self.seed): - LOG.debug("No serial device '%s' found for SmartOS datasource", + LOG.debug("No metadata device '%s' found for SmartOS datasource", self.seed) return False @@ -185,29 +239,36 @@ class DataSourceSmartOS(sources.DataSource): LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)") return False - dmi_info = dmi_data() - if dmi_info is False: - LOG.debug("No dmidata utility found") - return False - - system_uuid, system_type = tuple(dmi_info) - if 'smartdc' not in system_type.lower(): - LOG.debug("Host is not on SmartOS. system_type=%s", system_type) + # SDC KVM instances will provide dmi data, LX-brand does not + if self.smartos_type == 'kvm': + dmi_info = dmi_data() + if dmi_info is False: + LOG.debug("No dmidata utility found") + return False + + system_type = dmi_info + if 'smartdc' not in system_type.lower(): + LOG.debug("Host is not on SmartOS. system_type=%s", + system_type) + return False + LOG.debug("Host is SmartOS, guest in KVM") + + seed_obj = self._get_seed_file_object() + if seed_obj is None: + LOG.debug('Seed file object not found.') return False - self.is_smartdc = True - md['instance-id'] = system_uuid + with contextlib.closing(seed_obj) as seed: + b64_keys = self.query('base64_keys', seed, strip=True, b64=False) + if b64_keys is not None: + self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] - b64_keys = self.query('base64_keys', strip=True, b64=False) - if b64_keys is not None: - self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] + b64_all = self.query('base64_all', seed, strip=True, b64=False) + if b64_all is not None: + self.b64_all = util.is_true(b64_all) - b64_all = self.query('base64_all', strip=True, b64=False) - if b64_all is not None: - self.b64_all = util.is_true(b64_all) - - for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): - smartos_noun, strip = attribute - md[ci_noun] = self.query(smartos_noun, strip=strip) + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): + smartos_noun, strip = attribute + md[ci_noun] = self.query(smartos_noun, seed, strip=strip) # @datadictionary: This key may contain a program that is written # to a file in the filesystem of the guest on each boot and then @@ -240,7 +301,7 @@ class DataSourceSmartOS(sources.DataSource): # Handle the cloud-init regular meta if not md['local-hostname']: - md['local-hostname'] = system_uuid + md['local-hostname'] = md['instance-id'] ud = None if md['user-data']: @@ -257,6 +318,8 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud self.vendordata_raw = md['vendor-data'] + + self._set_provisioned() return True def device_name_to_device(self, name): @@ -268,40 +331,64 @@ class DataSourceSmartOS(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def query(self, noun, strip=False, default=None, b64=None): + def query(self, noun, seed_file, strip=False, default=None, b64=None): if b64 is None: if noun in self.smartos_no_base64: b64 = False elif self.b64_all or noun in self.b64_keys: b64 = True - return query_data(noun=noun, strip=strip, seed_device=self.seed, - seed_timeout=self.seed_timeout, default=default, - b64=b64) + return self._query_data(noun, 
seed_file, strip=strip, + default=default, b64=b64) + def _query_data(self, noun, seed_file, strip=False, + default=None, b64=None): + """Makes a request via "GET <NOUN>" -def device_exists(device): - """Symplistic method to determine if the device exists or not""" - return os.path.exists(device) + In the response, the first line is the status, while subsequent + lines are is the value. A blank line with a "." is used to + indicate end of response. + If the response is expected to be base64 encoded, then set + b64encoded to true. Unfortantely, there is no way to know if + something is 100% encoded, so this method relies on being told + if the data is base64 or not. + """ -def get_serial(seed_device, seed_timeout): - """This is replaced in unit testing, allowing us to replace - serial.Serial with a mocked class. + if not noun: + return False - The timeout value of 60 seconds should never be hit. The value - is taken from SmartOS own provisioning tools. Since we are reading - each line individually up until the single ".", the transfer is - usually very fast (i.e. microseconds) to get the response. - """ - if not seed_device: - raise AttributeError("seed_device value is not set") + response = JoyentMetadataClient(seed_file).get_metadata(noun) + + if response is None: + return default + + if b64 is None: + b64 = self._query_data('b64-%s' % noun, seed_file, b64=False, + default=False, strip=True) + b64 = util.is_true(b64) + + resp = None + if b64 or strip: + resp = "".join(response).rstrip() + else: + resp = "".join(response) - ser = serial.Serial(seed_device, timeout=seed_timeout) - if not ser.isOpen(): - raise SystemError("Unable to open %s" % seed_device) + if b64: + try: + return util.b64d(resp) + # Bogus input produces different errors in Python 2 and 3; + # catch both. + except (TypeError, binascii.Error): + LOG.warn("Failed base64 decoding key '%s'", noun) + return resp - return ser + return resp + + +def device_exists(device): + """Symplistic method to determine if the device exists or not""" + return os.path.exists(device) class JoyentMetadataFetchException(Exception): @@ -320,8 +407,8 @@ class JoyentMetadataClient(object): r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)' r'( (?P<payload>.+))?)') - def __init__(self, serial): - self.serial = serial + def __init__(self, metasource): + self.metasource = metasource def _checksum(self, body): return '{0:08x}'.format( @@ -356,67 +443,30 @@ class JoyentMetadataClient(object): util.b64e(metadata_key)) msg = 'V2 {0} {1} {2}\n'.format( len(message_body), self._checksum(message_body), message_body) - LOG.debug('Writing "%s" to serial port.', msg) - self.serial.write(msg.encode('ascii')) - response = self.serial.readline().decode('ascii') - LOG.debug('Read "%s" from serial port.', response) - return self._get_value_from_frame(request_id, response) - - -def query_data(noun, seed_device, seed_timeout, strip=False, default=None, - b64=None): - """Makes a request to via the serial console via "GET <NOUN>" - - In the response, the first line is the status, while subsequent lines - are is the value. A blank line with a "." is used to indicate end of - response. - - If the response is expected to be base64 encoded, then set b64encoded - to true. Unfortantely, there is no way to know if something is 100% - encoded, so this method relies on being told if the data is base64 or - not. 
- """ - if not noun: - return False - - with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser: - client = JoyentMetadataClient(ser) - response = client.get_metadata(noun) - - if response is None: - return default - - if b64 is None: - b64 = query_data('b64-%s' % noun, seed_device=seed_device, - seed_timeout=seed_timeout, b64=False, - default=False, strip=True) - b64 = util.is_true(b64) - - resp = None - if b64 or strip: - resp = "".join(response).rstrip() - else: - resp = "".join(response) - - if b64: - try: - return util.b64d(resp) - # Bogus input produces different errors in Python 2 and 3; catch both. - except (TypeError, binascii.Error): - LOG.warn("Failed base64 decoding key '%s'", noun) - return resp + LOG.debug('Writing "%s" to metadata transport.', msg) + self.metasource.write(msg.encode('ascii')) + self.metasource.flush() + + response = bytearray() + response.extend(self.metasource.read(1)) + while response[-1:] != b'\n': + response.extend(self.metasource.read(1)) + response = response.rstrip().decode('ascii') + LOG.debug('Read "%s" from metadata transport.', response) + + if 'SUCCESS' not in response: + return None - return resp + return self._get_value_from_frame(request_id, response) def dmi_data(): - sys_uuid = util.read_dmi_data("system-uuid") sys_type = util.read_dmi_data("system-product-name") - if not sys_uuid or not sys_type: + if not sys_type: return None - return (sys_uuid.lower(), sys_type) + return sys_type def write_boot_content(content, content_f, link=None, shebang=False, @@ -462,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False, except Exception as e: util.logexc(LOG, ("Failed to identify script type for %s" % - content_f, e)) + content_f, e)) if link: try: diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 281d733e..018cac6d 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -79,12 +79,6 @@ class GoalState(object): './Container/RoleInstanceList/RoleInstance/InstanceId') @property - def shared_config_xml(self): - url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' - '/Configuration/SharedConfig') - return self.http_client.get(url).contents - - @property def certificates_xml(self): if self._certificates_xml is None: url = self._text_from_xpath( @@ -172,19 +166,6 @@ class OpenSSLManager(object): return keys -def iid_from_shared_config_content(content): - """ - find INSTANCE_ID in: - <?xml version="1.0" encoding="utf-8"?> - <SharedConfig version="1.0.0.0" goalStateIncarnation="1"> - <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0"> - <Service name="..." 
guid="{00000000-0000-0000-0000-000000000000}"/> - """ - root = ElementTree.fromstring(content) - depnode = root.find('Deployment') - return depnode.get('name') - - class WALinuxAgentShim(object): REPORT_READY_XML_TEMPLATE = '\n'.join([ @@ -216,6 +197,21 @@ class WALinuxAgentShim(object): self.openssl_manager.clean_up() @staticmethod + def get_ip_from_lease_value(lease_value): + unescaped_value = lease_value.replace('\\', '') + if len(unescaped_value) > 4: + hex_string = '' + for hex_pair in unescaped_value.split(':'): + if len(hex_pair) == 1: + hex_pair = '0' + hex_pair + hex_string += hex_pair + packed_bytes = struct.pack( + '>L', int(hex_string.replace(':', ''), 16)) + else: + packed_bytes = unescaped_value.encode('utf-8') + return socket.inet_ntoa(packed_bytes) + + @staticmethod def find_endpoint(): LOG.debug('Finding Azure endpoint...') content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') @@ -225,16 +221,7 @@ class WALinuxAgentShim(object): value = line.strip(' ').split(' ', 2)[-1].strip(';\n"') if value is None: raise Exception('No endpoint found in DHCP config.') - if ':' in value: - hex_string = '' - for hex_pair in value.split(':'): - if len(hex_pair) == 1: - hex_pair = '0' + hex_pair - hex_string += hex_pair - value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) - else: - value = value.encode('utf-8') - endpoint_ip_address = socket.inet_ntoa(value) + endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address @@ -263,8 +250,6 @@ class WALinuxAgentShim(object): public_keys = self.openssl_manager.parse_certificates( goal_state.certificates_xml) data = { - 'instance-id': iid_from_shared_config_content( - goal_state.shared_config_xml), 'public-keys': public_keys, } self._report_ready(goal_state, http_client) diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py new file mode 100644 index 00000000..386225d5 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/__init__.py @@ -0,0 +1,13 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py new file mode 100644 index 00000000..386225d5 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/__init__.py @@ -0,0 +1,13 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
new file mode 100644
index 00000000..faba5887
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class BootProtoEnum:
+ """Specifies the NIC Boot Settings."""
+
+ DHCP = 'dhcp'
+ STATIC = 'static'
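As a quick illustration of how the enum above is consumed (the caller here is hypothetical, not part of this change), Nic.bootProto in nic.py returns a lower-cased string that compares directly against these values:

    from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum

    def needs_static_addressing(bootproto):
        # bootproto is expected lower-cased, as Nic.bootProto returns it.
        return bootproto == BootProtoEnum.STATIC

    assert needs_static_addressing('static')
    assert not needs_static_addressing('dhcp')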
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
new file mode 100644
index 00000000..aebc12a0
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -0,0 +1,95 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .nic import Nic
+
+
+class Config:
+ """
+ Stores the contents specified in the customization
+ specification file.
+ """
+
+ DNS = 'DNS|NAMESERVER|'
+ SUFFIX = 'DNS|SUFFIX|'
+ PASS = 'PASSWORD|-PASS'
+ TIMEZONE = 'DATETIME|TIMEZONE'
+ UTC = 'DATETIME|UTC'
+ HOSTNAME = 'NETWORK|HOSTNAME'
+ DOMAINNAME = 'NETWORK|DOMAINNAME'
+
+ def __init__(self, configFile):
+ self._configFile = configFile
+
+ @property
+ def host_name(self):
+ """Return the hostname."""
+ return self._configFile.get(Config.HOSTNAME, None)
+
+ @property
+ def domain_name(self):
+ """Return the domain name."""
+ return self._configFile.get(Config.DOMAINNAME, None)
+
+ @property
+ def timezone(self):
+ """Return the timezone."""
+ return self._configFile.get(Config.TIMEZONE, None)
+
+ @property
+ def utc(self):
+ """Retrieves whether to set time to UTC or Local."""
+ return self._configFile.get(Config.UTC, None)
+
+ @property
+ def admin_password(self):
+ """Return the root password to be set."""
+ return self._configFile.get(Config.PASS, None)
+
+ @property
+ def name_servers(self):
+ """Return the list of DNS servers."""
+ res = []
+ cnt = self._configFile.get_count_with_prefix(Config.DNS)
+ for i in range(1, cnt + 1):
+ key = Config.DNS + str(i)
+ res.append(self._configFile[key])
+
+ return res
+
+ @property
+ def dns_suffixes(self):
+ """Return the list of DNS Suffixes."""
+ res = []
+ cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
+ for i in range(1, cnt + 1):
+ key = Config.SUFFIX + str(i)
+ res.append(self._configFile[key])
+
+ return res
+
+ @property
+ def nics(self):
+ """Return the list of associated NICs."""
+ res = []
+ nics = self._configFile['NIC-CONFIG|NICS']
+ for nic in nics.split(','):
+ res.append(Nic(nic, self._configFile))
+
+ return res
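To make the lookup scheme concrete, here is a small sketch that feeds Config a stand-in for the flattened key/value store it expects. A real ConfigFile (added below) exposes the same interface; the keys and sample values here are invented for illustration:

    from cloudinit.sources.helpers.vmware.imc.config import Config

    class FakeConfigFile(dict):
        """Dict with the one extra method Config relies on."""
        def get_count_with_prefix(self, prefix):
            return len([k for k in self if k.startswith(prefix)])

    data = FakeConfigFile({
        'NETWORK|HOSTNAME': 'myhost',
        'NETWORK|DOMAINNAME': 'example.internal',
        'DNS|NAMESERVER|1': '10.0.0.2',
        'DNS|NAMESERVER|2': '10.0.0.3',
    })

    conf = Config(data)
    print(conf.host_name)     # myhost
    print(conf.name_servers)  # ['10.0.0.2', '10.0.0.3']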
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py new file mode 100644 index 00000000..bb9fb7dc --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_file.py @@ -0,0 +1,129 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi <stanguturi@vmware.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import logging + +try: + import configparser +except ImportError: + import ConfigParser as configparser + +from .config_source import ConfigSource + +logger = logging.getLogger(__name__) + + +class ConfigFile(ConfigSource, dict): + """ConfigFile module to load the content from a specified source.""" + + def __init__(self, filename): + self._loadConfigFile(filename) + pass + + def _insertKey(self, key, val): + """ + Inserts a Key Value pair. + + Keyword arguments: + key -- The key to insert + val -- The value to insert for the key + + """ + key = key.strip() + val = val.strip() + + if key.startswith('-') or '|-' in key: + canLog = False + else: + canLog = True + + # "sensitive" settings shall not be logged + if canLog: + logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val)) + else: + logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key) + + self[key] = val + + def _loadConfigFile(self, filename): + """ + Parses properties from the specified config file. + + Any previously available properties will be removed. + Sensitive data will not be logged in case the key starts + from '-'. + + Keyword arguments: + filename - The full path to the config file. + """ + logger.info('Parsing the config file %s.' % filename) + + config = configparser.ConfigParser() + config.optionxform = str + config.read(filename) + + self.clear() + + for category in config.sections(): + logger.debug("FOUND CATEGORY = '%s'" % category) + + for (key, value) in config.items(category): + self._insertKey(category + '|' + key, value) + + def should_keep_current_value(self, key): + """ + Determines whether a value for a property must be kept. + + If the propery is missing, it is treated as it should be not + changed by the engine. + + Keyword arguments: + key -- The key to search for. + """ + # helps to distinguish from "empty" value which is used to indicate + # "removal" + return key not in self + + def should_remove_current_value(self, key): + """ + Determines whether a value for the property must be removed. + + If the specified key is empty, it is treated as it should be + removed by the engine. + + Return true if the value can be removed, false otherwise. + + Keyword arguments: + key -- The key to search for. + """ + # helps to distinguish from "missing" value which is used to indicate + # "keeping unchanged" + if key in self: + return not bool(self[key]) + else: + return False + + def get_count_with_prefix(self, prefix): + """ + Return the total count of keys that start with the specified prefix. 
+
+ Keyword arguments:
+ prefix -- prefix of the key
+ """
+ return len([key for key in self if key.startswith(prefix)])
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
new file mode 100644
index 00000000..7266b699
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .config_source import ConfigSource
+
+
+class ConfigNamespace(ConfigSource):
+ """Specifies the Config Namespace."""
+ pass
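The ConfigFile added above flattens INI sections into pipe-delimited 'CATEGORY|KEY' entries, which is what Config and Nic query against. A rough sketch of that flattening with an invented customization spec (the file content below is not from the diff):

    import tempfile

    from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile

    SAMPLE = """\
    [NETWORK]
    HOSTNAME = myhost
    DOMAINNAME = example.internal

    [NIC-CONFIG]
    NICS = NIC1

    [NIC1]
    MACADDR = 00:50:56:aa:bb:cc
    BOOTPROTO = dhcp
    """

    with tempfile.NamedTemporaryFile('w', suffix='.cfg', delete=False) as fp:
        fp.write(SAMPLE)
        path = fp.name

    cf = ConfigFile(path)
    print(cf['NETWORK|HOSTNAME'])             # myhost
    print(cf['NIC-CONFIG|NICS'])              # NIC1
    print(cf.get_count_with_prefix('NIC1|'))  # 2

Keys whose name starts with '-' or contains '|-' are treated as sensitive and are not logged with their values, per _insertKey above.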
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py new file mode 100644 index 00000000..8c5c08cf --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -0,0 +1,246 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2016 VMware INC. +# +# Author: Sankar Tanguturi <stanguturi@vmware.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import logging +import os +import re + +from cloudinit import util + +logger = logging.getLogger(__name__) + + +class NicConfigurator: + def __init__(self, nics): + """ + Initialize the Nic Configurator + @param nics (list) an array of nics to configure + """ + self.nics = nics + self.mac2Name = {} + self.ipv4PrimaryGateway = None + self.ipv6PrimaryGateway = None + self.find_devices() + self._primaryNic = self.get_primary_nic() + + def get_primary_nic(self): + """ + Retrieve the primary nic if it exists + @return (NicBase): the primary nic if exists, None otherwise + """ + primary_nics = [nic for nic in self.nics if nic.primary] + if not primary_nics: + return None + elif len(primary_nics) > 1: + raise Exception('There can only be one primary nic', + [nic.mac for nic in primary_nics]) + else: + return primary_nics[0] + + def find_devices(self): + """ + Create the mac2Name dictionary + The mac address(es) are in the lower case + """ + cmd = ['ip', 'addr', 'show'] + (output, err) = util.subp(cmd) + sections = re.split(r'\n\d+: ', '\n' + output)[1:] + + macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' + for section in sections: + match = re.search(macPat, section) + if not match: # Only keep info about nics + continue + mac = match.group(1).lower() + name = section.split(':', 1)[0] + self.mac2Name[mac] = name + + def gen_one_nic(self, nic): + """ + Return the lines needed to configure a nic + @return (str list): the string list to configure the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + name = self.mac2Name.get(nic.mac.lower()) + if not name: + raise ValueError('No known device has MACADDR: %s' % nic.mac) + + if nic.onboot: + lines.append('auto %s' % name) + + # Customize IPv4 + lines.extend(self.gen_ipv4(name, nic)) + + # Customize IPv6 + lines.extend(self.gen_ipv6(name, nic)) + + lines.append('') + + return lines + + def gen_ipv4(self, name, nic): + """ + Return the lines needed to configure the IPv4 setting of a nic + @return (str list): the string list to configure the gateways + @param name (str): name of the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + + bootproto = nic.bootProto.lower() + if nic.ipv4_mode.lower() == 'disabled': + bootproto = 'manual' + lines.append('iface %s inet %s' % (name, bootproto)) + + if bootproto != 'static': + return lines + + # Static Ipv4 + v4 = nic.staticIpv4 + if v4.ip: + lines.append(' address %s' % v4.ip) + if v4.netmask: + lines.append(' netmask %s' % v4.netmask) + + # Add the primary gateway + if nic.primary and 
v4.gateways: + self.ipv4PrimaryGateway = v4.gateways[0] + lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway) + return lines + + # Add routes if there is no primary nic + if not self._primaryNic: + lines.extend(self.gen_ipv4_route(nic, v4.gateways)) + + return lines + + def gen_ipv4_route(self, nic, gateways): + """ + Return the lines needed to configure additional Ipv4 route + @return (str list): the string list to configure the gateways + @param nic (NicBase): the nic to configure + @param gateways (str list): the list of gateways + """ + lines = [] + + for gateway in gateways: + lines.append(' up route add default gw %s metric 10000' % + gateway) + + return lines + + def gen_ipv6(self, name, nic): + """ + Return the lines needed to configure the gateways for a nic + @return (str list): the string list to configure the gateways + @param name (str): name of the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + + if not nic.staticIpv6: + return lines + + # Static Ipv6 + addrs = nic.staticIpv6 + lines.append('iface %s inet6 static' % name) + lines.append(' address %s' % addrs[0].ip) + lines.append(' netmask %s' % addrs[0].netmask) + + for addr in addrs[1:]: + lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip, + addr.netmask)) + # Add the primary gateway + if nic.primary: + for addr in addrs: + if addr.gateway: + self.ipv6PrimaryGateway = addr.gateway + lines.append(' gateway %s' % self.ipv6PrimaryGateway) + return lines + + # Add routes if there is no primary nic + if not self._primaryNic: + lines.extend(self._genIpv6Route(name, nic, addrs)) + + return lines + + def _genIpv6Route(self, name, nic, addrs): + lines = [] + + for addr in addrs: + lines.append(' up route -A inet6 add default gw ' + '%s metric 10000' % addr.gateway) + + return lines + + def generate(self): + """Return the lines that is needed to configure the nics""" + lines = [] + lines.append('iface lo inet loopback') + lines.append('auto lo') + lines.append('') + + for nic in self.nics: + lines.extend(self.gen_one_nic(nic)) + + return lines + + def clear_dhcp(self): + logger.info('Clearing DHCP leases') + + util.subp(["pkill", "dhclient"]) + util.subp(["rm", "-f", "/var/lib/dhcp/*"]) + + def if_down_up(self): + names = [] + for nic in self.nics: + name = self.mac2Name.get(nic.mac.lower()) + names.append(name) + + for name in names: + logger.info('Bring down interface %s' % name) + util.subp(["ifdown", "%s" % name]) + + self.clear_dhcp() + + for name in names: + logger.info('Bring up interface %s' % name) + util.subp(["ifup", "%s" % name]) + + def configure(self): + """ + Configure the /etc/network/intefaces + Make a back up of the original + """ + containingDir = '/etc/network' + + interfaceFile = os.path.join(containingDir, 'interfaces') + originalFile = os.path.join(containingDir, + 'interfaces.before_vmware_customization') + + if not os.path.exists(originalFile) and os.path.exists(interfaceFile): + os.rename(interfaceFile, originalFile) + + lines = self.generate() + with open(interfaceFile, 'w') as fp: + for line in lines: + fp.write('%s\n' % line) + + self.if_down_up() diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py new file mode 100644 index 00000000..a367e476 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_source.py @@ -0,0 +1,23 @@ +# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ConfigSource:
+ """Specifies a source for the Config Content."""
+ pass
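For orientation, the Debian-style stanza that NicConfigurator (added above in config_nic.py) writes to /etc/network/interfaces for a static, primary NIC comes out roughly as follows; the interface name and addresses are invented, and exact indentation may differ:

    auto eth0
    iface eth0 inet static
        address 192.168.10.5
        netmask 255.255.255.0
        gateway 192.168.10.1 metric 0

generate() also prepends the loopback stanza ('iface lo inet loopback' / 'auto lo'), and non-primary NICs get 'up route add default gw ... metric 10000' lines instead of a gateway entry.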
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py new file mode 100644 index 00000000..33f88726 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py @@ -0,0 +1,45 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi <stanguturi@vmware.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + + +class Ipv4ModeEnum: + """ + The IPv4 configuration mode which directly represents the user's goal. + + This mode effectively acts as a contract of the in-guest customization + engine. It must be set based on what the user has requested and should + not be changed by those layers. It's up to the in-guest engine to + interpret and materialize the user's request. + """ + + # The legacy mode which only allows dhcp/static based on whether IPv4 + # addresses list is empty or not + IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE' + + # IPv4 must use static address. Reserved for future use + IPV4_MODE_STATIC = 'STATIC' + + # IPv4 must use DHCPv4. Reserved for future use + IPV4_MODE_DHCP = 'DHCP' + + # IPv4 must be disabled + IPV4_MODE_DISABLED = 'DISABLED' + + # IPv4 settings should be left untouched. Reserved for future use + IPV4_MODE_AS_IS = 'AS_IS' diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py new file mode 100644 index 00000000..b5d704ea --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/nic.py @@ -0,0 +1,147 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi <stanguturi@vmware.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ +from .boot_proto import BootProtoEnum +from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base + + +class Nic(NicBase): + """ + Holds the information about each NIC specified + in the customization specification file + """ + + def __init__(self, name, configFile): + self._name = name + self._configFile = configFile + + def _get(self, what): + return self._configFile.get(self.name + '|' + what, None) + + def _get_count_with_prefix(self, prefix): + return self._configFile.get_count_with_prefix(self.name + prefix) + + @property + def name(self): + return self._name + + @property + def mac(self): + return self._get('MACADDR').lower() + + @property + def primary(self): + value = self._get('PRIMARY') + if value: + value = value.lower() + return value == 'yes' or value == 'true' + else: + return False + + @property + def onboot(self): + value = self._get('ONBOOT') + if value: + value = value.lower() + return value == 'yes' or value == 'true' + else: + return False + + @property + def bootProto(self): + value = self._get('BOOTPROTO') + if value: + return value.lower() + else: + return "" + + @property + def ipv4_mode(self): + value = self._get('IPv4_MODE') + if value: + return value.lower() + else: + return "" + + @property + def staticIpv4(self): + """ + Checks the BOOTPROTO property and returns StaticIPv4Addr + configuration object if STATIC configuration is set. + """ + if self.bootProto == BootProtoEnum.STATIC: + return [StaticIpv4Addr(self)] + else: + return None + + @property + def staticIpv6(self): + cnt = self._get_count_with_prefix('|IPv6ADDR|') + + if not cnt: + return None + + result = [] + for index in range(1, cnt + 1): + result.append(StaticIpv6Addr(self, index)) + + return result + + +class StaticIpv4Addr(StaticIpv4Base): + """Static IPV4 Setting.""" + + def __init__(self, nic): + self._nic = nic + + @property + def ip(self): + return self._nic._get('IPADDR') + + @property + def netmask(self): + return self._nic._get('NETMASK') + + @property + def gateways(self): + value = self._nic._get('GATEWAY') + if value: + return [x.strip() for x in value.split(',')] + else: + return None + + +class StaticIpv6Addr(StaticIpv6Base): + """Static IPV6 Address.""" + + def __init__(self, nic, index): + self._nic = nic + self._index = index + + @property + def ip(self): + return self._nic._get('IPv6ADDR|' + str(self._index)) + + @property + def netmask(self): + return self._nic._get('IPv6NETMASK|' + str(self._index)) + + @property + def gateway(self): + return self._nic._get('IPv6GATEWAY|' + str(self._index)) diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py new file mode 100644 index 00000000..030ba311 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py @@ -0,0 +1,154 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi <stanguturi@vmware.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ + +class NicBase: + """ + Define what are expected of each nic. + The following properties should be provided in an implementation class. + """ + + @property + def mac(self): + """ + Retrieves the mac address of the nic + @return (str) : the MACADDR setting + """ + raise NotImplementedError('MACADDR') + + @property + def primary(self): + """ + Retrieves whether the nic is the primary nic + Indicates whether NIC will be used to define the default gateway. + If none of the NICs is configured to be primary, default gateway won't + be set. + @return (bool): the PRIMARY setting + """ + raise NotImplementedError('PRIMARY') + + @property + def onboot(self): + """ + Retrieves whether the nic should be up at the boot time + @return (bool) : the ONBOOT setting + """ + raise NotImplementedError('ONBOOT') + + @property + def bootProto(self): + """ + Retrieves the boot protocol of the nic + @return (str): the BOOTPROTO setting, valid values: dhcp and static. + """ + raise NotImplementedError('BOOTPROTO') + + @property + def ipv4_mode(self): + """ + Retrieves the IPv4_MODE + @return (str): the IPv4_MODE setting, valid values: + backwards_compatible, static, dhcp, disabled, as_is + """ + raise NotImplementedError('IPv4_MODE') + + @property + def staticIpv4(self): + """ + Retrieves the static IPv4 configuration of the nic + @return (StaticIpv4Base list): the static ipv4 setting + """ + raise NotImplementedError('Static IPv4') + + @property + def staticIpv6(self): + """ + Retrieves the IPv6 configuration of the nic + @return (StaticIpv6Base list): the static ipv6 setting + """ + raise NotImplementedError('Static Ipv6') + + def validate(self): + """ + Validate the object + For example, the staticIpv4 property is required and should not be + empty when ipv4Mode is STATIC + """ + raise NotImplementedError('Check constraints on properties') + + +class StaticIpv4Base: + """ + Define what are expected of a static IPv4 setting + The following properties should be provided in an implementation class. + """ + + @property + def ip(self): + """ + Retrieves the Ipv4 address + @return (str): the IPADDR setting + """ + raise NotImplementedError('Ipv4 Address') + + @property + def netmask(self): + """ + Retrieves the Ipv4 NETMASK setting + @return (str): the NETMASK setting + """ + raise NotImplementedError('Ipv4 NETMASK') + + @property + def gateways(self): + """ + Retrieves the gateways on this Ipv4 subnet + @return (str list): the GATEWAY setting + """ + raise NotImplementedError('Ipv4 GATEWAY') + + +class StaticIpv6Base: + """Define what are expected of a static IPv6 setting + The following properties should be provided in an implementation class. 
+ """ + + @property + def ip(self): + """ + Retrieves the Ipv6 address + @return (str): the IPv6ADDR setting + """ + raise NotImplementedError('Ipv6 Address') + + @property + def netmask(self): + """ + Retrieves the Ipv6 NETMASK setting + @return (str): the IPv6NETMASK setting + """ + raise NotImplementedError('Ipv6 NETMASK') + + @property + def gateway(self): + """ + Retrieves the Ipv6 GATEWAY setting + @return (str): the IPv6GATEWAY setting + """ + raise NotImplementedError('Ipv6 GATEWAY') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 9b2f5ed5..c74a7ae2 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -31,7 +31,8 @@ LOG = logging.getLogger(__name__) DEF_SSHD_CFG = "/etc/ssh/sshd_config" # taken from openssh source key.c/key_type_from_name -VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", +VALID_KEY_TYPES = ( + "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 9f192c8d..dbcf3d55 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -509,13 +509,13 @@ class Init(object): def consume_data(self, frequency=PER_INSTANCE): # Consume the userdata first, because we need want to let the part # handlers run first (for merging stuff) - with events.ReportEventStack( - "consume-user-data", "reading and applying user-data", - parent=self.reporter): + with events.ReportEventStack("consume-user-data", + "reading and applying user-data", + parent=self.reporter): self._consume_userdata(frequency) - with events.ReportEventStack( - "consume-vendor-data", "reading and applying vendor-data", - parent=self.reporter): + with events.ReportEventStack("consume-vendor-data", + "reading and applying vendor-data", + parent=self.reporter): self._consume_vendordata(frequency) # Perform post-consumption adjustments so that @@ -655,7 +655,7 @@ class Modules(object): else: raise TypeError(("Failed to read '%s' item in config," " unknown type %s") % - (item, type_utils.obj_name(item))) + (item, type_utils.obj_name(item))) return module_list def _fixup_modules(self, raw_mods): @@ -762,8 +762,8 @@ class Modules(object): if skipped: LOG.info("Skipping modules %s because they are not verified " - "on distro '%s'. To run anyway, add them to " - "'unverified_modules' in config.", skipped, d_name) + "on distro '%s'. 
To run anyway, add them to " + "'unverified_modules' in config.", skipped, d_name) if forced: LOG.info("running unverified_modules: %s", forced) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index f2e1390e..936f7da5 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -252,9 +252,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # attrs return UrlResponse(r) except exceptions.RequestException as e: - if (isinstance(e, (exceptions.HTTPError)) - and hasattr(e, 'response') # This appeared in v 0.10.8 - and hasattr(e.response, 'status_code')): + if (isinstance(e, (exceptions.HTTPError)) and + hasattr(e, 'response') and # This appeared in v 0.10.8 + hasattr(e.response, 'status_code')): excps.append(UrlError(e, code=e.response.status_code, headers=e.response.headers, url=url)) diff --git a/cloudinit/util.py b/cloudinit/util.py index 83c2c0d2..20916e53 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -76,7 +76,9 @@ FALSE_STRINGS = ('off', '0', 'no', 'false') # Helper utils to see if running in a container -CONTAINER_TESTS = ('running-in-container', 'lxc-is-container') +CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'], + ['running-in-container'], + ['lxc-is-container']) def decode_binary(blob, encoding='utf-8'): @@ -610,7 +612,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): def make_url(scheme, host, port=None, - path='', params='', query='', fragment=''): + path='', params='', query='', fragment=''): pieces = [] pieces.append(scheme or '') @@ -802,8 +804,8 @@ def load_yaml(blob, default=None, allowed=(dict,)): blob = decode_binary(blob) try: LOG.debug("Attempting to load yaml from string " - "of length %s with allowed root types %s", - len(blob), allowed) + "of length %s with allowed root types %s", + len(blob), allowed) converted = safeyaml.load(blob) if not isinstance(converted, allowed): # Yes this will just be caught, but thats ok for now... @@ -876,7 +878,7 @@ def read_conf_with_confd(cfgfile): if not isinstance(confd, six.string_types): raise TypeError(("Config file %s contains 'conf_d' " "with non-string type %s") % - (cfgfile, type_utils.obj_name(confd))) + (cfgfile, type_utils.obj_name(confd))) else: confd = str(confd).strip() elif os.path.isdir("%s.d" % cfgfile): @@ -1039,7 +1041,8 @@ def is_resolvable(name): for iname in badnames: try: result = socket.getaddrinfo(iname, None, 0, 0, - socket.SOCK_STREAM, socket.AI_CANONNAME) + socket.SOCK_STREAM, + socket.AI_CANONNAME) badresults[iname] = [] for (_fam, _stype, _proto, cname, sockaddr) in result: badresults[iname].append("%s: %s" % (cname, sockaddr[0])) @@ -1107,7 +1110,7 @@ def close_stdin(): def find_devs_with(criteria=None, oformat='device', - tag=None, no_cache=False, path=None): + tag=None, no_cache=False, path=None): """ find devices matching given criteria (via blkid) criteria can be *one* of: @@ -1626,7 +1629,7 @@ def write_file(filename, content, mode=0o644, omode="wb"): content = decode_binary(content) write_type = 'characters' LOG.debug("Writing to %s - %s: [%s] %s %s", - filename, omode, mode, len(content), write_type) + filename, omode, mode, len(content), write_type) with SeLinuxGuard(path=filename): with open(filename, omode) as fh: fh.write(content) @@ -1749,7 +1752,7 @@ def is_container(): try: # try to run a helper program. if it returns true/zero # then we're inside a container. 
otherwise, no - subp([helper]) + subp(helper) return True except (IOError, OSError): pass @@ -2137,15 +2140,21 @@ def _read_dmi_syspath(key): LOG.debug("did not find %s", dmi_key_path) return None - key_data = load_file(dmi_key_path) + key_data = load_file(dmi_key_path, decode=False) if not key_data: LOG.debug("%s did not return any data", dmi_key_path) return None - LOG.debug("dmi data %s returned %s", dmi_key_path, key_data) - return key_data.strip() + # uninitialized dmi values show as all \xff and /sys appends a '\n'. + # in that event, return a string of '.' in the same length. + if key_data == b'\xff' * (len(key_data) - 1) + b'\n': + key_data = b"" - except Exception as e: + str_data = key_data.decode('utf8').strip() + LOG.debug("dmi data %s returned %s", dmi_key_path, str_data) + return str_data + + except Exception: logexc(LOG, "failed read of %s", dmi_key_path) return None @@ -2159,6 +2168,9 @@ def _call_dmidecode(key, dmidecode_path): cmd = [dmidecode_path, "--string", key] (result, _err) = subp(cmd) LOG.debug("dmidecode returned '%s' for '%s'", result, key) + result = result.strip() + if result.replace(".", "") == "": + return "" return result except (IOError, OSError) as _err: LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err.message) |
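The util.py change at the end treats uninitialized DMI values (all 0xff bytes, as exposed under /sys with a trailing newline) as empty rather than returning garbage. A minimal standalone sketch of that check, with fabricated sample bytes:

    def clean_dmi_bytes(key_data):
        # /sys/class/dmi/id/* appends a newline; an uninitialized field is all 0xff.
        if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
            return ""
        return key_data.decode('utf8').strip()

    assert clean_dmi_bytes(b'\xff\xff\xff\n') == ""
    assert clean_dmi_bytes(b'ec2instance\n') == "ec2instance"

The dmidecode path gets the analogous guard: a result consisting only of dots is returned as an empty string.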