Diffstat (limited to 'cloudinit')
79 files changed, 4395 insertions, 593 deletions
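Editor's note: the first hunk below (cloudinit/cloud.py) makes the event reporter an optional argument to Cloud. A minimal sketch of what that buys callers, assuming only the names visible in that hunk (make_cloud and the argument values are illustrative, not part of the changeset):

    from cloudinit.cloud import Cloud
    from cloudinit.reporting import events

    def make_cloud(datasource, paths, cfg, distro, runners):
        # Callers that care about boot-event reporting can pass their own
        # stack; everyone else silently gets the disabled
        # 'unnamed-cloud-reporter' default added by this changeset.
        stack = events.ReportEventStack(
            name="init-cloud", description="build cloud object",
            reporting_enabled=True)
        return Cloud(datasource, paths, cfg, distro, runners, reporter=stack)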
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 95e0cfb2..3e6be203 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -24,6 +24,7 @@ import copy import os from cloudinit import log as logging +from cloudinit.reporting import events LOG = logging.getLogger(__name__) @@ -40,12 +41,18 @@ LOG = logging.getLogger(__name__) class Cloud(object): - def __init__(self, datasource, paths, cfg, distro, runners): + def __init__(self, datasource, paths, cfg, distro, runners, reporter=None): self.datasource = datasource self.paths = paths self.distro = distro self._cfg = cfg self._runners = runners + if reporter is None: + reporter = events.ReportEventStack( + name="unnamed-cloud-reporter", + description="unnamed-cloud-reporter", + reporting_enabled=False) + self.reporter = reporter # If a 'user' manipulates logging or logging services # it is typically useful to cause the logging to be diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 9e9e9e26..702977cb 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -91,7 +91,8 @@ def handle(name, cfg, cloud, log, _args): if matchcfg: matcher = re.compile(matchcfg).search else: - matcher = lambda f: False + def matcher(x): + return False errors = add_sources(cfg['apt_sources'], params, aa_repo_match=matcher) @@ -173,7 +174,8 @@ def add_sources(srclist, template_params=None, aa_repo_match=None): template_params = {} if aa_repo_match is None: - aa_repo_match = lambda f: False + def aa_repo_match(x): + return False errorlist = [] for ent in srclist: diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index e18c5405..28711a59 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -285,7 +285,7 @@ def install_chef(cloud, chef_cfg, log): chef_version = util.get_cfg_option_str(chef_cfg, 'version', None) ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version', RUBY_VERSION_DEFAULT) - install_chef_from_gems(cloud.distro, ruby_version, chef_version) + install_chef_from_gems(ruby_version, chef_version, cloud.distro) # Retain backwards compat, by preferring True instead of False # when not provided/overriden... 
run = util.get_cfg_option_bool(chef_cfg, 'exec', default=True) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index d5b0d1d7..bbaf9646 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -167,11 +167,12 @@ def enumerate_disk(device, nodeps=False): parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0] for part in parts: - d = {'name': None, - 'type': None, - 'fstype': None, - 'label': None, - } + d = { + 'name': None, + 'type': None, + 'fstype': None, + 'label': None, + } for key, value in value_splitter(part): d[key.lower()] = value @@ -701,11 +702,12 @@ def lookup_force_flag(fs): """ A force flag might be -F or -f, this looks it up """ - flags = {'ext': '-F', - 'btrfs': '-f', - 'xfs': '-f', - 'reiserfs': '-f', - } + flags = { + 'ext': '-F', + 'btrfs': '-f', + 'xfs': '-f', + 'reiserfs': '-f', + } if 'ext' in fs.lower(): fs = 'ext' @@ -824,10 +826,11 @@ def mkfs(fs_cfg): # Create the commands if fs_cmd: - fs_cmd = fs_cfg['cmd'] % {'label': label, - 'filesystem': fs_type, - 'device': device, - } + fs_cmd = fs_cfg['cmd'] % { + 'label': label, + 'filesystem': fs_type, + 'device': device, + } else: # Find the mkfs command mkfs_cmd = util.which("mkfs.%s" % fs_type) @@ -844,9 +847,9 @@ def mkfs(fs_cfg): if label: fs_cmd.extend(["-L", label]) - # File systems that support the -F flag - if not fs_cmd and (overwrite or device_type(device) == "disk"): - fs_cmd.append(lookup_force_flag(fs_type)) + # File systems that support the -F flag + if overwrite or device_type(device) == "disk": + fs_cmd.append(lookup_force_flag(fs_type)) # Add the extends FS options if fs_opts: diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py new file mode 100644 index 00000000..39e3850e --- /dev/null +++ b/cloudinit/config/cc_fan.py @@ -0,0 +1,101 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# +# Author: Scott Moser <scott.moser@canonical.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +""" +fan module allows configuration of Ubuntu Fan + https://wiki.ubuntu.com/FanNetworking + +Example config: + #cloud-config + fan: + config: | + # fan 240 + 10.0.0.0/8 eth0/16 dhcp + 10.0.0.0/8 eth1/16 dhcp off + # fan 241 + 241.0.0.0/8 eth0/16 dhcp + config_path: /etc/network/fan + +If cloud-init sees a 'fan' entry in cloud-config it will + a.) write 'config_path' with the contents + b.) install the package 'ubuntu-fan' if it is not installed + c.)
ensure the service is started (or restarted if it was previously running) +""" + +from cloudinit import log as logging +from cloudinit import util +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + +frequency = PER_INSTANCE + +BUILTIN_CFG = { + 'config': None, + 'config_path': '/etc/network/fan', +} + + +def stop_update_start(service, config_file, content, systemd=False): + if systemd: + cmds = {'stop': ['systemctl', 'stop', service], + 'start': ['systemctl', 'start', service], + 'enable': ['systemctl', 'enable', service]} + else: + cmds = {'stop': ['service', service, 'stop'], + 'start': ['service', service, 'start']} + + def run(cmd, msg): + try: + return util.subp(cmd, capture=True) + except util.ProcessExecutionError as e: + LOG.warn("failed: %s (%s): %s", msg, cmd, e) + return False + + stop_failed = not run(cmds['stop'], msg='stop %s' % service) + if not content.endswith('\n'): + content += '\n' + util.write_file(config_file, content, omode="w") + + ret = run(cmds['start'], msg='start %s' % service) + if ret and stop_failed: + LOG.warn("success: %s started", service) + + if 'enable' in cmds: + ret = run(cmds['enable'], msg='enable %s' % service) + + return ret + + +def handle(name, cfg, cloud, log, args): + cfgin = cfg.get('fan') + if not cfgin: + cfgin = {} + mycfg = util.mergemanydict([cfgin, BUILTIN_CFG]) + + if not mycfg.get('config'): + LOG.debug("%s: no 'fan' config entry. disabling", name) + return + + util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w") + distro = cloud.distro + if not util.which('fanctl'): + distro.install_packages(['ubuntu-fan']) + + stop_update_start( + service='ubuntu-fan', config_file=mycfg.get('config_path'), + content=mycfg.get('config'), systemd=distro.uses_systemd())
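Editor's note: a hedged sketch of exercising the cc_fan helper above on a systemd host; the config text is invented, and on a sysvinit host the corrected command table falls back to 'service ubuntu-fan stop/start':

    from cloudinit.config.cc_fan import stop_update_start

    FAN_CONFIG = "# fan 240\n10.0.0.0/8 eth0/16 dhcp\n"

    # Stops ubuntu-fan, rewrites /etc/network/fan, then starts (and, under
    # systemd, enables) the service again.
    stop_update_start(service='ubuntu-fan',
                      config_file='/etc/network/fan',
                      content=FAN_CONFIG, systemd=True)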
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py index ad957e12..4a51476f 100644 --- a/cloudinit/config/cc_final_message.py +++ b/cloudinit/config/cc_final_message.py @@ -28,9 +28,9 @@ frequency = PER_ALWAYS # Jinja formatted default message FINAL_MESSAGE_DEF = ( - "## template: jinja\n" - "Cloud-init v. {{version}} finished at {{timestamp}}." - " Datasource {{datasource}}. Up {{uptime}} seconds" + "## template: jinja\n" + "Cloud-init v. {{version}} finished at {{timestamp}}." + " Datasource {{datasource}}. Up {{uptime}} seconds" ) diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index 456597af..3c2d9985 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -37,12 +37,11 @@ def handle(name, cfg, _cloud, log, _args): return idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None) - idevs_empty = util.get_cfg_option_str(mycfg, - "grub-pc/install_devices_empty", None) + idevs_empty = util.get_cfg_option_str( + mycfg, "grub-pc/install_devices_empty", None) if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or - (os.path.exists("/dev/xvda1") - and not os.path.exists("/dev/xvda"))): + (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))): if idevs is None: idevs = "" if idevs_empty is None: @@ -66,7 +65,7 @@ def handle(name, cfg, _cloud, log, _args): (idevs, idevs_empty)) log.debug("Setting grub debconf-set-selections with '%s','%s'" % - (idevs, idevs_empty)) + (idevs, idevs_empty)) try: util.subp(['debconf-set-selections'], dconf_sel) diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index f1c1adff..aa844ee9 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -48,7 +48,7 @@ def handle(name, cfg, cloud, log, _args): "ssh_fp_console_blacklist", []) key_blacklist = util.get_cfg_option_list(cfg, "ssh_key_console_blacklist", - ["ssh-dss"]) + ["ssh-dss"]) try: cmd = [helper_path] diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 0b9d846e..68fcb27f 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -38,12 +38,12 @@ distros = ['ubuntu'] # defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2 LSC_BUILTIN_CFG = { - 'client': { - 'log_level': "info", - 'url': "https://landscape.canonical.com/message-system", - 'ping_url': "http://landscape.canonical.com/ping", - 'data_path': "/var/lib/landscape/client", - } + 'client': { + 'log_level': "info", + 'url': "https://landscape.canonical.com/message-system", + 'ping_url': "http://landscape.canonical.com/ping", + 'data_path': "/var/lib/landscape/client", + } } diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py new file mode 100644 index 00000000..63b8fb63 --- /dev/null +++ b/cloudinit/config/cc_lxd.py @@ -0,0 +1,85 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2016 Canonical Ltd. +# +# Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>.
+ +""" +This module initializes lxd using 'lxd init' + +Example config: + #cloud-config + lxd: + init: + network_address: <ip addr> + network_port: <port> + storage_backend: <zfs/dir> + storage_create_device: <dev> + storage_create_loop: <size> + storage_pool: <name> + trust_password: <password> +""" + +from cloudinit import util + + +def handle(name, cfg, cloud, log, args): + # Get config + lxd_cfg = cfg.get('lxd') + if not lxd_cfg: + log.debug("Skipping module named %s, not present or disabled by cfg", + name) + return + if not isinstance(lxd_cfg, dict): + log.warn("lxd config must be a dictionary. found a '%s'", + type(lxd_cfg)) + return + + init_cfg = lxd_cfg.get('init') + if not isinstance(init_cfg, dict): + log.warn("lxd/init config must be a dictionary. found a '%s'", + type(init_cfg)) + init_cfg = {} + + if not init_cfg: + log.debug("no lxd/init config. disabled.") + return + + packages = [] + # Ensure lxd is installed + if not util.which("lxd"): + packages.append('lxd') + + # if using zfs, get the utils + if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'): + packages.append('zfs') + + if len(packages): + try: + cloud.distro.install_packages(packages) + except util.ProcessExecutionError as exc: + log.warn("failed to install packages %s: %s", packages, exc) + return + + # Set up lxd if init config is given + init_keys = ( + 'network_address', 'network_port', 'storage_backend', + 'storage_create_device', 'storage_create_loop', + 'storage_pool', 'trust_password') + cmd = ['lxd', 'init', '--auto'] + for k in init_keys: + if init_cfg.get(k): + cmd.extend(["--%s=%s" % + (k.replace('_', '-'), str(init_cfg[k]))]) + util.subp(cmd)
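Editor's note: a sketch of the command line handle() above assembles from an init config; the config values here are invented, only the init_keys tuple comes from the module:

    init_cfg = {
        'network_address': '0.0.0.0',
        'network_port': 8443,
        'storage_backend': 'zfs',
        'storage_pool': 'default',
    }
    init_keys = (
        'network_address', 'network_port', 'storage_backend',
        'storage_create_device', 'storage_create_loop',
        'storage_pool', 'trust_password')

    cmd = ['lxd', 'init', '--auto']
    for k in init_keys:
        if init_cfg.get(k):
            cmd.append("--%s=%s" % (k.replace('_', '-'), init_cfg[k]))
    # cmd == ['lxd', 'init', '--auto', '--network-address=0.0.0.0',
    #         '--network-port=8443', '--storage-backend=zfs',
    #         '--storage-pool=default']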
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 73b42f91..4fe3ee21 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -204,11 +204,12 @@ def setup_swapfile(fname, size=None, maxsize=None): try: util.ensure_dir(tdir) util.log_time(LOG.debug, msg, func=util.subp, - args=[['sh', '-c', - ('rm -f "$1" && umask 0066 && ' - 'dd if=/dev/zero "of=$1" bs=1M "count=$2" && ' - 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), - 'setup_swap', fname, mbsize]]) + args=[['sh', '-c', + ('rm -f "$1" && umask 0066 && ' + '{ fallocate -l "${2}M" "$1" || ' + ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' + 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), + 'setup_swap', fname, mbsize]]) except Exception as e: raise IOError("Failed %s: %s" % (msg, e)) @@ -262,7 +263,11 @@ def handle_swapcfg(swapcfg): def handle(_name, cfg, cloud, log, _args): # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno - defvals = [None, None, "auto", "defaults,nobootwait", "0", "2"] + def_mnt_opts = "defaults,nobootwait" + if cloud.distro.uses_systemd(): + def_mnt_opts = "defaults,nofail" + + defvals = [None, None, "auto", def_mnt_opts, "0", "2"] defvals = cfg.get("mount_default_fields", defvals) # these are our default set of mounts diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 09d37371..cc3f7f70 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -22,6 +22,7 @@ from cloudinit import util import errno import os import re +import six import subprocess import time @@ -48,10 +49,40 @@ def givecmdline(pid): return None + +def check_condition(cond, log=None): + if isinstance(cond, bool): + if log: + log.debug("Static Condition: %s" % cond) + return cond + + pre = "check_condition command (%s): " % cond + try: + proc = subprocess.Popen(cond, shell=not isinstance(cond, list)) + proc.communicate() + ret = proc.returncode + if ret == 0: + if log: + log.debug(pre + "exited 0. condition met.") + return True + elif ret == 1: + if log: + log.debug(pre + "exited 1. condition not met.") + return False + else: + if log: + log.warn(pre + "unexpected exit %s. " % ret + + "do not apply change.") + return False + except Exception as e: + if log: + log.warn(pre + "Unexpected error: %s" % e) + return False + + def handle(_name, cfg, _cloud, log, _args): try: - (args, timeout) = load_power_state(cfg) + (args, timeout, condition) = load_power_state(cfg) if args is None: log.debug("no power_state provided. doing nothing") return @@ -59,6 +90,10 @@ def handle(_name, cfg, _cloud, log, _args): log.warn("%s Not performing power state change!" % str(e)) return + if condition is False: + log.debug("Condition was false. Will not perform state change.") + return + mypid = os.getpid() cmdline = givecmdline(mypid) @@ -70,8 +105,8 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, execmd, - [args, devnull_fp]) + util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, + condition, execmd, [args, devnull_fp]) def load_power_state(cfg): @@ -80,7 +115,7 @@ def load_power_state(cfg): pstate = cfg.get('power_state') if pstate is None: - return (None, None) + return (None, None, None) if not isinstance(pstate, dict): raise TypeError("power_state is not a dict.") @@ -115,7 +150,10 @@ def load_power_state(cfg): raise ValueError("failed to convert timeout '%s' to float." % pstate['timeout']) - return (args, timeout) + condition = pstate.get("condition", True) + if not isinstance(condition, six.string_types + (list, bool)): + raise TypeError("condition type %s invalid. must be list, bool, str" + % type(condition)) + return (args, timeout, condition) def doexit(sysexit): @@ -133,7 +171,7 @@ def execmd(exe_args, output=None, data_in=None): doexit(ret) -def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): +def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args): # wait until pid, with /proc/pid/cmdline contents of pidcmdline # is no longer alive. After it is gone, or timeout has passed # execute func(args) @@ -175,4 +213,11 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): if log: log.debug(msg) + + try: + if not check_condition(condition, log): + return + except Exception as e: + fatal("Unexpected Exception when checking condition: %s" % e) + func(*args)
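Editor's note: the condition contract in cc_power_state_change above is exit 0 = proceed, exit 1 = skip quietly, anything else = skip with a warning. A small illustrative sketch (the logger name and shell commands are invented):

    import logging
    from cloudinit.config.cc_power_state_change import check_condition

    log = logging.getLogger("demo")
    check_condition(True, log)                    # static bool: proceed
    check_condition(["sh", "-c", "exit 0"], log)  # returns True, condition met
    check_condition(["sh", "-c", "exit 1"], log)  # returns False, not met
    check_condition("test -e /run/ok-to-reboot", log)  # a str runs via shell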
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 4501598e..774d3322 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -36,8 +36,8 @@ def _autostart_puppet(log): # Set puppet to automatically start if os.path.exists('/etc/default/puppet'): util.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) + '-e', 's/^START=.*/START=yes/', + '/etc/default/puppet'], capture=False) elif os.path.exists('/bin/systemctl'): util.subp(['/bin/systemctl', 'enable', 'puppet.service'], capture=False) @@ -65,7 +65,7 @@ def handle(name, cfg, cloud, log, _args): " doing nothing.")) elif install: log.debug(("Attempting to install puppet %s,"), - version if version else 'latest') + version if version else 'latest') cloud.distro.install_packages(('puppet', version)) # ... and then update the puppet configuration diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index cbc07853..2a2a9f59 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -166,7 +166,7 @@ def handle(name, cfg, _cloud, log, args): func=do_resize, args=(resize_cmd, log)) else: util.log_time(logfunc=log.debug, msg="Resizing", - func=do_resize, args=(resize_cmd, log)) + func=do_resize, args=(resize_cmd, log)) action = 'Resized' if resize_root == NOBLOCK: diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 6da26d25..6087c45c 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -126,13 +126,11 @@ class SubscriptionManager(object): "(True/False " return False, not_bool - if (self.servicelevel is not None) and \ - ((not self.auto_attach) - or (util.is_false(str(self.auto_attach)))): - - no_auto = "The service-level key must be used in conjunction with "\ - "the auto-attach key. Please re-run with auto-attach: "\ - "True" + if (self.servicelevel is not None) and ((not self.auto_attach) or + (util.is_false(str(self.auto_attach)))): + no_auto = ("The service-level key must be used in conjunction " + "with the auto-attach key. Please re-run with " + "auto-attach: True") return False, no_auto return True, None diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index a0132d28..b8642d65 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -130,6 +130,7 @@ HOST_PORT_RE = re.compile( '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))' '([:](?P<port>[0-9]+))?$') + def reload_syslog(command=DEF_RELOAD, systemd=False): service = 'rsyslog' if command == DEF_RELOAD: diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 3288a853..1b011216 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -83,7 +83,7 @@ def handle(name, cfg, cloud, log, _args): len(seed_data), seed_path) util.append_file(seed_path, seed_data) - command = mycfg.get('command', ['pollinate', '-q']) + command = mycfg.get('command', None) req = mycfg.get('command_required', False) try: env = os.environ.copy() diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 5d7f4331..f43d8d5a 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -24,7 +24,7 @@ from cloudinit import util def handle(name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug(("Configuration option 'preserve_hostname' is set," - " not setting the hostname in module %s"), name) + " not setting the hostname in module %s"), name) return (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 0c315361..58e1b713 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -45,8 +45,6 @@ def handle(_name, cfg, cloud, log, args): password = util.get_cfg_option_str(cfg, "password", None) expire = True - pw_auth = "no" - change_pwauth = False plist = None if 'chpasswd' in cfg: @@ -104,11 +102,24 @@ def handle(_name, cfg, cloud, log, args): change_pwauth = False pw_auth = None if 'ssh_pwauth' in cfg: - change_pwauth = True if util.is_true(cfg['ssh_pwauth']): + change_pwauth = True pw_auth = 'yes' - if util.is_false(cfg['ssh_pwauth']): + elif
util.is_false(cfg['ssh_pwauth']): + change_pwauth = True pw_auth = 'no' + elif str(cfg['ssh_pwauth']).lower() == 'unchanged': + log.debug('Leaving auth line unchanged') + change_pwauth = False + elif not str(cfg['ssh_pwauth']).strip(): + log.debug('Leaving auth line unchanged') + change_pwauth = False + elif not cfg['ssh_pwauth']: + log.debug('Leaving auth line unchanged') + change_pwauth = False + else: + msg = 'Unrecognized value %s for ssh_pwauth' % cfg['ssh_pwauth'] + util.logexc(log, msg) if change_pwauth: replaced_auth = False diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index 7aaec94a..fa9d54a0 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -6,7 +6,7 @@ Example config: #cloud-config snappy: system_snappy: auto - ssh_enabled: False + ssh_enabled: auto packages: [etcd, pkg2.smoser] config: pkgname: @@ -16,7 +16,12 @@ Example config: packages_dir: '/writable/user-data/cloud-init/snaps' - ssh_enabled: - This defaults to 'False'. Set to a non-false value to enable ssh service + This controls the system's ssh service. The default value is 'auto'. + True: enable ssh service + False: disable ssh service + auto: enable ssh service if either ssh keys have been provided + or user has requested password authentication (ssh_pwauth). + - snap installation and config The above would install 'etcd', and then install 'pkg2.smoser' with a '<config-file>' argument where 'config-file' has 'config-blob' inside it. @@ -58,7 +63,7 @@ NAMESPACE_DELIM = '.' BUILTIN_CFG = { 'packages': [], 'packages_dir': '/writable/user-data/cloud-init/snaps', - 'ssh_enabled': False, + 'ssh_enabled': "auto", 'system_snappy': "auto", 'config': {}, } @@ -274,7 +279,26 @@ def handle(name, cfg, cloud, log, args): LOG.warn("'%s' failed for '%s': %s", pkg_op['op'], pkg_op['name'], e) - disable_enable_ssh(mycfg.get('ssh_enabled', False)) + # Default to 'auto' + ssh_enabled = mycfg.get('ssh_enabled', "auto") + + # If the user has not explicitly enabled or disabled SSH, then enable it + # when password SSH authentication is requested or there are SSH keys + if ssh_enabled == "auto": + user_ssh_keys = cloud.get_public_ssh_keys() or None + password_auth_enabled = cfg.get('ssh_pwauth', False) + if user_ssh_keys: + LOG.debug("Enabling SSH, ssh keys found in datasource") + ssh_enabled = True + elif cfg.get('ssh_authorized_keys'): + LOG.debug("Enabling SSH, ssh keys found in config") + ssh_enabled = True + elif password_auth_enabled: + LOG.debug("Enabling SSH, password authentication requested") + ssh_enabled = True + elif ssh_enabled not in (True, False): + LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled) + + disable_enable_ssh(ssh_enabled) if fails: raise Exception("failed to install/configure snaps") diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 5bd2dec6..d24e43c0 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -30,9 +30,10 @@ from cloudinit import distros as ds from cloudinit import ssh_util from cloudinit import util -DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding," -"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " -"rather than the user \\\"root\\\".\';echo;sleep 10\"") +DISABLE_ROOT_OPTS = ( + "no-port-forwarding,no-agent-forwarding," + "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\"" + " rather than the user \\\"root\\\".\';echo;sleep 10\"") GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
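Editor's note: a distilled, hedged restatement of the cc_snappy ssh_enabled resolution above (the helper name is invented; returning False when nothing matches under 'auto' reflects the module's stated intent):

    def resolve_ssh_enabled(ssh_enabled, cfg, datasource_keys):
        # 'auto' enables ssh only when keys or password auth are configured.
        if ssh_enabled == "auto":
            return bool(datasource_keys or
                        cfg.get('ssh_authorized_keys') or
                        cfg.get('ssh_pwauth', False))
        return ssh_enabled

    assert resolve_ssh_enabled("auto", {'ssh_pwauth': True}, []) is True
    assert resolve_ssh_enabled("auto", {}, []) is False
    assert resolve_ssh_enabled(False, {'ssh_pwauth': True}, []) is False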
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index d3dd1f32..15703efe 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -41,10 +41,10 @@ def handle(name, cfg, cloud, log, _args): if not tpl_fn_name: raise RuntimeError(("No hosts template could be" " found for distro %s") % - (cloud.distro.osfamily)) + (cloud.distro.osfamily)) templater.render_to_file(tpl_fn_name, '/etc/hosts', - {'hostname': hostname, 'fqdn': fqdn}) + {'hostname': hostname, 'fqdn': fqdn}) elif manage_hosts == "localhost": (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) @@ -57,4 +57,4 @@ def handle(name, cfg, cloud, log, _args): cloud.distro.update_etc_hosts(hostname, fqdn) else: log.debug(("Configuration option 'manage_etc_hosts' is not set," - " not managing /etc/hosts in module %s"), name) + " not managing /etc/hosts in module %s"), name) diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index e396ba13..5b78afe1 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -29,7 +29,7 @@ frequency = PER_ALWAYS def handle(name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug(("Configuration option 'preserve_hostname' is set," - " not updating the hostname in module %s"), name) + " not updating the hostname in module %s"), name) return (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py index 4b03ea91..351cfc8c 100644 --- a/cloudinit/config/cc_write_files.py +++ b/cloudinit/config/cc_write_files.py @@ -92,10 +92,10 @@ def decode_perms(perm, default, log): def extract_contents(contents, extraction_types): - result = str(contents) + result = contents for t in extraction_types: if t == 'application/x-gzip': - result = util.decomp_gzip(result, quiet=False) + result = util.decomp_gzip(result, quiet=False, decode=False) elif t == 'application/base64': result = base64.b64decode(result) elif t == UNKNOWN_ENC: diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 3b821af9..64fba869 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -92,7 +92,7 @@ def handle(name, cfg, _cloud, log, _args): for req_field in ['baseurl']: if req_field not in repo_config: log.warn(("Repository %s does not contain a %s" - " configuration 'required' entry"), + " configuration 'required' entry"), repo_id, req_field) missing_required += 1 if not missing_required: diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 71884b32..5879dabf 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -75,6 +75,9 @@ class Distro(object): # to write this blob out in a distro format raise NotImplementedError() + def _write_network_config(self, settings): + raise NotImplementedError() + def _find_tz_file(self, tz): tz_file = os.path.join(self.tz_zone_dir, str(tz)) if not os.path.isfile(tz_file): @@ -132,6 +135,14 @@ class Distro(object): return self._bring_up_interfaces(dev_names) return False + def apply_network_config(self, netconfig, bring_up=False): + # Write it out + dev_names = self._write_network_config(netconfig) + # Now try to bring them up + if bring_up: + return self._bring_up_interfaces(dev_names) + return False + @abc.abstractmethod def apply_locale(self, locale, out_fn=None): raise NotImplementedError() @@
-211,8 +222,8 @@ class Distro(object): # If the system hostname is different than the previous # one or the desired one lets update it as well - if (not sys_hostname) or (sys_hostname == prev_hostname - and sys_hostname != hostname): + if ((not sys_hostname) or (sys_hostname == prev_hostname and + sys_hostname != hostname)): update_files.append(sys_fn) # If something else has changed the hostname after we set it @@ -221,7 +232,7 @@ if (sys_hostname and prev_hostname and sys_hostname != prev_hostname): LOG.info("%s differs from %s, assuming user maintained hostname.", - prev_hostname_fn, sys_fn) + prev_hostname_fn, sys_fn) return # Remove duplicates (incase the previous config filename) @@ -289,7 +300,7 @@ class Distro(object): def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): @@ -319,6 +330,11 @@ class Distro(object): LOG.info("User %s already exists, skipping." % name) return + if 'create_groups' in kwargs: + create_groups = kwargs.pop('create_groups') + else: + create_groups = True + adduser_cmd = ['useradd', name] log_adduser_cmd = ['useradd', name] @@ -346,6 +362,26 @@ class Distro(object): redact_opts = ['passwd'] + # support kwargs having groups=[list] or groups="g1,g2" + groups = kwargs.get('groups') or [] + if groups: + if isinstance(groups, (list, tuple)): + # kwargs.items loop below wants a comma delimited string + # that can go right through to the command. + kwargs['groups'] = ",".join(groups) + else: + groups = groups.split(",") + + primary_group = kwargs.get('primary_group') + if primary_group: + groups.append(primary_group) + + if create_groups and groups: + for group in groups: + if not util.is_group(group): + self.create_group(group) + LOG.debug("created group %s for user %s", group, name) + # Check the values and create the command for key, val in kwargs.items(): @@ -393,6 +429,10 @@ class Distro(object): if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']: self.set_passwd(name, kwargs['plain_text_passwd']) + # Set password if hashed password is provided and non-empty + if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']: + self.set_passwd(name, kwargs['hashed_passwd'], hashed=True) + # Default locking down the account. 'lock_passwd' defaults to True. # lock account unless lock_password is False.
if kwargs.get('lock_passwd', True): @@ -530,8 +570,10 @@ class Distro(object): util.logexc(LOG, "Failed to append sudoers file %s", sudo_file) raise e - def create_group(self, name, members): + def create_group(self, name, members=None): group_add_cmd = ['groupadd', name] + if not members: + members = [] # Check if group exists, and then add it doesn't if util.is_group(name): @@ -541,14 +583,14 @@ class Distro(object): util.subp(group_add_cmd) LOG.info("Created new group %s" % name) except Exception: - util.logexc("Failed to create group %s", name) + util.logexc(LOG, "Failed to create group %s", name) # Add members to the group, if so defined if len(members) > 0: for member in members: if not util.is_user(member): LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) + "; user does not exist.", member, name) continue util.subp(['usermod', '-a', '-G', name, member]) @@ -886,7 +928,7 @@ def fetch(name): locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro']) if not locs: raise ImportError("No distribution found for distro %s (searched %s)" - % (name, looked_locs)) + % (name, looked_locs)) mod = importer.import_module(locs[0]) cls = getattr(mod, 'Distro') return cls @@ -897,5 +939,12 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone", util.write_file(tz_conf, str(tz).rstrip() + "\n") # This ensures that the correct tz will be used for the system if tz_local and tz_file: - util.copy(tz_file, tz_local) + # use a symlink if there exists a symlink or tz_local is not present + islink = os.path.islink(tz_local) + if islink or not os.path.exists(tz_local): + if islink: + util.del_file(tz_local) + os.symlink(tz_file, tz_local) + else: + util.copy(tz_file, tz_local) return diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 45fcf26f..93a2e008 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -74,7 +74,7 @@ class Distro(distros.Distro): 'Interface': dev, 'IP': info.get('bootproto'), 'Address': "('%s/%s')" % (info.get('address'), - info.get('netmask')), + info.get('netmask')), 'Gateway': info.get('gateway'), 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '') } @@ -86,7 +86,7 @@ class Distro(distros.Distro): if nameservers: util.write_file(self.resolve_conf_fn, - convert_resolv_conf(nameservers)) + convert_resolv_conf(nameservers)) return dev_names @@ -102,7 +102,7 @@ class Distro(distros.Distro): def _bring_up_interface(self, device_name): cmd = ['netctl', 'restart', device_name] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 6d3a82bf..5d7e6cfc 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -26,6 +26,7 @@ from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit import net from cloudinit.distros.parsers.hostname import HostnameConf @@ -45,7 +46,8 @@ APT_GET_WRAPPER = { class Distro(distros.Distro): hostname_conf_fn = "/etc/hostname" locale_conf_fn = "/etc/default/locale" - network_conf_fn = "/etc/network/interfaces" + network_conf_fn = "/etc/network/interfaces.d/50-cloud-init.cfg" + links_prefix = "/etc/systemd/network/50-cloud-init-" def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -76,6 +78,15 @@ class Distro(distros.Distro): 
util.write_file(self.network_conf_fn, settings) return ['all'] + def _write_network_config(self, netconfig): + ns = net.parse_net_config_data(netconfig) + net.render_network_state(target="/", network_state=ns, + eni=self.network_conf_fn, + links_prefix=self.links_prefix, + netrules=None) + util.del_file("/etc/network/interfaces.d/eth0.cfg") + return [] + def _bring_up_interfaces(self, device_names): use_all = False for d in device_names: @@ -159,8 +170,9 @@ class Distro(distros.Distro): # Allow the output of this to flow outwards (ie not be captured) util.log_time(logfunc=LOG.debug, - msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp, - args=(cmd,), kwargs={'env': e, 'capture': False}) + msg="apt-%s [%s]" % (command, ' '.join(cmd)), + func=util.subp, + args=(cmd,), kwargs={'env': e, 'capture': False}) def update_package_sources(self): self._runner.run("update-sources", self.package_command, diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 4c484639..91bf4a4e 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -16,6 +16,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +import os import six from six import StringIO @@ -30,6 +31,8 @@ from cloudinit import util from cloudinit.distros import net_util from cloudinit.distros.parsers.resolv_conf import ResolvConf +from cloudinit.settings import PER_INSTANCE + LOG = logging.getLogger(__name__) @@ -205,8 +208,8 @@ class Distro(distros.Distro): redact_opts = ['passwd'] for key, val in kwargs.items(): - if (key in adduser_opts and val - and isinstance(val, six.string_types)): + if (key in adduser_opts and val and + isinstance(val, six.string_types)): adduser_cmd.extend([adduser_opts[key], val]) # Redact certain fields from the logs @@ -236,9 +239,21 @@ class Distro(distros.Distro): util.logexc(LOG, "Failed to create user %s", name) raise e - # TODO: def set_passwd(self, user, passwd, hashed=False): - return False + cmd = ['pw', 'usermod', user] + + if hashed: + cmd.append('-H') + else: + cmd.append('-h') + + cmd.append('0') + + try: + util.subp(cmd, passwd, logstring="chpasswd for %s" % user) + except Exception as e: + util.logexc(LOG, "Failed to set password for %s", user) + raise e def lock_passwd(self, name): try: @@ -369,13 +384,34 @@ class Distro(distros.Distro): LOG.warn("Error running %s: %s", cmd, err) def install_packages(self, pkglist): - return + self.update_package_sources() + self.package_command('install', pkgs=pkglist) + + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + + e = os.environ.copy() + e['ASSUME_ALWAYS_YES'] = 'YES' + + cmd = ['pkg'] + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): + cmd.extend(args) + + if command: + cmd.append(command) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) - def package_command(self, cmd, args=None, pkgs=None): - return + # Allow the output of this to flow outwards (ie not be captured) + util.subp(cmd, env=e, capture=False) def set_timezone(self, tz): - return + distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) def update_package_sources(self): - return + self._runner.run("update-sources", self.package_command, + ["update"], freq=PER_INSTANCE) diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index 9e80583c..6267dd6e 100644 --- a/cloudinit/distros/gentoo.py +++ 
b/cloudinit/distros/gentoo.py @@ -66,7 +66,7 @@ class Distro(distros.Distro): def _bring_up_interface(self, device_name): cmd = ['/etc/init.d/net.%s' % device_name, 'restart'] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): @@ -88,7 +88,7 @@ class Distro(distros.Distro): (_out, err) = util.subp(cmd) if len(err): LOG.warn("Running %s resulted in stderr output: %s", cmd, - err) + err) except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) return False diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index 84a1de42..efb185d4 100644 --- a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -84,5 +84,5 @@ class HostnameConf(object): hostnames_found.add(head) if len(hostnames_found) > 1: raise IOError("Multiple hostnames (%s) found!" - % (hostnames_found)) + % (hostnames_found)) return entries diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 8aee03a4..2ed13d9c 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -132,7 +132,7 @@ class ResolvConf(object): # Some hard limit on 256 chars total raise ValueError(("Adding %r would go beyond the " "256 maximum search list character limit") - % (search_domain)) + % (search_domain)) self._remove_option('search') self._contents.append(('option', ['search', s_list, ''])) return flat_sds diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index d795e12f..6157cf32 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -77,8 +77,7 @@ class SysConf(configobj.ConfigObj): quot_func = None if value[0] in ['"', "'"] and value[-1] in ['"', "'"]: if len(value) == 1: - quot_func = (lambda x: - self._get_single_quote(x) % x) + quot_func = (lambda x: self._get_single_quote(x) % x) else: # Quote whitespace if it isn't the start + end of a shell command if value.strip().startswith("$(") and value.strip().endswith(")"): @@ -91,10 +90,10 @@ class SysConf(configobj.ConfigObj): # to use single quotes which won't get expanded... if re.search(r"[\n\"']", value): quot_func = (lambda x: - self._get_triple_quote(x) % x) + self._get_triple_quote(x) % x) else: quot_func = (lambda x: - self._get_single_quote(x) % x) + self._get_single_quote(x) % x) else: quot_func = pipes.quote if not quot_func: diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py index 5bebd318..baecdac9 100644 --- a/cloudinit/filters/launch_index.py +++ b/cloudinit/filters/launch_index.py @@ -61,7 +61,7 @@ class Filter(object): discarded += 1 LOG.debug(("Discarding %s multipart messages " "which do not match launch index %s"), - discarded, self.wanted_idx) + discarded, self.wanted_idx) new_message = copy.copy(message) new_message.set_payload(new_msgs) new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs)) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 5e99d185..0cf982f3 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -139,9 +139,10 @@ class FileSemaphores(object): # but the item had run before we did canon_sem_name. if cname != name and os.path.exists(self._get_path(name, freq)): LOG.warn("%s has run without canonicalized name [%s].\n" - "likely the migrator has not yet run. 
It will run next boot.\n" - "run manually with: cloud-init single --name=migrator" - % (name, cname)) + "likely the migrator has not yet run. " + "It will run next boot.\n" + "run manually with: cloud-init single --name=migrator" + % (name, cname)) return True return False @@ -335,19 +336,19 @@ class Paths(object): template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/') self.template_tpl = os.path.join(template_dir, '%s.tmpl') self.lookups = { - "handlers": "handlers", - "scripts": "scripts", - "vendor_scripts": "scripts/vendor", - "sem": "sem", - "boothooks": "boothooks", - "userdata_raw": "user-data.txt", - "userdata": "user-data.txt.i", - "obj_pkl": "obj.pkl", - "cloud_config": "cloud-config.txt", - "vendor_cloud_config": "vendor-cloud-config.txt", - "data": "data", - "vendordata_raw": "vendor-data.txt", - "vendordata": "vendor-data.txt.i", + "handlers": "handlers", + "scripts": "scripts", + "vendor_scripts": "scripts/vendor", + "sem": "sem", + "boothooks": "boothooks", + "userdata_raw": "user-data.txt", + "userdata": "user-data.txt.i", + "obj_pkl": "obj.pkl", + "cloud_config": "cloud-config.txt", + "vendor_cloud_config": "vendor-cloud-config.txt", + "data": "data", + "vendordata_raw": "vendor-data.txt", + "vendordata": "vendor-data.txt.i", } # Set when a datasource becomes active self.datasource = ds diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py new file mode 100644 index 00000000..40929c6e --- /dev/null +++ b/cloudinit/net/__init__.py @@ -0,0 +1,751 @@ +# Copyright (C) 2013-2014 Canonical Ltd. +# +# Author: Scott Moser <scott.moser@canonical.com> +# Author: Blake Rouse <blake.rouse@canonical.com> +# +# Curtin is free software: you can redistribute it and/or modify it under +# the terms of the GNU Affero General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for +# more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with Curtin. If not, see <http://www.gnu.org/licenses/>. + +import base64 +import errno +import glob +import gzip +import io +import os +import re +import shlex + +from cloudinit import log as logging +from cloudinit import util +from .udev import generate_udev_rule +from . 
import network_state + +LOG = logging.getLogger(__name__) + +SYS_CLASS_NET = "/sys/class/net/" +LINKS_FNAME_PREFIX = "etc/systemd/network/50-cloud-init-" + +NET_CONFIG_OPTIONS = [ + "address", "netmask", "broadcast", "network", "metric", "gateway", + "pointopoint", "media", "mtu", "hostname", "leasehours", "leasetime", + "vendor", "client", "bootfile", "server", "hwaddr", "provider", "frame", + "netnum", "endpoint", "local", "ttl", + ] + +NET_CONFIG_COMMANDS = [ + "pre-up", "up", "post-up", "down", "pre-down", "post-down", + ] + +NET_CONFIG_BRIDGE_OPTIONS = [ + "bridge_ageing", "bridge_bridgeprio", "bridge_fd", "bridge_gcinit", + "bridge_hello", "bridge_maxage", "bridge_maxwait", "bridge_stp", + ] + +DEFAULT_PRIMARY_INTERFACE = 'eth0' + + +def sys_dev_path(devname, path=""): + return SYS_CLASS_NET + devname + "/" + path + + +def read_sys_net(devname, path, translate=None, enoent=None, keyerror=None): + try: + contents = "" + with open(sys_dev_path(devname, path), "r") as fp: + contents = fp.read().strip() + if translate is None: + return contents + + try: + return translate[contents] + except KeyError: + LOG.debug("found unexpected value '%s' in '%s/%s'", contents, + devname, path) + if keyerror is not None: + return keyerror + raise + except OSError as e: + if e.errno == errno.ENOENT and enoent is not None: + return enoent + raise + + +def is_up(devname): + # The linux kernel says to consider devices in 'unknown' + # operstate as up for the purposes of network configuration. See + # Documentation/networking/operstates.txt in the kernel source. + translate = {'up': True, 'unknown': True, 'down': False} + return read_sys_net(devname, "operstate", enoent=False, keyerror=False, + translate=translate) + + +def is_wireless(devname): + return os.path.exists(sys_dev_path(devname, "wireless")) + + +def is_connected(devname): + # is_connected isn't really as simple as that. 2 is + # 'physically connected'. 3 is 'not connected'. but a wlan interface will + # always show 3. + try: + iflink = read_sys_net(devname, "iflink", enoent=False) + if iflink == "2": + return True + if not is_wireless(devname): + return False + LOG.debug("'%s' is wireless, basing 'connected' on carrier", devname) + + return read_sys_net(devname, "carrier", enoent=False, keyerror=False, + translate={'0': False, '1': True}) + + except IOError as e: + if e.errno == errno.EINVAL: + return False + raise + + +def is_physical(devname): + return os.path.exists(sys_dev_path(devname, "device")) + + +def is_present(devname): + return os.path.exists(sys_dev_path(devname)) + + +def get_devicelist(): + return os.listdir(SYS_CLASS_NET) + + +class ParserError(Exception): + """Raised when parser has issue parsing the interfaces file.""" + + +def parse_deb_config_data(ifaces, contents, src_dir, src_path): + """Parses the file contents, placing result into ifaces. + + '_source_path' is added to every dictionary entry to define which file + the configuration information came from.
+ + :param ifaces: interface dictionary + :param contents: contents of interfaces file + :param src_dir: directory interfaces file was located + :param src_path: file path the `contents` was read + """ + currif = None + for line in contents.splitlines(): + line = line.strip() + if line.startswith('#'): + continue + split = line.split(' ') + option = split[0] + if option == "source-directory": + parsed_src_dir = split[1] + if not parsed_src_dir.startswith("/"): + parsed_src_dir = os.path.join(src_dir, parsed_src_dir) + for expanded_path in glob.glob(parsed_src_dir): + dir_contents = os.listdir(expanded_path) + dir_contents = [ + os.path.join(expanded_path, path) + for path in dir_contents + if (os.path.isfile(os.path.join(expanded_path, path)) and + re.match("^[a-zA-Z0-9_-]+$", path) is not None) + ] + for entry in dir_contents: + with open(entry, "r") as fp: + src_data = fp.read().strip() + abs_entry = os.path.abspath(entry) + parse_deb_config_data( + ifaces, src_data, + os.path.dirname(abs_entry), abs_entry) + elif option == "source": + new_src_path = split[1] + if not new_src_path.startswith("/"): + new_src_path = os.path.join(src_dir, new_src_path) + for expanded_path in glob.glob(new_src_path): + with open(expanded_path, "r") as fp: + src_data = fp.read().strip() + abs_path = os.path.abspath(expanded_path) + parse_deb_config_data( + ifaces, src_data, + os.path.dirname(abs_path), abs_path) + elif option == "auto": + for iface in split[1:]: + if iface not in ifaces: + ifaces[iface] = { + # Include the source path this interface was found in. + "_source_path": src_path + } + ifaces[iface]['auto'] = True + elif option == "iface": + iface, family, method = split[1:4] + if iface not in ifaces: + ifaces[iface] = { + # Include the source path this interface was found in. + "_source_path": src_path + } + elif 'family' in ifaces[iface]: + raise ParserError( + "Interface %s can only be defined once. " + "Re-defined in '%s'." 
% (iface, src_path)) + ifaces[iface]['family'] = family + ifaces[iface]['method'] = method + currif = iface + elif option == "hwaddress": + ifaces[currif]['hwaddress'] = split[1] + elif option in NET_CONFIG_OPTIONS: + ifaces[currif][option] = split[1] + elif option in NET_CONFIG_COMMANDS: + if option not in ifaces[currif]: + ifaces[currif][option] = [] + ifaces[currif][option].append(' '.join(split[1:])) + elif option.startswith('dns-'): + if 'dns' not in ifaces[currif]: + ifaces[currif]['dns'] = {} + if option == 'dns-search': + ifaces[currif]['dns']['search'] = [] + for domain in split[1:]: + ifaces[currif]['dns']['search'].append(domain) + elif option == 'dns-nameservers': + ifaces[currif]['dns']['nameservers'] = [] + for server in split[1:]: + ifaces[currif]['dns']['nameservers'].append(server) + elif option.startswith('bridge_'): + if 'bridge' not in ifaces[currif]: + ifaces[currif]['bridge'] = {} + if option in NET_CONFIG_BRIDGE_OPTIONS: + bridge_option = option.replace('bridge_', '', 1) + ifaces[currif]['bridge'][bridge_option] = split[1] + elif option == "bridge_ports": + ifaces[currif]['bridge']['ports'] = [] + for iface in split[1:]: + ifaces[currif]['bridge']['ports'].append(iface) + elif option == "bridge_hw" and split[1].lower() == "mac": + ifaces[currif]['bridge']['mac'] = split[2] + elif option == "bridge_pathcost": + if 'pathcost' not in ifaces[currif]['bridge']: + ifaces[currif]['bridge']['pathcost'] = {} + ifaces[currif]['bridge']['pathcost'][split[1]] = split[2] + elif option == "bridge_portprio": + if 'portprio' not in ifaces[currif]['bridge']: + ifaces[currif]['bridge']['portprio'] = {} + ifaces[currif]['bridge']['portprio'][split[1]] = split[2] + elif option.startswith('bond-'): + if 'bond' not in ifaces[currif]: + ifaces[currif]['bond'] = {} + bond_option = option.replace('bond-', '', 1) + ifaces[currif]['bond'][bond_option] = split[1] + for iface in ifaces.keys(): + if 'auto' not in ifaces[iface]: + ifaces[iface]['auto'] = False + + +def parse_deb_config(path): + """Parses a debian network configuration file.""" + ifaces = {} + with open(path, "r") as fp: + contents = fp.read().strip() + abs_path = os.path.abspath(path) + parse_deb_config_data( + ifaces, contents, + os.path.dirname(abs_path), abs_path) + return ifaces + + +def parse_net_config_data(net_config): + """Parses the config, returns NetworkState dictionary + + :param net_config: curtin network config dict + """ + state = None + if 'version' in net_config and 'config' in net_config: + ns = network_state.NetworkState(version=net_config.get('version'), + config=net_config.get('config')) + ns.parse_config() + state = ns.network_state + + return state + + +def parse_net_config(path): + """Parses a curtin network configuration file and + return network state""" + ns = None + net_config = util.read_conf(path) + if 'network' in net_config: + ns = parse_net_config_data(net_config.get('network')) + + return ns + + +def _load_shell_content(content, add_empty=False, empty_val=None): + """Given shell like syntax (key=value\nkey2=value2\n) in content + return the data in dictionary form. If 'add_empty' is True + then add entries in to the returned dictionary for 'VAR=' + variables. 
Set their value to empty_val.""" + data = {} + for line in shlex.split(content): + key, value = line.split("=", 1) + if not value: + value = empty_val + if add_empty or value: + data[key] = value + + return data + + +def _klibc_to_config_entry(content, mac_addrs=None): + """Convert a klibc writtent shell content file to a 'config' entry + When ip= is seen on the kernel command line in debian initramfs + and networking is brought up, ipconfig will populate + /run/net-<name>.cfg. + + The files are shell style syntax, and examples are in the tests + provided here. There is no good documentation on this unfortunately. + + DEVICE=<name> is expected/required and PROTO should indicate if + this is 'static' or 'dhcp'. + """ + + if mac_addrs is None: + mac_addrs = {} + + data = _load_shell_content(content) + try: + name = data['DEVICE'] + except KeyError: + raise ValueError("no 'DEVICE' entry in data") + + # ipconfig on precise does not write PROTO + proto = data.get('PROTO') + if not proto: + if data.get('filename'): + proto = 'dhcp' + else: + proto = 'static' + + if proto not in ('static', 'dhcp'): + raise ValueError("Unexpected value for PROTO: %s" % proto) + + iface = { + 'type': 'physical', + 'name': name, + 'subnets': [], + } + + if name in mac_addrs: + iface['mac_address'] = mac_addrs[name] + + # originally believed there might be IPV6* values + for v, pre in (('ipv4', 'IPV4'),): + # if no IPV4ADDR or IPV6ADDR, then go on. + if pre + "ADDR" not in data: + continue + subnet = {'type': proto} + + # these fields go right on the subnet + for key in ('NETMASK', 'BROADCAST', 'GATEWAY'): + if pre + key in data: + subnet[key.lower()] = data[pre + key] + + dns = [] + # handle IPV4DNS0 or IPV6DNS0 + for nskey in ('DNS0', 'DNS1'): + ns = data.get(pre + nskey) + # verify it has something other than 0.0.0.0 (or ipv6) + if ns and len(ns.strip(":.0")): + dns.append(data[pre + nskey]) + if dns: + subnet['dns_nameservers'] = dns + # add search to both ipv4 and ipv6, as it has no namespace + search = data.get('DOMAINSEARCH') + if search: + if ',' in search: + subnet['dns_search'] = search.split(",") + else: + subnet['dns_search'] = search.split() + + iface['subnets'].append(subnet) + + return name, iface + + +def config_from_klibc_net_cfg(files=None, mac_addrs=None): + if files is None: + files = glob.glob('/run/net*.conf') + + entries = [] + names = {} + for cfg_file in files: + name, entry = _klibc_to_config_entry(util.load_file(cfg_file), + mac_addrs=mac_addrs) + if name in names: + raise ValueError( + "device '%s' defined multiple times: %s and %s" % ( + name, names[name], cfg_file)) + + names[name] = cfg_file + entries.append(entry) + return {'config': entries, 'version': 1} + + +def render_persistent_net(network_state): + ''' Given state, emit udev rules to map + mac to ifname + ''' + content = "" + interfaces = network_state.get('interfaces') + for iface in interfaces.values(): + # for physical interfaces write out a persist net udev rule + if iface['type'] == 'physical' and \ + 'name' in iface and iface.get('mac_address'): + content += generate_udev_rule(iface['name'], + iface['mac_address']) + + return content + + +# TODO: switch valid_map based on mode inet/inet6 +def iface_add_subnet(iface, subnet): + content = "" + valid_map = [ + 'address', + 'netmask', + 'broadcast', + 'metric', + 'gateway', + 'pointopoint', + 'mtu', + 'scope', + 'dns_search', + 'dns_nameservers', + ] + for key, value in subnet.items(): + if value and key in valid_map: + if type(value) == list: + value = " ".join(value) + if '_' 
in key: + key = key.replace('_', '-') + content += " {} {}\n".format(key, value) + + return content + + +# TODO: switch to valid_map for attrs +def iface_add_attrs(iface): + content = "" + ignore_map = [ + 'type', + 'name', + 'inet', + 'mode', + 'index', + 'subnets', + ] + if iface['type'] not in ['bond', 'bridge', 'vlan']: + ignore_map.append('mac_address') + + for key, value in iface.items(): + if value and key not in ignore_map: + if type(value) == list: + value = " ".join(value) + content += " {} {}\n".format(key, value) + + return content + + +def render_route(route, indent=""): + """ When rendering routes for an iface, in some cases applying a route + may result in the route command returning non-zero which produces + some confusing output for users manually using ifup/ifdown[1]. To + that end, we will optionally include an '|| true' postfix to each + route line allowing users to work with ifup/ifdown without using + --force option. + + We may at somepoint not want to emit this additional postfix, and + add a 'strict' flag to this function. When called with strict=True, + then we will not append the postfix. + + 1. http://askubuntu.com/questions/168033/ + how-to-set-static-routes-in-ubuntu-server + """ + content = "" + up = indent + "post-up route add" + down = indent + "pre-down route del" + eol = " || true\n" + mapping = { + 'network': '-net', + 'netmask': 'netmask', + 'gateway': 'gw', + 'metric': 'metric', + } + if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0': + default_gw = " default gw %s" % route['gateway'] + content += up + default_gw + eol + content += down + default_gw + eol + elif route['network'] == '::' and route['netmask'] == 0: + # ipv6! + default_gw = " -A inet6 default gw %s" % route['gateway'] + content += up + default_gw + eol + content += down + default_gw + eol + else: + route_line = "" + for k in ['network', 'netmask', 'gateway', 'metric']: + if k in route: + route_line += " %s %s" % (mapping[k], route[k]) + content += up + route_line + eol + content += down + route_line + eol + + return content + + +def render_interfaces(network_state): + ''' Given state, emit etc/network/interfaces content ''' + + content = "" + interfaces = network_state.get('interfaces') + ''' Apply a sort order to ensure that we write out + the physical interfaces first; this is critical for + bonding + ''' + order = { + 'physical': 0, + 'bond': 1, + 'bridge': 2, + 'vlan': 3, + } + content += "auto lo\niface lo inet loopback\n" + for dnskey, value in network_state.get('dns', {}).items(): + if len(value): + content += " dns-{} {}\n".format(dnskey, " ".join(value)) + + content += "\n" + for iface in sorted(interfaces.values(), + key=lambda k: (order[k['type']], k['name'])): + content += "auto {name}\n".format(**iface) + + subnets = iface.get('subnets', {}) + if subnets: + for index, subnet in zip(range(0, len(subnets)), subnets): + iface['index'] = index + iface['mode'] = subnet['type'] + if iface['mode'].endswith('6'): + iface['inet'] += '6' + elif iface['mode'] == 'static' and ":" in subnet['address']: + iface['inet'] += '6' + if iface['mode'].startswith('dhcp'): + iface['mode'] = 'dhcp' + + if index == 0: + content += "iface {name} {inet} {mode}\n".format(**iface) + else: + content += "auto {name}:{index}\n".format(**iface) + content += \ + "iface {name}:{index} {inet} {mode}\n".format(**iface) + + content += iface_add_subnet(iface, subnet) + content += iface_add_attrs(iface) + for route in subnet.get('routes', []): + content += render_route(route, indent=" ") + content 
+
+
+def render_interfaces(network_state):
+    '''Given state, emit etc/network/interfaces content.'''
+
+    content = ""
+    interfaces = network_state.get('interfaces')
+    ''' Apply a sort order to ensure that we write out
+        the physical interfaces first; this is critical for
+        bonding
+    '''
+    order = {
+        'physical': 0,
+        'bond': 1,
+        'bridge': 2,
+        'vlan': 3,
+    }
+    content += "auto lo\niface lo inet loopback\n"
+    for dnskey, value in network_state.get('dns', {}).items():
+        if len(value):
+            content += "    dns-{} {}\n".format(dnskey, " ".join(value))
+
+    content += "\n"
+    for iface in sorted(interfaces.values(),
+                        key=lambda k: (order[k['type']], k['name'])):
+        content += "auto {name}\n".format(**iface)
+
+        subnets = iface.get('subnets', {})
+        if subnets:
+            for index, subnet in zip(range(0, len(subnets)), subnets):
+                iface['index'] = index
+                iface['mode'] = subnet['type']
+                if iface['mode'].endswith('6'):
+                    iface['inet'] += '6'
+                elif iface['mode'] == 'static' and ":" in subnet['address']:
+                    iface['inet'] += '6'
+                if iface['mode'].startswith('dhcp'):
+                    iface['mode'] = 'dhcp'
+
+                if index == 0:
+                    content += "iface {name} {inet} {mode}\n".format(**iface)
+                else:
+                    content += "auto {name}:{index}\n".format(**iface)
+                    content += \
+                        "iface {name}:{index} {inet} {mode}\n".format(**iface)
+
+                content += iface_add_subnet(iface, subnet)
+                content += iface_add_attrs(iface)
+                for route in subnet.get('routes', []):
+                    content += render_route(route, indent="    ")
+                content += "\n"
+        else:
+            content += "iface {name} {inet} {mode}\n".format(**iface)
+            content += iface_add_attrs(iface)
+            content += "\n"
+
+    for route in network_state.get('routes'):
+        content += render_route(route)
+
+    # global replacements until v2 format
+    content = content.replace('mac_address', 'hwaddress ether')
+    return content
+
+
+def render_network_state(target, network_state, eni="etc/network/interfaces",
+                         links_prefix=LINKS_FNAME_PREFIX,
+                         netrules='etc/udev/rules.d/70-persistent-net.rules'):
+
+    fpeni = os.path.sep.join((target, eni,))
+    util.ensure_dir(os.path.dirname(fpeni))
+    with open(fpeni, 'w+') as f:
+        f.write(render_interfaces(network_state))
+
+    if netrules:
+        netrules = os.path.sep.join((target, netrules,))
+        util.ensure_dir(os.path.dirname(netrules))
+        with open(netrules, 'w+') as f:
+            f.write(render_persistent_net(network_state))
+
+    if links_prefix:
+        render_systemd_links(target, network_state, links_prefix)
+
+
+def render_systemd_links(target, network_state,
+                         links_prefix=LINKS_FNAME_PREFIX):
+    fp_prefix = os.path.sep.join((target, links_prefix))
+    for f in glob.glob(fp_prefix + "*"):
+        os.unlink(f)
+
+    interfaces = network_state.get('interfaces')
+    for iface in interfaces.values():
+        if (iface['type'] == 'physical' and 'name' in iface and
+                iface.get('mac_address')):
+            fname = fp_prefix + iface['name'] + ".link"
+            with open(fname, "w") as fp:
+                fp.write("\n".join([
+                    "[Match]",
+                    "MACAddress=" + iface['mac_address'],
+                    "",
+                    "[Link]",
+                    "Name=" + iface['name'],
+                    ""
+                ]))
+
+
+def is_disabled_cfg(cfg):
+    if not cfg or not isinstance(cfg, dict):
+        return False
+    return cfg.get('config') == "disabled"
+
+
+def sys_netdev_info(name, field):
+    if not os.path.exists(os.path.join(SYS_CLASS_NET, name)):
+        raise OSError("%s: interface does not exist in %s" %
+                      (name, SYS_CLASS_NET))
+
+    fname = os.path.join(SYS_CLASS_NET, name, field)
+    if not os.path.exists(fname):
+        raise OSError("%s: could not find sysfs entry: %s" % (name, fname))
+    data = util.load_file(fname)
+    if data[-1] == '\n':
+        data = data[:-1]
+    return data
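A small sketch of probing link state with the helper above, much as the fallback logic below does (interface name hypothetical):

from cloudinit.net import sys_netdev_info

try:
    # reads /sys/class/net/eth0/carrier and /sys/class/net/eth0/operstate
    carrier = int(sys_netdev_info('eth0', 'carrier'))
    operstate = sys_netdev_info('eth0', 'operstate')
    print("eth0: carrier=%s operstate=%s" % (bool(carrier), operstate))
except OSError as e:
    # sys_netdev_info raises OSError when the device or attribute is
    # missing; reading 'carrier' can also fail on a downed link
    print("cannot probe eth0: %s" % e)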
+
+
+def generate_fallback_config():
+    """Determine which attached net dev is most likely to have a connection
+       and generate network state to run dhcp on that interface"""
+    # by default use eth0 as primary interface
+    nconf = {'config': [], 'version': 1}
+
+    # get list of interfaces that could have connections
+    invalid_interfaces = set(['lo'])
+    potential_interfaces = set(get_devicelist())
+    potential_interfaces = potential_interfaces.difference(invalid_interfaces)
+    # sort into interfaces with carrier, interfaces which could have carrier,
+    # and ignore interfaces that are definitely disconnected
+    connected = []
+    possibly_connected = []
+    for interface in potential_interfaces:
+        try:
+            carrier = int(sys_netdev_info(interface, 'carrier'))
+            if carrier:
+                connected.append(interface)
+                continue
+        except OSError:
+            pass
+        # check if nic is dormant or down, as this may make a nic appear to
+        # not have a carrier even though it could acquire one when brought
+        # online by dhclient
+        try:
+            dormant = int(sys_netdev_info(interface, 'dormant'))
+            if dormant:
+                possibly_connected.append(interface)
+                continue
+        except OSError:
+            pass
+        try:
+            operstate = sys_netdev_info(interface, 'operstate')
+            if operstate in ['dormant', 'down', 'lowerlayerdown', 'unknown']:
+                possibly_connected.append(interface)
+                continue
+        except OSError:
+            pass
+
+    # don't bother with interfaces that might not be connected if there are
+    # some that definitely are
+    if connected:
+        potential_interfaces = connected
+    else:
+        potential_interfaces = possibly_connected
+    # if there are no interfaces, give up
+    if not potential_interfaces:
+        return
+    # if eth0 exists use it above anything else, otherwise get the interface
+    # that looks 'first'
+    if DEFAULT_PRIMARY_INTERFACE in potential_interfaces:
+        name = DEFAULT_PRIMARY_INTERFACE
+    else:
+        name = sorted(potential_interfaces)[0]
+
+    mac = sys_netdev_info(name, 'address')
+    target_name = name
+
+    nconf['config'].append(
+        {'type': 'physical', 'name': target_name,
+         'mac_address': mac, 'subnets': [{'type': 'dhcp'}]})
+    return nconf
+
+
+def _decomp_gzip(blob, strict=True):
+    # decompress blob. raise exception if not compressed unless strict=False.
+    with io.BytesIO(blob) as iobuf:
+        gzfp = None
+        try:
+            gzfp = gzip.GzipFile(mode="rb", fileobj=iobuf)
+            return gzfp.read()
+        except IOError:
+            if strict:
+                raise
+            return blob
+        finally:
+            if gzfp:
+                gzfp.close()
+
+
+def _b64dgz(b64str, gzipped="try"):
+    # decode a base64 string.  If gzipped is true, transparently uncompress.
+    # if gzipped is 'try', then try gunzip, returning the original on fail.
+    try:
+        blob = base64.b64decode(b64str)
+    except TypeError:
+        raise ValueError("Invalid base64 text: %s" % b64str)
+
+    if not gzipped:
+        return blob
+
+    return _decomp_gzip(blob, strict=gzipped != "try")
+
+
+def read_kernel_cmdline_config(files=None, mac_addrs=None, cmdline=None):
+    if cmdline is None:
+        cmdline = util.get_cmdline()
+
+    if 'network-config=' in cmdline:
+        data64 = None
+        for tok in cmdline.split():
+            if tok.startswith("network-config="):
+                data64 = tok.split("=", 1)[1]
+        if data64:
+            return util.load_yaml(_b64dgz(data64))
+
+    if 'ip=' not in cmdline:
+        return None
+
+    if mac_addrs is None:
+        mac_addrs = {k: sys_netdev_info(k, 'address')
+                     for k in get_devicelist()}
+
+    return config_from_klibc_net_cfg(files=files, mac_addrs=mac_addrs)
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py
new file mode 100644
index 00000000..e32d2cdf
--- /dev/null
+++ b/cloudinit/net/network_state.py
@@ -0,0 +1,446 @@
+# Copyright (C) 2013-2014 Canonical Ltd.
+#
+# Author: Ryan Harper <ryan.harper@canonical.com>
+#
+# Curtin is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the
+# Free Software Foundation, either version 3 of the License, or (at your
+# option) any later version.
+#
+# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU Affero General Public License for
+# more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Curtin.  If not, see <http://www.gnu.org/licenses/>.
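For context, a sketch of how a caller could construct the network-config= kernel parameter that read_kernel_cmdline_config() above consumes (the yaml module is an assumption here; cloud-init itself decodes with util.load_yaml):

import base64
import gzip
import io

import yaml

cfg = {'version': 1, 'config': [
    {'type': 'physical', 'name': 'eth0',
     'subnets': [{'type': 'dhcp'}]}]}

buf = io.BytesIO()
with gzip.GzipFile(mode='wb', fileobj=buf) as gz:
    gz.write(yaml.dump(cfg).encode())
kernel_arg = 'network-config=' + base64.b64encode(buf.getvalue()).decode()
# _b64dgz() reverses this: base64-decode, then transparently gunzip.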
+ +from cloudinit import log as logging +from cloudinit import util +from cloudinit.util import yaml_dumps as dump_config + +LOG = logging.getLogger(__name__) + +NETWORK_STATE_VERSION = 1 +NETWORK_STATE_REQUIRED_KEYS = { + 1: ['version', 'config', 'network_state'], +} + + +def from_state_file(state_file): + network_state = None + state = util.read_conf(state_file) + network_state = NetworkState() + network_state.load(state) + + return network_state + + +class NetworkState: + def __init__(self, version=NETWORK_STATE_VERSION, config=None): + self.version = version + self.config = config + self.network_state = { + 'interfaces': {}, + 'routes': [], + 'dns': { + 'nameservers': [], + 'search': [], + } + } + self.command_handlers = self.get_command_handlers() + + def get_command_handlers(self): + METHOD_PREFIX = 'handle_' + methods = filter(lambda x: callable(getattr(self, x)) and + x.startswith(METHOD_PREFIX), dir(self)) + handlers = {} + for m in methods: + key = m.replace(METHOD_PREFIX, '') + handlers[key] = getattr(self, m) + + return handlers + + def dump(self): + state = { + 'version': self.version, + 'config': self.config, + 'network_state': self.network_state, + } + return dump_config(state) + + def load(self, state): + if 'version' not in state: + LOG.error('Invalid state, missing version field') + raise Exception('Invalid state, missing version field') + + required_keys = NETWORK_STATE_REQUIRED_KEYS[state['version']] + if not self.valid_command(state, required_keys): + msg = 'Invalid state, missing keys: {}'.format(required_keys) + LOG.error(msg) + raise Exception(msg) + + # v1 - direct attr mapping, except version + for key in [k for k in required_keys if k not in ['version']]: + setattr(self, key, state[key]) + self.command_handlers = self.get_command_handlers() + + def dump_network_state(self): + return dump_config(self.network_state) + + def parse_config(self): + # rebuild network state + for command in self.config: + handler = self.command_handlers.get(command['type']) + handler(command) + + def valid_command(self, command, required_keys): + if not required_keys: + return False + + found_keys = [key for key in command.keys() if key in required_keys] + return len(found_keys) == len(required_keys) + + def handle_physical(self, command): + ''' + command = { + 'type': 'physical', + 'mac_address': 'c0:d6:9f:2c:e8:80', + 'name': 'eth0', + 'subnets': [ + {'type': 'dhcp4'} + ] + } + ''' + required_keys = [ + 'name', + ] + if not self.valid_command(command, required_keys): + LOG.warn('Skipping Invalid command: {}'.format(command)) + LOG.debug(self.dump_network_state()) + return + + interfaces = self.network_state.get('interfaces') + iface = interfaces.get(command['name'], {}) + for param, val in command.get('params', {}).items(): + iface.update({param: val}) + + # convert subnet ipv6 netmask to cidr as needed + subnets = command.get('subnets') + if subnets: + for subnet in subnets: + if subnet['type'] == 'static': + if 'netmask' in subnet and ':' in subnet['address']: + subnet['netmask'] = mask2cidr(subnet['netmask']) + for route in subnet.get('routes', []): + if 'netmask' in route: + route['netmask'] = mask2cidr(route['netmask']) + iface.update({ + 'name': command.get('name'), + 'type': command.get('type'), + 'mac_address': command.get('mac_address'), + 'inet': 'inet', + 'mode': 'manual', + 'mtu': command.get('mtu'), + 'address': None, + 'gateway': None, + 'subnets': subnets, + }) + self.network_state['interfaces'].update({command.get('name'): iface}) + self.dump_network_state() + + def 
handle_vlan(self, command): + ''' + auto eth0.222 + iface eth0.222 inet static + address 10.10.10.1 + netmask 255.255.255.0 + hwaddress ether BC:76:4E:06:96:B3 + vlan-raw-device eth0 + ''' + required_keys = [ + 'name', + 'vlan_link', + 'vlan_id', + ] + if not self.valid_command(command, required_keys): + print('Skipping Invalid command: {}'.format(command)) + print(self.dump_network_state()) + return + + interfaces = self.network_state.get('interfaces') + self.handle_physical(command) + iface = interfaces.get(command.get('name'), {}) + iface['vlan-raw-device'] = command.get('vlan_link') + iface['vlan_id'] = command.get('vlan_id') + interfaces.update({iface['name']: iface}) + + def handle_bond(self, command): + ''' + #/etc/network/interfaces + auto eth0 + iface eth0 inet manual + bond-master bond0 + bond-mode 802.3ad + + auto eth1 + iface eth1 inet manual + bond-master bond0 + bond-mode 802.3ad + + auto bond0 + iface bond0 inet static + address 192.168.0.10 + gateway 192.168.0.1 + netmask 255.255.255.0 + bond-slaves none + bond-mode 802.3ad + bond-miimon 100 + bond-downdelay 200 + bond-updelay 200 + bond-lacp-rate 4 + ''' + required_keys = [ + 'name', + 'bond_interfaces', + 'params', + ] + if not self.valid_command(command, required_keys): + print('Skipping Invalid command: {}'.format(command)) + print(self.dump_network_state()) + return + + self.handle_physical(command) + interfaces = self.network_state.get('interfaces') + iface = interfaces.get(command.get('name'), {}) + for param, val in command.get('params').items(): + iface.update({param: val}) + iface.update({'bond-slaves': 'none'}) + self.network_state['interfaces'].update({iface['name']: iface}) + + # handle bond slaves + for ifname in command.get('bond_interfaces'): + if ifname not in interfaces: + cmd = { + 'name': ifname, + 'type': 'bond', + } + # inject placeholder + self.handle_physical(cmd) + + interfaces = self.network_state.get('interfaces') + bond_if = interfaces.get(ifname) + bond_if['bond-master'] = command.get('name') + # copy in bond config into slave + for param, val in command.get('params').items(): + bond_if.update({param: val}) + self.network_state['interfaces'].update({ifname: bond_if}) + + def handle_bridge(self, command): + ''' + auto br0 + iface br0 inet static + address 10.10.10.1 + netmask 255.255.255.0 + bridge_ports eth0 eth1 + bridge_stp off + bridge_fd 0 + bridge_maxwait 0 + + bridge_params = [ + "bridge_ports", + "bridge_ageing", + "bridge_bridgeprio", + "bridge_fd", + "bridge_gcint", + "bridge_hello", + "bridge_hw", + "bridge_maxage", + "bridge_maxwait", + "bridge_pathcost", + "bridge_portprio", + "bridge_stp", + "bridge_waitport", + ] + ''' + required_keys = [ + 'name', + 'bridge_interfaces', + 'params', + ] + if not self.valid_command(command, required_keys): + print('Skipping Invalid command: {}'.format(command)) + print(self.dump_network_state()) + return + + # find one of the bridge port ifaces to get mac_addr + # handle bridge_slaves + interfaces = self.network_state.get('interfaces') + for ifname in command.get('bridge_interfaces'): + if ifname in interfaces: + continue + + cmd = { + 'name': ifname, + } + # inject placeholder + self.handle_physical(cmd) + + interfaces = self.network_state.get('interfaces') + self.handle_physical(command) + iface = interfaces.get(command.get('name'), {}) + iface['bridge_ports'] = command['bridge_interfaces'] + for param, val in command.get('params').items(): + iface.update({param: val}) + + interfaces.update({iface['name']: iface}) + + def handle_nameserver(self, 
command): + required_keys = [ + 'address', + ] + if not self.valid_command(command, required_keys): + print('Skipping Invalid command: {}'.format(command)) + print(self.dump_network_state()) + return + + dns = self.network_state.get('dns') + if 'address' in command: + addrs = command['address'] + if not type(addrs) == list: + addrs = [addrs] + for addr in addrs: + dns['nameservers'].append(addr) + if 'search' in command: + paths = command['search'] + if not isinstance(paths, list): + paths = [paths] + for path in paths: + dns['search'].append(path) + + def handle_route(self, command): + required_keys = [ + 'destination', + ] + if not self.valid_command(command, required_keys): + print('Skipping Invalid command: {}'.format(command)) + print(self.dump_network_state()) + return + + routes = self.network_state.get('routes') + network, cidr = command['destination'].split("/") + netmask = cidr2mask(int(cidr)) + route = { + 'network': network, + 'netmask': netmask, + 'gateway': command.get('gateway'), + 'metric': command.get('metric'), + } + routes.append(route) + + +def cidr2mask(cidr): + mask = [0, 0, 0, 0] + for i in list(range(0, cidr)): + idx = int(i / 8) + mask[idx] = mask[idx] + (1 << (7 - i % 8)) + return ".".join([str(x) for x in mask]) + + +def ipv4mask2cidr(mask): + if '.' not in mask: + return mask + return sum([bin(int(x)).count('1') for x in mask.split('.')]) + + +def ipv6mask2cidr(mask): + if ':' not in mask: + return mask + + bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00, + 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc, + 0xfffe, 0xffff] + cidr = 0 + for word in mask.split(':'): + if not word or int(word, 16) == 0: + break + cidr += bitCount.index(int(word, 16)) + + return cidr + + +def mask2cidr(mask): + if ':' in mask: + return ipv6mask2cidr(mask) + elif '.' 
in mask: + return ipv4mask2cidr(mask) + else: + return mask + + +if __name__ == '__main__': + import sys + import random + from cloudinit import net + + def load_config(nc): + version = nc.get('version') + config = nc.get('config') + return (version, config) + + def test_parse(network_config): + (version, config) = load_config(network_config) + ns1 = NetworkState(version=version, config=config) + ns1.parse_config() + random.shuffle(config) + ns2 = NetworkState(version=version, config=config) + ns2.parse_config() + print("----NS1-----") + print(ns1.dump_network_state()) + print() + print("----NS2-----") + print(ns2.dump_network_state()) + print("NS1 == NS2 ?=> {}".format( + ns1.network_state == ns2.network_state)) + eni = net.render_interfaces(ns2.network_state) + print(eni) + udev_rules = net.render_persistent_net(ns2.network_state) + print(udev_rules) + + def test_dump_and_load(network_config): + print("Loading network_config into NetworkState") + (version, config) = load_config(network_config) + ns1 = NetworkState(version=version, config=config) + ns1.parse_config() + print("Dumping state to file") + ns1_dump = ns1.dump() + ns1_state = "/tmp/ns1.state" + with open(ns1_state, "w+") as f: + f.write(ns1_dump) + + print("Loading state from file") + ns2 = from_state_file(ns1_state) + print("NS1 == NS2 ?=> {}".format( + ns1.network_state == ns2.network_state)) + + def test_output(network_config): + (version, config) = load_config(network_config) + ns1 = NetworkState(version=version, config=config) + ns1.parse_config() + random.shuffle(config) + ns2 = NetworkState(version=version, config=config) + ns2.parse_config() + print("NS1 == NS2 ?=> {}".format( + ns1.network_state == ns2.network_state)) + eni_1 = net.render_interfaces(ns1.network_state) + eni_2 = net.render_interfaces(ns2.network_state) + print(eni_1) + print(eni_2) + print("eni_1 == eni_2 ?=> {}".format( + eni_1 == eni_2)) + + y = util.read_conf(sys.argv[1]) + network_config = y.get('network') + test_parse(network_config) + test_dump_and_load(network_config) + test_output(network_config) diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py new file mode 100644 index 00000000..6435ace0 --- /dev/null +++ b/cloudinit/net/udev.py @@ -0,0 +1,54 @@ +# Copyright (C) 2015 Canonical Ltd. +# +# Author: Ryan Harper <ryan.harper@canonical.com> +# +# Curtin is free software: you can redistribute it and/or modify it under +# the terms of the GNU Affero General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# Curtin is distributed in the hope that it will be useful, but WITHOUT ANY +# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for +# more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with Curtin. If not, see <http://www.gnu.org/licenses/>. 
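Before the udev helpers that follow, a few worked examples of the netmask conversions defined above:

from cloudinit.net.network_state import (
    cidr2mask, ipv4mask2cidr, ipv6mask2cidr, mask2cidr)

cidr2mask(24)                           # -> '255.255.255.0'
cidr2mask(22)                           # -> '255.255.252.0'
ipv4mask2cidr('255.255.255.0')          # -> 24
ipv6mask2cidr('ffff:ffff:ffff:ffff::')  # -> 64
mask2cidr('255.255.252.0')              # -> 22 (dispatches on '.' vs ':')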
+
+
+def compose_udev_equality(key, value):
+    """Return a udev comparison clause, like `ACTION=="add"`."""
+    assert key == key.upper()
+    return '%s=="%s"' % (key, value)
+
+
+def compose_udev_attr_equality(attribute, value):
+    """Return a udev attribute comparison clause, like `ATTR{type}=="1"`."""
+    assert attribute == attribute.lower()
+    return 'ATTR{%s}=="%s"' % (attribute, value)
+
+
+def compose_udev_setting(key, value):
+    """Return a udev assignment clause, like `NAME="eth0"`."""
+    assert key == key.upper()
+    return '%s="%s"' % (key, value)
+
+
+def generate_udev_rule(interface, mac):
+    """Return a udev rule to set the name of network interface with `mac`.
+
+    The rule ends up as a single line looking something like:
+
+    SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*",
+    ATTR{address}=="ff:ee:dd:cc:bb:aa", NAME="eth0"
+    """
+    rule = ', '.join([
+        compose_udev_equality('SUBSYSTEM', 'net'),
+        compose_udev_equality('ACTION', 'add'),
+        compose_udev_equality('DRIVERS', '?*'),
+        compose_udev_attr_equality('address', mac),
+        compose_udev_setting('NAME', interface),
+    ])
+    return '%s\n' % rule
+
+# vi: ts=4 expandtab syntax=python
diff --git a/cloudinit/registry.py b/cloudinit/registry.py
new file mode 100644
index 00000000..04368ddf
--- /dev/null
+++ b/cloudinit/registry.py
@@ -0,0 +1,37 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init.  See LICENCE file for license information.
+#
+# vi: ts=4 expandtab
+import copy
+
+
+class DictRegistry(object):
+    """A simple registry for a mapping of objects."""
+
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        self._items = {}
+
+    def register_item(self, key, item):
+        """Add item to the registry."""
+        if key in self._items:
+            raise ValueError(
+                'Item already registered with key {0}'.format(key))
+        self._items[key] = item
+
+    def unregister_item(self, key, force=True):
+        """Remove item from the registry."""
+        if key in self._items:
+            del self._items[key]
+        elif not force:
+            raise KeyError("%s: key not present to unregister" % key)
+
+    @property
+    def registered_items(self):
+        """All the items that have been registered.
+
+        This cannot be used to modify the contents of the registry.
+        """
+        return copy.copy(self._items)
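A minimal usage sketch of DictRegistry as just defined (the registered value is an arbitrary placeholder):

from cloudinit.registry import DictRegistry

registry = DictRegistry()
registry.register_item('log', object())   # any object may be registered
registry.registered_items                 # -> {'log': <object ...>} (a copy)
registry.unregister_item('log')           # silent if absent, since force=True
# registering the same key twice raises ValueError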
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
new file mode 100644
index 00000000..6b41ae61
--- /dev/null
+++ b/cloudinit/reporting/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init.  See LICENCE file for license information.
+#
+"""
+cloud-init reporting framework
+
+The reporting framework is intended to allow all parts of cloud-init to
+report events in a structured manner.
+"""
+
+from ..registry import DictRegistry
+from .handlers import available_handlers
+
+DEFAULT_CONFIG = {
+    'logging': {'type': 'log'},
+}
+
+
+def update_configuration(config):
+    """Update the instantiated_handler_registry.
+
+    :param config:
+        The dictionary containing changes to apply.  If a key is given
+        with a False-ish value, the registered handler matching that name
+        will be unregistered.
+    """
+    for handler_name, handler_config in config.items():
+        if not handler_config:
+            instantiated_handler_registry.unregister_item(
+                handler_name, force=True)
+            continue
+        handler_config = handler_config.copy()
+        cls = available_handlers.registered_items[handler_config.pop('type')]
+        instantiated_handler_registry.unregister_item(handler_name)
+        instance = cls(**handler_config)
+        instantiated_handler_registry.register_item(handler_name, instance)
+
+
+instantiated_handler_registry = DictRegistry()
+update_configuration(DEFAULT_CONFIG)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
new file mode 100644
index 00000000..2f767f64
--- /dev/null
+++ b/cloudinit/reporting/events.py
@@ -0,0 +1,246 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init.  See LICENCE file for license information.
+#
+"""
+events for reporting.
+
+The events here are designed to be used with reporting.
+They can be published to registered handlers with report_event.
+"""
+import base64
+import os.path
+import time
+
+from . import instantiated_handler_registry
+
+FINISH_EVENT_TYPE = 'finish'
+START_EVENT_TYPE = 'start'
+
+DEFAULT_EVENT_ORIGIN = 'cloudinit'
+
+
+class _nameset(set):
+    def __getattr__(self, name):
+        if name in self:
+            return name
+        raise AttributeError("%s not a valid value" % name)
+
+
+status = _nameset(("SUCCESS", "WARN", "FAIL"))
+
+
+class ReportingEvent(object):
+    """Encapsulation of event formatting."""
+
+    def __init__(self, event_type, name, description,
+                 origin=DEFAULT_EVENT_ORIGIN, timestamp=time.time()):
+        self.event_type = event_type
+        self.name = name
+        self.description = description
+        self.origin = origin
+        self.timestamp = timestamp
+
+    def as_string(self):
+        """The event represented as a string."""
+        return '{0}: {1}: {2}'.format(
+            self.event_type, self.name, self.description)
+
+    def as_dict(self):
+        """The event represented as a dictionary."""
+        return {'name': self.name, 'description': self.description,
+                'event_type': self.event_type, 'origin': self.origin,
+                'timestamp': self.timestamp}
+
+
+class FinishReportingEvent(ReportingEvent):
+
+    def __init__(self, name, description, result=status.SUCCESS,
+                 post_files=None):
+        super(FinishReportingEvent, self).__init__(
+            FINISH_EVENT_TYPE, name, description)
+        self.result = result
+        if post_files is None:
+            post_files = []
+        self.post_files = post_files
+        if result not in status:
+            raise ValueError("Invalid result: %s" % result)
+
+    def as_string(self):
+        return '{0}: {1}: {2}: {3}'.format(
+            self.event_type, self.name, self.result, self.description)
+
+    def as_dict(self):
+        """The event represented as json friendly."""
+        data = super(FinishReportingEvent, self).as_dict()
+        data['result'] = self.result
+        if self.post_files:
+            data['files'] = _collect_file_info(self.post_files)
+        return data
+
+
+def report_event(event):
+    """Report an event to all registered event handlers.
+
+    This should generally be called via one of the other functions in
+    the reporting module.
+
+    :param event:
+        The event to publish; an instance of :py:class:`ReportingEvent`.
+    """
+    for _, handler in instantiated_handler_registry.registered_items.items():
+        handler.publish_event(event)
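A short usage sketch of the pieces assembled so far; the 'print' handler type comes from handlers.py later in this diff, and ReportEventStack is defined just below:

from cloudinit import reporting
from cloudinit.reporting import events

# route events to stdout alongside the default 'log' handler
reporting.update_configuration({'stdout': {'type': 'print'}})

events.report_start_event('modules-config', 'running config modules')
events.report_finish_event('modules-config', 'running config modules',
                           result=events.status.SUCCESS)

# or, equivalently, via the context manager defined below; an exception
# inside the block reports a finish event with the FAIL result
with events.ReportEventStack('modules-config', 'running config modules'):
    pass  # do the work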
+
+
+def report_finish_event(event_name, event_description,
+                        result=status.SUCCESS, post_files=None):
+    """Report a "finish" event.
+
+    See :py:func:`.report_event` for parameter details.
+    """
+    event = FinishReportingEvent(event_name, event_description, result,
+                                 post_files=post_files)
+    return report_event(event)
+
+
+def report_start_event(event_name, event_description):
+    """Report a "start" event.
+
+    :param event_name:
+        The name of the event; this should be a topic which events would
+        share (e.g. it will be the same for start and finish events).
+
+    :param event_description:
+        A human-readable description of the event that has occurred.
+    """
+    event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
+    return report_event(event)
+
+
+class ReportEventStack(object):
+    """Context manager for using :py:func:`report_event`.
+
+    This enables calling :py:func:`report_start_event` and
+    :py:func:`report_finish_event` through a context manager.
+
+    :param name:
+        the name of the event
+
+    :param description:
+        the event's description, passed on to :py:func:`report_start_event`
+
+    :param message:
+        the description to use for the finish event; defaults to
+        the value of ``description``.
+
+    :param parent:
+    :type parent: :py:class:`ReportEventStack` or None
+        The parent of this event.  The parent is populated with
+        results of all its children.  The name used in reporting
+        is <parent.name>/<name>
+
+    :param reporting_enabled:
+        Indicates if reporting events should be generated.
+        If not provided, defaults to the parent's value, or True if no parent
+        is provided.
+
+    :param result_on_exception:
+        The result value to set if an exception is caught.  The default
+        value is FAIL.
+    """
+    def __init__(self, name, description, message=None, parent=None,
+                 reporting_enabled=None, result_on_exception=status.FAIL,
+                 post_files=None):
+        self.parent = parent
+        self.name = name
+        self.description = description
+        self.message = message
+        self.result_on_exception = result_on_exception
+        self.result = status.SUCCESS
+        if post_files is None:
+            post_files = []
+        self.post_files = post_files
+
+        # use the parent's reporting value if not provided
+        if reporting_enabled is None:
+            if parent:
+                reporting_enabled = parent.reporting_enabled
+            else:
+                reporting_enabled = True
+        self.reporting_enabled = reporting_enabled
+
+        if parent:
+            self.fullname = '/'.join((parent.fullname, name,))
+        else:
+            self.fullname = self.name
+        self.children = {}
+
+    def __repr__(self):
+        return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
+                (self.name, self.description, self.reporting_enabled))
+
+    def __enter__(self):
+        self.result = status.SUCCESS
+        if self.reporting_enabled:
+            report_start_event(self.fullname, self.description)
+        if self.parent:
+            self.parent.children[self.name] = (None, None)
+        return self
+
+    def _childrens_finish_info(self):
+        for cand_result in (status.FAIL, status.WARN):
+            for name, (value, msg) in self.children.items():
+                if value == cand_result:
+                    return (value, self.message)
+        return (self.result, self.message)
+
+    @property
+    def result(self):
+        return self._result
+
+    @result.setter
+    def result(self, value):
+        if value not in status:
+            raise ValueError("'%s' not a valid result" % value)
+        self._result = value
+
+    @property
+    def message(self):
+        if self._message is not None:
+            return self._message
+        return self.description
+
+    @message.setter
+    def message(self, value):
+        self._message = value
+
+    def _finish_info(self, exc):
+        # return tuple of description, and value
+        if exc:
+            return (self.result_on_exception, self.message)
+        return self._childrens_finish_info()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        (result, msg) = self._finish_info(exc_value)
+        if self.parent:
self.parent.children[self.name] = (result, msg) + if self.reporting_enabled: + report_finish_event(self.fullname, msg, result, + post_files=self.post_files) + + +def _collect_file_info(files): + if not files: + return None + ret = [] + for fname in files: + if not os.path.isfile(fname): + content = None + else: + with open(fname, "rb") as fp: + content = base64.b64encode(fp.read()).decode() + ret.append({'path': fname, 'content': content, + 'encoding': 'base64'}) + return ret + +# vi: ts=4 expandtab syntax=python diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py new file mode 100644 index 00000000..3212d173 --- /dev/null +++ b/cloudinit/reporting/handlers.py @@ -0,0 +1,91 @@ +# vi: ts=4 expandtab + +import abc +import json +import six + +from ..registry import DictRegistry +from .. import (url_helper, util) +from .. import log as logging + + +LOG = logging.getLogger(__name__) + + +@six.add_metaclass(abc.ABCMeta) +class ReportingHandler(object): + """Base class for report handlers. + + Implement :meth:`~publish_event` for controlling what + the handler does with an event. + """ + + @abc.abstractmethod + def publish_event(self, event): + """Publish an event.""" + + +class LogHandler(ReportingHandler): + """Publishes events to the cloud-init log at the ``DEBUG`` log level.""" + + def __init__(self, level="DEBUG"): + super(LogHandler, self).__init__() + if isinstance(level, int): + pass + else: + input_level = level + try: + level = getattr(logging, level.upper()) + except: + LOG.warn("invalid level '%s', using WARN", input_level) + level = logging.WARN + self.level = level + + def publish_event(self, event): + logger = logging.getLogger( + '.'.join(['cloudinit', 'reporting', event.event_type, event.name])) + logger.log(self.level, event.as_string()) + + +class PrintHandler(ReportingHandler): + """Print the event as a string.""" + + def publish_event(self, event): + print(event.as_string()) + + +class WebHookHandler(ReportingHandler): + def __init__(self, endpoint, consumer_key=None, token_key=None, + token_secret=None, consumer_secret=None, timeout=None, + retries=None): + super(WebHookHandler, self).__init__() + + if any([consumer_key, token_key, token_secret, consumer_secret]): + self.oauth_helper = url_helper.OauthUrlHelper( + consumer_key=consumer_key, token_key=token_key, + token_secret=token_secret, consumer_secret=consumer_secret) + else: + self.oauth_helper = None + self.endpoint = endpoint + self.timeout = timeout + self.retries = retries + self.ssl_details = util.fetch_ssl_details() + + def publish_event(self, event): + if self.oauth_helper: + readurl = self.oauth_helper.readurl + else: + readurl = url_helper.readurl + try: + return readurl( + self.endpoint, data=json.dumps(event.as_dict()), + timeout=self.timeout, + retries=self.retries, ssl_details=self.ssl_details) + except: + LOG.warn("failed posting event: %s" % event.as_string()) + + +available_handlers = DictRegistry() +available_handlers.register_item('log', LogHandler) +available_handlers.register_item('print', PrintHandler) +available_handlers.register_item('webhook', WebHookHandler) diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b61e5613..8c258ea1 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -42,6 +42,7 @@ CFG_BUILTIN = { 'CloudSigma', 'CloudStack', 'SmartOS', + 'Bigstep', # At the end to act as a 'catch' when none of the above work... 
'None', ], diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index fb528ae5..cd61df31 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -41,7 +41,7 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' # Shell command lists CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy'] -CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5'] +CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5'] META_DATA_NOT_SUPPORTED = { 'block-device-mapping': {}, @@ -284,7 +284,7 @@ class DataSourceAltCloud(sources.DataSource): # In the future 'dsmode' like behavior can be added to offer user # the ability to run before networking. datasources = [ - (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index ff950deb..698f4cac 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -31,17 +31,16 @@ from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import sources from cloudinit import util -from cloudinit.sources.helpers.azure import ( - get_metadata_from_fabric, iid_from_shared_config_content) +from cloudinit.sources.helpers.azure import get_metadata_from_fabric LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] -BOUNCE_COMMAND = ['sh', '-xc', +BOUNCE_COMMAND = [ + 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] -DATA_DIR_CLEAN_LIST = ['SharedConfig.xml'] BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START, @@ -93,9 +92,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ policy = cfg['hostname_bounce']['policy'] previous_hostname = get_hostname(hostname_command) - if (not util.is_true(cfg.get('set_hostname')) - or util.is_false(policy) - or (previous_hostname == temp_hostname and policy != 'force')): + if (not util.is_true(cfg.get('set_hostname')) or + util.is_false(policy) or + (previous_hostname == temp_hostname and policy != 'force')): yield None return set_hostname(temp_hostname, hostname_command) @@ -125,8 +124,8 @@ class DataSourceAzureNet(sources.DataSource): with temporary_hostname(temp_hostname, self.ds_cfg, hostname_command=hostname_command) \ as previous_hostname: - if (previous_hostname is not None - and util.is_true(self.ds_cfg.get('set_hostname'))): + if (previous_hostname is not None and + util.is_true(self.ds_cfg.get('set_hostname'))): cfg = self.ds_cfg['hostname_bounce'] try: perform_hostname_bounce(hostname=temp_hostname, @@ -144,29 +143,27 @@ class DataSourceAzureNet(sources.DataSource): self.ds_cfg['agent_command']) ddir = self.ds_cfg['data_dir'] - shcfgxml = os.path.join(ddir, "SharedConfig.xml") - wait_for = [shcfgxml] fp_files = [] + key_value = None for pk in self.cfg.get('_pubkeys', []): - bname = str(pk['fingerprint'] + ".crt") - fp_files += [os.path.join(ddir, bname)] + if pk.get('value', None): + key_value = pk['value'] + LOG.debug("ssh authentication: using value from fabric") + else: + bname = str(pk['fingerprint'] + ".crt") + fp_files += [os.path.join(ddir, bname)] + LOG.debug("ssh authentication: " + "using fingerprint from fabirc") missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", func=wait_for_files, - args=(wait_for + 
fp_files,)) + args=(fp_files,)) if len(missing): LOG.warn("Did not find files, but going on: %s", missing) metadata = {} - if shcfgxml in missing: - LOG.warn("SharedConfig.xml missing, using static instance-id") - else: - try: - metadata['instance-id'] = iid_from_shared_config(shcfgxml) - except ValueError as e: - LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) - metadata['public-keys'] = pubkeys_from_crt_files(fp_files) + metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata def get_data(self): @@ -222,21 +219,6 @@ class DataSourceAzureNet(sources.DataSource): user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) - if found != ddir: - cached_ovfenv = util.load_file( - os.path.join(ddir, 'ovf-env.xml'), quiet=True, decode=False) - if cached_ovfenv != files['ovf-env.xml']: - # source was not walinux-agent's datadir, so we have to clean - # up so 'wait_for_files' doesn't return early due to stale data - cleaned = [] - for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]: - if os.path.exists(f): - util.del_file(f) - cleaned.append(f) - if cleaned: - LOG.info("removed stale file(s) in '%s': %s", - ddir, str(cleaned)) - # walinux agent writes files world readable, but expects # the directory to be protected. write_files(ddir, files, dirmode=0o700) @@ -252,6 +234,7 @@ class DataSourceAzureNet(sources.DataSource): " on Azure.", exc_info=True) return False + self.metadata['instance-id'] = util.read_dmi_data('system-uuid') self.metadata.update(fabric_data) found_ephemeral = find_fabric_formatted_ephemeral_disk() @@ -271,6 +254,10 @@ class DataSourceAzureNet(sources.DataSource): def get_config_obj(self): return self.cfg + def check_instance_id(self, sys_cfg): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def count_files(mp): return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*')) @@ -497,7 +484,8 @@ def load_azure_ovf_pubkeys(sshnode): for pk_node in pubkeys: if not pk_node.hasChildNodes(): continue - cur = {'fingerprint': "", 'path': ""} + + cur = {'fingerprint': "", 'path': "", 'value': ""} for child in pk_node.childNodes: if child.nodeType == text_node or not child.localName: continue @@ -524,7 +512,7 @@ def read_azure_ovf(contents): raise BrokenAzureDataSource("invalid xml: %s" % e) results = find_child(dom.documentElement, - lambda n: n.localName == "ProvisioningSection") + lambda n: n.localName == "ProvisioningSection") if len(results) == 0: raise NonAzureDataSource("No ProvisioningSection") @@ -534,7 +522,8 @@ def read_azure_ovf(contents): provSection = results[0] lpcs_nodes = find_child(provSection, - lambda n: n.localName == "LinuxProvisioningConfigurationSet") + lambda n: + n.localName == "LinuxProvisioningConfigurationSet") if len(results) == 0: raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") @@ -641,12 +630,6 @@ def load_azure_ds_dir(source_dir): return (md, ud, cfg, {'ovf-env.xml': contents}) -def iid_from_shared_config(path): - with open(path, "rb") as fp: - content = fp.read() - return iid_from_shared_config_content(content) - - class BrokenAzureDataSource(Exception): pass @@ -657,7 +640,7 @@ class NonAzureDataSource(Exception): # Used to match classes to dependencies datasources = [ - (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git 
a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py new file mode 100644 index 00000000..b5ee4129 --- /dev/null +++ b/cloudinit/sources/DataSourceBigstep.py @@ -0,0 +1,57 @@ +# +# Copyright (C) 2015-2016 Bigstep Cloud Ltd. +# +# Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com> +# + +import json +import errno + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit import url_helper + +LOG = logging.getLogger(__name__) + + +class DataSourceBigstep(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.metadata = {} + self.vendordata_raw = "" + self.userdata_raw = "" + + def get_data(self, apply_filter=False): + url = get_url_from_file() + if url is None: + return False + response = url_helper.readurl(url) + decoded = json.loads(response.contents) + self.metadata = decoded["metadata"] + self.vendordata_raw = decoded["vendordata_raw"] + self.userdata_raw = decoded["userdata_raw"] + return True + + +def get_url_from_file(): + try: + content = util.load_file("/var/lib/cloud/data/seed/bigstep/url") + except IOError as e: + # If the file doesn't exist, then the server probably isn't a Bigstep + # instance; otherwise, another problem exists which needs investigation + if e.errno == errno.ENOENT: + return None + else: + raise + return content + +# Used to match classes to dependencies +datasources = [ + (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index d0cac5bb..455a4652 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -41,10 +41,12 @@ class CloudStackPasswordServerClient(object): """ Implements password fetching from the CloudStack password server. - http://cloudstack-administration.readthedocs.org/en/latest/templates.html#adding-password-management-to-your-templates + http://cloudstack-administration.readthedocs.org/ + en/latest/templates.html#adding-password-management-to-your-templates has documentation about the system. This implementation is following that found at - https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian + https://github.com/shankerbalan/cloudstack-scripts/ + blob/master/cloud-set-guest-password-debian """ def __init__(self, virtual_router_address): @@ -87,8 +89,6 @@ class DataSourceCloudStack(sources.DataSource): def _get_url_settings(self): mcfg = self.ds_cfg - if not mcfg: - mcfg = {} max_wait = 120 try: max_wait = int(mcfg.get("max_wait", max_wait)) @@ -107,10 +107,6 @@ class DataSourceCloudStack(sources.DataSource): return (max_wait, timeout) def wait_for_metadata_service(self): - mcfg = self.ds_cfg - if not mcfg: - mcfg = {} - (max_wait, timeout) = self._get_url_settings() urls = [uhelp.combine_url(self.metadata_address, diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index eb474079..3fa62ef3 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
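Stepping back to the new Bigstep datasource above: the contract it expects is small, a URL stored at /var/lib/cloud/data/seed/bigstep/url whose response body is JSON with exactly the three keys consumed in get_data(). A sketch of a conforming response (endpoint and values hypothetical):

# contents of /var/lib/cloud/data/seed/bigstep/url (a single line):
#     http://203.0.113.10/instance-metadata
# the JSON document served at that URL would look roughly like:
response_body = {
    "metadata": {"instance-id": "i-0123456789abcdef"},
    "vendordata_raw": "",
    "userdata_raw": "#cloud-config\n{}\n",
}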
+import copy
 import os
 
 from cloudinit import log as logging
@@ -39,7 +40,7 @@ FS_TYPES = ('vfat', 'iso9660')
 LABEL_TYPES = ('config-2',)
 POSSIBLE_MOUNTS = ('sr', 'cd')
 OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
-                        for i in range(0, 2)))
+                         for i in range(0, 2)))
 
 
 class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@@ -50,6 +51,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
         self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
         self.version = None
         self.ec2_metadata = None
+        self._network_config = None
+        self.network_json = None
         self.files = {}
 
     def __str__(self):
@@ -144,8 +147,25 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
             LOG.warn("Invalid content in vendor-data: %s", e)
             self.vendordata_raw = None
 
+        try:
+            self.network_json = results.get('networkdata')
+        except ValueError as e:
+            LOG.warn("Invalid content in network-data: %s", e)
+            self.network_json = None
+
         return True
 
+    def check_instance_id(self):
+        # quickly check (local only) if self.instance_id is still valid
+        return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+    @property
+    def network_config(self):
+        if self._network_config is None:
+            if self.network_json is not None:
+                self._network_config = convert_network_data(self.network_json)
+        return self._network_config
+
 
 class DataSourceConfigDriveNet(DataSourceConfigDrive):
     def __init__(self, sys_cfg, distro, paths):
@@ -283,3 +303,122 @@ datasources = [
 # Return a list of data sources that match this set of dependencies
 def get_datasource_list(depends):
     return sources.list_from_depends(depends, datasources)
+
+
+# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
+def convert_network_data(network_json=None):
+    """Return a dictionary of network_config by parsing provided
+       OpenStack ConfigDrive NetworkData json format
+
+    OpenStack network_data.json provides a 3-element dictionary:
+      - "links" (links are network devices, physical or virtual)
+      - "networks" (networks are ip network configurations for one or more
+         links)
+      - "services" (non-ip services, like dns)
+
+    networks and links are combined via network items referencing specific
+    links via a 'link_id' which maps to a links 'id' field.
+
+    To convert this format to network_config yaml, we first iterate over the
+    links and then walk the network list to determine if any of the networks
+    utilize the current link; if so we generate a subnet entry for the device.
+
+    We also need to map network_data.json fields to network_config fields.
+    For example, the network_data links 'id' field is equivalent to the
+    network_config 'name' field for devices.  We apply more of this mapping
+    to the various link types that we encounter.
+
+    There are additional fields that are populated in the network_data.json
+    from OpenStack that are not relevant to network_config yaml, so we
+    enumerate a dictionary of valid keys for network_yaml and apply filtering
+    to drop these superfluous keys from the network_config yaml.
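To make the mapping concrete, a small hypothetical network_data.json fragment and the network_config it would produce under the rules described above:

network_json = {
    'links': [{'id': 'tap0', 'type': 'ethernet',
               'ethernet_mac_address': 'fa:16:3e:aa:bb:cc'}],
    'networks': [{'link': 'tap0', 'type': 'ipv4',
                  'ip_address': '10.0.0.5', 'netmask': '255.255.255.0'}],
    'services': [{'type': 'dns', 'address': '8.8.8.8'}],
}
# convert_network_data(network_json) would yield roughly:
# {'version': 1, 'config': [
#     {'type': 'physical', 'name': 'tap0',
#      'mac_address': 'fa:16:3e:aa:bb:cc',
#      'subnets': [{'type': 'static', 'address': '10.0.0.5',
#                   'netmask': '255.255.255.0'}]},
#     {'type': 'nameserver', 'address': '8.8.8.8'}]}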
+ """ + if network_json is None: + return None + + # dict of network_config key for filtering network_json + valid_keys = { + 'physical': [ + 'name', + 'type', + 'mac_address', + 'subnets', + 'params', + ], + 'subnet': [ + 'type', + 'address', + 'netmask', + 'broadcast', + 'metric', + 'gateway', + 'pointopoint', + 'mtu', + 'scope', + 'dns_nameservers', + 'dns_search', + 'routes', + ], + } + + links = network_json.get('links', []) + networks = network_json.get('networks', []) + services = network_json.get('services', []) + + config = [] + for link in links: + subnets = [] + cfg = {k: v for k, v in link.items() + if k in valid_keys['physical']} + cfg.update({'name': link['id']}) + for network in [net for net in networks + if net['link'] == link['id']]: + subnet = {k: v for k, v in network.items() + if k in valid_keys['subnet']} + if 'dhcp' in network['type']: + t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' + subnet.update({ + 'type': t, + }) + else: + subnet.update({ + 'type': 'static', + 'address': network.get('ip_address'), + }) + subnets.append(subnet) + cfg.update({'subnets': subnets}) + if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']: + cfg.update({ + 'type': 'physical', + 'mac_address': link['ethernet_mac_address']}) + elif link['type'] in ['bond']: + params = {} + for k, v in link.items(): + if k == 'bond_links': + continue + elif k.startswith('bond'): + params.update({k: v}) + cfg.update({ + 'bond_interfaces': copy.deepcopy(link['bond_links']), + 'params': params, + }) + elif link['type'] in ['vlan']: + cfg.update({ + 'name': "%s.%s" % (link['vlan_link'], + link['vlan_id']), + 'vlan_link': link['vlan_link'], + 'vlan_id': link['vlan_id'], + 'mac_address': link['vlan_mac_address'], + }) + else: + raise ValueError( + 'Unknown network_data link type: %s' % link['type']) + + config.append(cfg) + + for service in services: + cfg = service + cfg.update({'type': 'nameserver'}) + config.append(cfg) + + return {'version': 1, 'config': config} diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5d47564d..12e863d2 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -101,8 +101,8 @@ class DataSourceDigitalOcean(sources.DataSource): # Used to match classes to dependencies datasources = [ - (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), - ] + (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] # Return a list of data sources that match this set of dependencies diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0032d06c..6fe2a0bb 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - self.userdata_raw = ec2.get_instance_userdata(self.api_ver, - self.metadata_address) + self.userdata_raw = \ + ec2.get_instance_userdata(self.api_ver, self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", - int(time.time() - start_time)) + int(time.time() - start_time)) return True except Exception: util.logexc(LOG, "Failed reading from metadata address %s", @@ -84,8 +84,6 @@ class DataSourceEc2(sources.DataSource): def _get_url_settings(self): mcfg = self.ds_cfg - if not mcfg: - mcfg = {} max_wait = 120 try: max_wait = 
int(mcfg.get("max_wait", max_wait)) @@ -102,8 +100,6 @@ class DataSourceEc2(sources.DataSource): def wait_for_metadata_service(self): mcfg = self.ds_cfg - if not mcfg: - mcfg = {} (max_wait, timeout) = self._get_url_settings() if max_wait <= 0: @@ -132,13 +128,13 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, status_cb=LOG.warn) + timeout=timeout, status_cb=LOG.warn) if url: LOG.debug("Using metadata source: '%s'", url2base[url]) else: LOG.critical("Giving up on md from %s after %s seconds", - urls, int(time.time() - start_time)) + urls, int(time.time() - start_time)) self.metadata_address = url2base.get(url) return bool(url) @@ -206,7 +202,7 @@ class DataSourceEc2(sources.DataSource): # Used to match classes to dependencies datasources = [ - (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index c1a0eb61..d828f078 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -20,14 +20,10 @@ from __future__ import print_function -from email.utils import parsedate import errno -import oauthlib.oauth1 as oauth1 import os import time -from six.moves.urllib_request import Request, urlopen - from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper @@ -52,7 +48,20 @@ class DataSourceMAAS(sources.DataSource): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.base_url = None self.seed_dir = os.path.join(paths.seed_dir, 'maas') - self.oauth_clockskew = None + self.oauth_helper = self._get_helper() + + def _get_helper(self): + mcfg = self.ds_cfg + # If we are missing token_key, token_secret or consumer_key + # then just do non-authed requests + for required in ('token_key', 'token_secret', 'consumer_key'): + if required not in mcfg: + return url_helper.OauthUrlHelper() + + return url_helper.OauthUrlHelper( + consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'], + token_secret=mcfg['token_secret'], + consumer_secret=mcfg.get('consumer_secret')) def __str__(self): root = sources.DataSource.__str__(self) @@ -79,14 +88,18 @@ class DataSourceMAAS(sources.DataSource): return False try: + # doing this here actually has a side affect of + # getting oauth time-fix in place. As no where else would + # retry by default, so even if we could fix the timestamp + # we would not. 
if not self.wait_for_metadata_service(url): return False self.base_url = url - (userdata, metadata) = read_maas_seed_url(self.base_url, - self._md_headers, - paths=self.paths) + (userdata, metadata) = read_maas_seed_url( + self.base_url, read_file_or_url=self.oauth_helper.readurl, + paths=self.paths, retries=1) self.userdata_raw = userdata self.metadata = metadata return True @@ -94,31 +107,8 @@ class DataSourceMAAS(sources.DataSource): util.logexc(LOG, "Failed fetching metadata from url %s", url) return False - def _md_headers(self, url): - mcfg = self.ds_cfg - - # If we are missing token_key, token_secret or consumer_key - # then just do non-authed requests - for required in ('token_key', 'token_secret', 'consumer_key'): - if required not in mcfg: - return {} - - consumer_secret = mcfg.get('consumer_secret', "") - - timestamp = None - if self.oauth_clockskew: - timestamp = int(time.time()) + self.oauth_clockskew - - return oauth_headers(url=url, - consumer_key=mcfg['consumer_key'], - token_key=mcfg['token_key'], - token_secret=mcfg['token_secret'], - consumer_secret=consumer_secret, - timestamp=timestamp) - def wait_for_metadata_service(self, url): mcfg = self.ds_cfg - max_wait = 120 try: max_wait = int(mcfg.get("max_wait", max_wait)) @@ -138,10 +128,8 @@ class DataSourceMAAS(sources.DataSource): starttime = time.time() check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] - url = url_helper.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, - exception_cb=self._except_cb, - headers_cb=self._md_headers) + url = self.oauth_helper.wait_for_url( + urls=urls, max_wait=max_wait, timeout=timeout) if url: LOG.debug("Using metadata source: '%s'", url) @@ -151,26 +139,6 @@ class DataSourceMAAS(sources.DataSource): return bool(url) - def _except_cb(self, msg, exception): - if not (isinstance(exception, url_helper.UrlError) and - (exception.code == 403 or exception.code == 401)): - return - - if 'date' not in exception.headers: - LOG.warn("Missing header 'date' in %s response", exception.code) - return - - date = exception.headers['date'] - try: - ret_time = time.mktime(parsedate(date)) - except Exception as e: - LOG.warn("Failed to convert datetime '%s': %s", date, e) - return - - self.oauth_clockskew = int(ret_time - time.time()) - LOG.warn("Setting oauth clockskew to %d", self.oauth_clockskew) - return - def read_maas_seed_dir(seed_d): """ @@ -196,12 +164,12 @@ def read_maas_seed_dir(seed_d): return check_seed_contents(md, seed_d) -def read_maas_seed_url(seed_url, header_cb=None, timeout=None, - version=MD_VERSION, paths=None): +def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, + version=MD_VERSION, paths=None, retries=None): """ Read the maas datasource at seed_url. 
- - header_cb is a method that should return a headers dictionary for - a given url + read_file_or_url is a method that should provide an interface + like util.read_file_or_url Expected format of seed_url is are the following files: * <seed_url>/<version>/meta-data/instance-id @@ -222,25 +190,21 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, 'user-data': "%s/%s" % (base_url, 'user-data'), } + if read_file_or_url is None: + read_file_or_url = util.read_file_or_url + md = {} for name in file_order: url = files.get(name) - if not header_cb: - def _cb(url): - return {} - header_cb = _cb - if name == 'user-data': - retries = 0 + item_retries = 0 else: - retries = None + item_retries = retries try: ssl_details = util.fetch_ssl_details(paths) - resp = util.read_file_or_url(url, retries=retries, - headers_cb=header_cb, - timeout=timeout, - ssl_details=ssl_details) + resp = read_file_or_url(url, retries=item_retries, + timeout=timeout, ssl_details=ssl_details) if resp.ok(): if name in BINARY_FIELDS: md[name] = resp.contents @@ -280,24 +244,6 @@ def check_seed_contents(content, seed): return (userdata, md) -def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, - timestamp=None): - if timestamp: - timestamp = str(timestamp) - else: - timestamp = None - - client = oauth1.Client( - consumer_key, - client_secret=consumer_secret, - resource_owner_key=token_key, - resource_owner_secret=token_secret, - signature_method=oauth1.SIGNATURE_PLAINTEXT, - timestamp=timestamp) - uri, signed_headers, body = client.sign(url) - return signed_headers - - class MAASSeedDirNone(Exception): pass @@ -308,7 +254,7 @@ class MAASSeedDirMalformed(Exception): # Used to match classes to dependencies datasources = [ - (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] @@ -329,17 +275,18 @@ if __name__ == "__main__": parser = argparse.ArgumentParser(description='Interact with MAAS DS') parser.add_argument("--config", metavar="file", - help="specify DS config file", default=None) + help="specify DS config file", default=None) parser.add_argument("--ckey", metavar="key", - help="the consumer key to auth with", default=None) + help="the consumer key to auth with", default=None) parser.add_argument("--tkey", metavar="key", - help="the token key to auth with", default=None) + help="the token key to auth with", default=None) parser.add_argument("--csec", metavar="secret", - help="the consumer secret (likely '')", default="") + help="the consumer secret (likely '')", default="") parser.add_argument("--tsec", metavar="secret", - help="the token secret to auth with", default=None) + help="the token secret to auth with", default=None) parser.add_argument("--apiver", metavar="version", - help="the apiver to use ("" can be used)", default=MD_VERSION) + help="the apiver to use ("" can be used)", + default=MD_VERSION) subcmds = parser.add_subparsers(title="subcommands", dest="subcmd") subcmds.add_parser('crawl', help="crawl the datasource") @@ -351,7 +298,7 @@ if __name__ == "__main__": args = parser.parse_args() creds = {'consumer_key': args.ckey, 'token_key': args.tkey, - 'token_secret': args.tsec, 'consumer_secret': args.csec} + 'token_secret': args.tsec, 'consumer_secret': args.csec} if args.config: cfg = util.read_conf(args.config) @@ -361,47 +308,46 @@ if __name__ == "__main__": if key in cfg and creds[key] is None: creds[key] = cfg[key] - def geturl(url, headers_cb): - req = Request(url, data=None, 
headers=headers_cb(url)) - return urlopen(req).read() + oauth_helper = url_helper.OauthUrlHelper(**creds) + + def geturl(url): + # the retry is to ensure that oauth timestamp gets fixed + return oauth_helper.readurl(url, retries=1).contents - def printurl(url, headers_cb): - print("== %s ==\n%s\n" % (url, geturl(url, headers_cb))) + def printurl(url): + print("== %s ==\n%s\n" % (url, geturl(url).decode())) - def crawl(url, headers_cb=None): + def crawl(url): if url.endswith("/"): - for line in geturl(url, headers_cb).splitlines(): + for line in geturl(url).decode().splitlines(): if line.endswith("/"): - crawl("%s%s" % (url, line), headers_cb) + crawl("%s%s" % (url, line)) + elif line == "meta-data": + # meta-data is a dir, it *should* end in a / + crawl("%s%s" % (url, "meta-data/")) else: - printurl("%s%s" % (url, line), headers_cb) + printurl("%s%s" % (url, line)) else: - printurl(url, headers_cb) - - def my_headers(url): - headers = {} - if creds.get('consumer_key', None) is not None: - headers = oauth_headers(url, **creds) - return headers + printurl(url) if args.subcmd == "check-seed": - if args.url.startswith("http"): - (userdata, metadata) = read_maas_seed_url(args.url, - header_cb=my_headers, - version=args.apiver) - else: - (userdata, metadata) = read_maas_seed_url(args.url) + readurl = oauth_helper.readurl + if args.url[0] == "/" or args.url.startswith("file://"): + readurl = None + (userdata, metadata) = read_maas_seed_url( + args.url, version=args.apiver, read_file_or_url=readurl, + retries=2) print("=== userdata ===") - print(userdata) + print(userdata.decode()) print("=== metadata ===") pprint.pprint(metadata) elif args.subcmd == "get": - printurl(args.url, my_headers) + printurl(args.url) elif args.subcmd == "crawl": if not args.url.endswith("/"): args.url = "%s/" % args.url - crawl(args.url, my_headers) + crawl(args.url) main() diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6a861af3..c2fba4d2 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -36,7 +36,9 @@ class DataSourceNoCloud(sources.DataSource): self.dsmode = 'local' self.seed = None self.cmdline_id = "ds=nocloud" - self.seed_dir = os.path.join(paths.seed_dir, 'nocloud') + self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'), + os.path.join(paths.seed_dir, 'nocloud-net')] + self.seed_dir = None self.supported_seed_starts = ("/", "file://") def __str__(self): @@ -50,31 +52,32 @@ class DataSourceNoCloud(sources.DataSource): } found = [] - mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""} + mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", + 'network-config': {}} try: # Parse the kernel command line, getting data passed in md = {} if parse_cmdline_data(self.cmdline_id, md): found.append("cmdline") - mydata['meta-data'].update(md) + mydata = _merge_new_seed(mydata, {'meta-data': md}) except: util.logexc(LOG, "Unable to parse command line data") return False # Check to see if the seed dir has data. 
pp2d_kwargs = {'required': ['user-data', 'meta-data'], - 'optional': ['vendor-data']} - - try: - seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs) - found.append(self.seed_dir) - LOG.debug("Using seeded data from %s", self.seed_dir) - except ValueError as e: - pass - - if self.seed_dir in found: - mydata = _merge_new_seed(mydata, seeded) + 'optional': ['vendor-data', 'network-config']} + + for path in self.seed_dirs: + try: + seeded = util.pathprefix2dict(path, **pp2d_kwargs) + found.append(path) + LOG.debug("Using seeded data from %s", path) + mydata = _merge_new_seed(mydata, seeded) + break + except ValueError as e: + pass # If the datasource config had a 'seedfrom' entry, then that takes # precedence over a 'seedfrom' that was found in a filesystem @@ -141,8 +144,7 @@ class DataSourceNoCloud(sources.DataSource): if len(found) == 0: return False - seeded_interfaces = None - + seeded_network = None # The special argument "seedfrom" indicates we should # attempt to seed the userdata / metadata from its value # its primarily value is in allowing the user to type less @@ -158,8 +160,9 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False - if 'network-interfaces' in mydata['meta-data']: - seeded_interfaces = self.dsmode + if (mydata['meta-data'].get('network-interfaces') or + mydata.get('network-config')): + seeded_network = self.dsmode # This could throw errors, but the user told us to do it # so if errors are raised, let them raise @@ -176,27 +179,75 @@ class DataSourceNoCloud(sources.DataSource): mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], defaults]) - # Update the network-interfaces if metadata had 'network-interfaces' - # entry and this is the local datasource, or 'seedfrom' was used - # and the source of the seed was self.dsmode - # ('local' for NoCloud, 'net' for NoCloudNet') - if ('network-interfaces' in mydata['meta-data'] and - (self.dsmode in ("local", seeded_interfaces))): - LOG.debug("Updating network interfaces from %s", self) - self.distro.apply_network( - mydata['meta-data']['network-interfaces']) + netdata = {'format': None, 'data': None} + if mydata['meta-data'].get('network-interfaces'): + netdata['format'] = 'interfaces' + netdata['data'] = mydata['meta-data']['network-interfaces'] + elif mydata.get('network-config'): + netdata['format'] = 'network-config' + netdata['data'] = mydata['network-config'] + + # if this is the local datasource or 'seedfrom' was used + # and the source of the seed was self.dsmode. + # Then see if there is network config to apply. + # note this is obsolete network-interfaces style seeding. + if self.dsmode in ("local", seeded_network): + if mydata['meta-data'].get('network-interfaces'): + LOG.debug("Updating network interfaces from %s", self) + self.distro.apply_network( + mydata['meta-data']['network-interfaces']) if mydata['meta-data']['dsmode'] == self.dsmode: self.seed = ",".join(found) self.metadata = mydata['meta-data'] self.userdata_raw = mydata['user-data'] - self.vendordata = mydata['vendor-data'] + self.vendordata_raw = mydata['vendor-data'] + self._network_config = mydata['network-config'] return True LOG.debug("%s: not claiming datasource, dsmode=%s", self, mydata['meta-data']['dsmode']) return False + def check_instance_id(self, sys_cfg): + # quickly (local check only) if self.instance_id is still valid + # we check kernel command line or files. 
+ current = self.get_instance_id() + if not current: + return None + + quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id, + dirs=self.seed_dirs) + if not quick_id: + return None + return quick_id == current + + @property + def network_config(self): + return self._network_config + + +def _quick_read_instance_id(cmdline_id, dirs=None): + if dirs is None: + dirs = [] + + iid_key = 'instance-id' + if cmdline_id is None: + fill = {} + if parse_cmdline_data(cmdline_id, fill) and iid_key in fill: + return fill[iid_key] + + for d in dirs: + try: + data = util.pathprefix2dict(d, required=['meta-data']) + md = util.load_yaml(data['meta-data']) + if iid_key in md: + return md[iid_key] + except ValueError: + pass + + return None + # Returns true or false indicating if cmdline indicated # that this module should be used @@ -244,9 +295,17 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): def _merge_new_seed(cur, seeded): ret = cur.copy() - ret['meta-data'] = util.mergemanydict([cur['meta-data'], - util.load_yaml(seeded['meta-data'])]) - ret['user-data'] = seeded['user-data'] + + newmd = seeded.get('meta-data', {}) + if not isinstance(seeded['meta-data'], dict): + newmd = util.load_yaml(seeded['meta-data']) + ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd]) + + if seeded.get('network-config'): + ret['network-config'] = util.load_yaml(seeded['network-config']) + + if 'user-data' in seeded: + ret['user-data'] = seeded['user-data'] if 'vendor-data' in seeded: ret['vendor-data'] = seeded['vendor-data'] return ret @@ -257,14 +316,13 @@ class DataSourceNoCloudNet(DataSourceNoCloud): DataSourceNoCloud.__init__(self, sys_cfg, distro, paths) self.cmdline_id = "ds=nocloud-net" self.supported_seed_starts = ("http://", "https://", "ftp://") - self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net') self.dsmode = "net" # Used to match classes to dependencies datasources = [ - (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )), - (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )), + (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index 12a8a992..d1a62b2a 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -47,8 +47,8 @@ class DataSourceNone(sources.DataSource): # Used to match classes to dependencies datasources = [ - (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), - (DataSourceNone, []), + (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceNone, []), ] diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 58a4b2a2..2a6cd050 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -25,10 +25,22 @@ from xml.dom import minidom import base64 import os import re +import time from cloudinit import log as logging from cloudinit import sources from cloudinit import util +from .helpers.vmware.imc.config import Config +from .helpers.vmware.imc.config_file import ConfigFile +from .helpers.vmware.imc.config_nic import NicConfigurator +from .helpers.vmware.imc.guestcust_event import GuestCustEventEnum +from .helpers.vmware.imc.guestcust_state import GuestCustStateEnum +from .helpers.vmware.imc.guestcust_error import GuestCustErrorEnum +from .helpers.vmware.imc.guestcust_util import ( + set_customization_status, + get_nics_to_enable, + enable_nics +) LOG = 
logging.getLogger(__name__) @@ -50,18 +62,90 @@ class DataSourceOVF(sources.DataSource): found = [] md = {} ud = "" + vmwarePlatformFound = False + vmwareImcConfigFilePath = '' defaults = { "instance-id": "iid-dsovf", } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) + + system_type = util.read_dmi_data("system-product-name") + if system_type is None: + LOG.debug("No system-product-name found") + if seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) self.environment = contents found.append(seed) + elif system_type and 'vmware' in system_type.lower(): + LOG.debug("VMware Virtualization Platform found") + if not util.get_cfg_option_bool( + self.sys_cfg, "disable_vmware_customization", True): + deployPkgPluginPath = search_file("/usr/lib/vmware-tools", + "libdeployPkgPlugin.so") + if not deployPkgPluginPath: + deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", + "libdeployPkgPlugin.so") + if deployPkgPluginPath: + # When the VM is powered on, the "VMware Tools" daemon + # copies the customization specification file to + # /var/run/vmware-imc directory. cloud-init code needs + # to search for the file in that directory. + vmwareImcConfigFilePath = util.log_time( + logfunc=LOG.debug, + msg="waiting for configuration file", + func=wait_for_imc_cfg_file, + args=("/var/run/vmware-imc", "cust.cfg")) + + if vmwareImcConfigFilePath: + LOG.debug("Found VMware DeployPkg Config File at %s" % + vmwareImcConfigFilePath) + else: + LOG.debug("Did not find VMware DeployPkg Config File Path") + else: + LOG.debug("Customization for VMware platform is disabled.") + + if vmwareImcConfigFilePath: + nics = "" + try: + cf = ConfigFile(vmwareImcConfigFilePath) + conf = Config(cf) + (md, ud, cfg) = read_vmware_imc(conf) + dirpath = os.path.dirname(vmwareImcConfigFilePath) + nics = get_nics_to_enable(dirpath) + except Exception as e: + LOG.debug("Error parsing the customization Config File") + LOG.exception(e) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) + enable_nics(nics) + return False + finally: + util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) + + try: + LOG.debug("Applying the Network customization") + nicConfigurator = NicConfigurator(conf.nics) + nicConfigurator.configure() + except Exception as e: + LOG.debug("Error applying the Network Configuration") + LOG.exception(e) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) + enable_nics(nics) + return False + + vmwarePlatformFound = True + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_DONE, + GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) + enable_nics(nics) else: np = {'iso': transport_iso9660, 'vmware-guestd': transport_vmware_guestd, } @@ -76,7 +160,7 @@ class DataSourceOVF(sources.DataSource): found.append(name) # There was no OVF transports found - if len(found) == 0: + if len(found) == 0 and not vmwarePlatformFound: return False if 'seedfrom' in md and md['seedfrom']: @@ -129,6 +213,36 @@ class DataSourceOVFNet(DataSourceOVF): self.supported_seed_starts = ("http://", "https://", "ftp://") +def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): + waited = 0 + + while waited < maxwait: + fileFullPath = search_file(dirpath, filename) + if fileFullPath: + return fileFullPath + time.sleep(naplen) + waited += naplen + return None + + +# This will return a dict with some 
content +# meta-data, user-data, some config +def read_vmware_imc(config): + md = {} + cfg = {} + ud = "" + if config.host_name: + if config.domain_name: + md['local-hostname'] = config.host_name + "." + config.domain_name + else: + md['local-hostname'] = config.host_name + + if config.timezone: + cfg['timezone'] = config.timezone + + return (md, ud, cfg) + + # This will return a dict with some content # meta-data, user-data, some config def read_ovf_environment(contents): @@ -264,14 +378,14 @@ def get_properties(contents): # could also check here that elem.namespaceURI == # "http://schemas.dmtf.org/ovf/environment/1" propSections = find_child(dom.documentElement, - lambda n: n.localName == "PropertySection") + lambda n: n.localName == "PropertySection") if len(propSections) == 0: raise XmlError("No 'PropertySection's") props = {} propElems = find_child(propSections[0], - (lambda n: n.localName == "Property")) + (lambda n: n.localName == "Property")) for elem in propElems: key = elem.attributes.getNamedItemNS(envNsURI, "key").value @@ -281,14 +395,25 @@ def get_properties(contents): return props +def search_file(dirpath, filename): + if not dirpath or not filename: + return None + + for root, dirs, files in os.walk(dirpath): + if filename in files: + return os.path.join(root, filename) + + return None + + class XmlError(Exception): pass # Used to match classes to dependencies datasources = ( - (DataSourceOVF, (sources.DEP_FILESYSTEM, )), - (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceOVF, (sources.DEP_FILESYSTEM, )), + (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ac2c3b45..681f3a96 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -149,8 +149,8 @@ class BrokenContextDiskDir(Exception): class OpenNebulaNetwork(object): REG_DEV_MAC = re.compile( - r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?', - re.MULTILINE | re.DOTALL) + r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) 
?', + re.MULTILINE | re.DOTALL) def __init__(self, ip, context): self.ip = ip @@ -404,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None): if ssh_key_var: lines = context.get(ssh_key_var).splitlines() results['metadata']['public-keys'] = [l for l in lines - if len(l) and not l.startswith("#")] + if len(l) and not + l.startswith("#")] # custom hostname -- try hostname or leave cloud-init # itself create hostname from IP address later diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 469c2e2a..3af17b10 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -45,8 +45,6 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): self.version = None self.files = {} self.ec2_metadata = None - if not self.ds_cfg: - self.ds_cfg = {} def __str__(self): root = sources.DataSource.__str__(self) @@ -150,6 +148,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): return True + def check_instance_id(self, sys_cfg): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def read_metadata_service(base_url, ssl_details=None): reader = openstack.MetadataReader(base_url, ssl_details=ssl_details) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index c9b497df..5edab152 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -20,10 +20,13 @@ # Datasource for provisioning on SmartOS. This works on Joyent # and public/private Clouds using SmartOS. # -# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests # The meta-data is transmitted via key/value pairs made by # requests on the console. For example, to get the hostname, you # would send "GET hostname" on /dev/ttyS1. +# For Linux Guests running in LX-Brand Zones on SmartOS hosts +# a socket (/native/.zonecontrol/metadata.sock) is used instead +# of a serial console. 
# # Certain behavior is defined by the DataDictionary # http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html @@ -34,6 +37,8 @@ import contextlib import os import random import re +import socket +import stat import serial @@ -46,6 +51,7 @@ LOG = logging.getLogger(__name__) SMARTOS_ATTRIB_MAP = { # Cloud-init Key : (SmartOS Key, Strip line endings) + 'instance-id': ('sdc:uuid', True), 'local-hostname': ('hostname', True), 'public-keys': ('root_authorized_keys', True), 'user-script': ('user-script', False), @@ -76,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME] # BUILTIN_DS_CONFIG = { 'serial_device': '/dev/ttyS1', + 'metadata_sockfile': '/native/.zonecontrol/metadata.sock', 'seed_timeout': 60, 'no_base64_decode': ['root_authorized_keys', 'motd_sys_info', @@ -83,7 +90,7 @@ BUILTIN_DS_CONFIG = { 'user-data', 'user-script', 'sdc:datacenter_name', - ], + 'sdc:uuid'], 'base64_keys': [], 'base64_all': False, 'disk_aliases': {'ephemeral0': '/dev/vdb'}, @@ -94,7 +101,7 @@ BUILTIN_CLOUD_CONFIG = { 'ephemeral0': {'table_type': 'mbr', 'layout': False, 'overwrite': False} - }, + }, 'fs_setup': [{'label': 'ephemeral0', 'filesystem': 'ext3', 'device': 'ephemeral0'}], @@ -150,17 +157,27 @@ class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.is_smartdc = None - self.ds_cfg = util.mergemanydict([ self.ds_cfg, util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) self.metadata = {} - self.cfg = BUILTIN_CLOUD_CONFIG - self.seed = self.ds_cfg.get("serial_device") - self.seed_timeout = self.ds_cfg.get("serial_timeout") + # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but + # report 'BrandZ virtual linux' as the kernel version + if os.uname()[3].lower() == 'brandz virtual linux': + LOG.debug("Host is SmartOS, guest in Zone") + self.is_smartdc = True + self.smartos_type = 'lx-brand' + self.cfg = {} + self.seed = self.ds_cfg.get("metadata_sockfile") + else: + self.is_smartdc = True + self.smartos_type = 'kvm' + self.seed = self.ds_cfg.get("serial_device") + self.cfg = BUILTIN_CLOUD_CONFIG + self.seed_timeout = self.ds_cfg.get("serial_timeout") self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode') self.b64_keys = self.ds_cfg.get('base64_keys') self.b64_all = self.ds_cfg.get('base64_all') @@ -170,12 +187,49 @@ class DataSourceSmartOS(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + def _get_seed_file_object(self): + if not self.seed: + raise AttributeError("seed device is not set") + + if self.smartos_type == 'lx-brand': + if not stat.S_ISSOCK(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a socket", self.seed) + return None + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(self.seed) + return sock.makefile('rwb') + else: + if not stat.S_ISCHR(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a character device") + return None + ser = serial.Serial(self.seed, timeout=self.seed_timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % self.seed) + return ser + return None + + def _set_provisioned(self): + '''Mark the instance provisioning state as successful. + + When run in a zone, the host OS will look for /var/svc/provisioning + to be renamed as /var/svc/provision_success. This should be done + after meta-data is successfully retrieved and from this point + the host considers the provision of the zone to be a success and + keeps the zone running. 
+ ''' + + LOG.debug('Instance provisioning state set as successful') + svc_path = '/var/svc' + if os.path.exists('/'.join([svc_path, 'provisioning'])): + os.rename('/'.join([svc_path, 'provisioning']), + '/'.join([svc_path, 'provision_success'])) + def get_data(self): md = {} ud = "" if not device_exists(self.seed): - LOG.debug("No serial device '%s' found for SmartOS datasource", + LOG.debug("No metadata device '%s' found for SmartOS datasource", self.seed) return False @@ -185,29 +239,36 @@ class DataSourceSmartOS(sources.DataSource): LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)") return False - dmi_info = dmi_data() - if dmi_info is False: - LOG.debug("No dmidata utility found") - return False - - system_uuid, system_type = tuple(dmi_info) - if 'smartdc' not in system_type.lower(): - LOG.debug("Host is not on SmartOS. system_type=%s", system_type) + # SDC KVM instances will provide dmi data, LX-brand does not + if self.smartos_type == 'kvm': + dmi_info = dmi_data() + if dmi_info is False: + LOG.debug("No dmidata utility found") + return False + + system_type = dmi_info + if 'smartdc' not in system_type.lower(): + LOG.debug("Host is not on SmartOS. system_type=%s", + system_type) + return False + LOG.debug("Host is SmartOS, guest in KVM") + + seed_obj = self._get_seed_file_object() + if seed_obj is None: + LOG.debug('Seed file object not found.') return False - self.is_smartdc = True - md['instance-id'] = system_uuid + with contextlib.closing(seed_obj) as seed: + b64_keys = self.query('base64_keys', seed, strip=True, b64=False) + if b64_keys is not None: + self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] - b64_keys = self.query('base64_keys', strip=True, b64=False) - if b64_keys is not None: - self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] + b64_all = self.query('base64_all', seed, strip=True, b64=False) + if b64_all is not None: + self.b64_all = util.is_true(b64_all) - b64_all = self.query('base64_all', strip=True, b64=False) - if b64_all is not None: - self.b64_all = util.is_true(b64_all) - - for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): - smartos_noun, strip = attribute - md[ci_noun] = self.query(smartos_noun, strip=strip) + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): + smartos_noun, strip = attribute + md[ci_noun] = self.query(smartos_noun, seed, strip=strip) # @datadictionary: This key may contain a program that is written # to a file in the filesystem of the guest on each boot and then @@ -240,7 +301,7 @@ class DataSourceSmartOS(sources.DataSource): # Handle the cloud-init regular meta if not md['local-hostname']: - md['local-hostname'] = system_uuid + md['local-hostname'] = md['instance-id'] ud = None if md['user-data']: @@ -257,6 +318,8 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud self.vendordata_raw = md['vendor-data'] + + self._set_provisioned() return True def device_name_to_device(self, name): @@ -268,40 +331,64 @@ class DataSourceSmartOS(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def query(self, noun, strip=False, default=None, b64=None): + def query(self, noun, seed_file, strip=False, default=None, b64=None): if b64 is None: if noun in self.smartos_no_base64: b64 = False elif self.b64_all or noun in self.b64_keys: b64 = True - return query_data(noun=noun, strip=strip, seed_device=self.seed, - seed_timeout=self.seed_timeout, default=default, - b64=b64) + return self._query_data(noun, 
seed_file, strip=strip, + default=default, b64=b64) + def _query_data(self, noun, seed_file, strip=False, + default=None, b64=None): + """Makes a request via "GET <NOUN>" -def device_exists(device): - """Symplistic method to determine if the device exists or not""" - return os.path.exists(device) + In the response, the first line is the status, while subsequent + lines are is the value. A blank line with a "." is used to + indicate end of response. + If the response is expected to be base64 encoded, then set + b64encoded to true. Unfortantely, there is no way to know if + something is 100% encoded, so this method relies on being told + if the data is base64 or not. + """ -def get_serial(seed_device, seed_timeout): - """This is replaced in unit testing, allowing us to replace - serial.Serial with a mocked class. + if not noun: + return False - The timeout value of 60 seconds should never be hit. The value - is taken from SmartOS own provisioning tools. Since we are reading - each line individually up until the single ".", the transfer is - usually very fast (i.e. microseconds) to get the response. - """ - if not seed_device: - raise AttributeError("seed_device value is not set") + response = JoyentMetadataClient(seed_file).get_metadata(noun) + + if response is None: + return default + + if b64 is None: + b64 = self._query_data('b64-%s' % noun, seed_file, b64=False, + default=False, strip=True) + b64 = util.is_true(b64) + + resp = None + if b64 or strip: + resp = "".join(response).rstrip() + else: + resp = "".join(response) - ser = serial.Serial(seed_device, timeout=seed_timeout) - if not ser.isOpen(): - raise SystemError("Unable to open %s" % seed_device) + if b64: + try: + return util.b64d(resp) + # Bogus input produces different errors in Python 2 and 3; + # catch both. + except (TypeError, binascii.Error): + LOG.warn("Failed base64 decoding key '%s'", noun) + return resp - return ser + return resp + + +def device_exists(device): + """Symplistic method to determine if the device exists or not""" + return os.path.exists(device) class JoyentMetadataFetchException(Exception): @@ -320,8 +407,8 @@ class JoyentMetadataClient(object): r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)' r'( (?P<payload>.+))?)') - def __init__(self, serial): - self.serial = serial + def __init__(self, metasource): + self.metasource = metasource def _checksum(self, body): return '{0:08x}'.format( @@ -356,67 +443,30 @@ class JoyentMetadataClient(object): util.b64e(metadata_key)) msg = 'V2 {0} {1} {2}\n'.format( len(message_body), self._checksum(message_body), message_body) - LOG.debug('Writing "%s" to serial port.', msg) - self.serial.write(msg.encode('ascii')) - response = self.serial.readline().decode('ascii') - LOG.debug('Read "%s" from serial port.', response) - return self._get_value_from_frame(request_id, response) - - -def query_data(noun, seed_device, seed_timeout, strip=False, default=None, - b64=None): - """Makes a request to via the serial console via "GET <NOUN>" - - In the response, the first line is the status, while subsequent lines - are is the value. A blank line with a "." is used to indicate end of - response. - - If the response is expected to be base64 encoded, then set b64encoded - to true. Unfortantely, there is no way to know if something is 100% - encoded, so this method relies on being told if the data is base64 or - not. 
- """ - if not noun: - return False - - with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser: - client = JoyentMetadataClient(ser) - response = client.get_metadata(noun) - - if response is None: - return default - - if b64 is None: - b64 = query_data('b64-%s' % noun, seed_device=seed_device, - seed_timeout=seed_timeout, b64=False, - default=False, strip=True) - b64 = util.is_true(b64) - - resp = None - if b64 or strip: - resp = "".join(response).rstrip() - else: - resp = "".join(response) - - if b64: - try: - return util.b64d(resp) - # Bogus input produces different errors in Python 2 and 3; catch both. - except (TypeError, binascii.Error): - LOG.warn("Failed base64 decoding key '%s'", noun) - return resp + LOG.debug('Writing "%s" to metadata transport.', msg) + self.metasource.write(msg.encode('ascii')) + self.metasource.flush() + + response = bytearray() + response.extend(self.metasource.read(1)) + while response[-1:] != b'\n': + response.extend(self.metasource.read(1)) + response = response.rstrip().decode('ascii') + LOG.debug('Read "%s" from metadata transport.', response) + + if 'SUCCESS' not in response: + return None - return resp + return self._get_value_from_frame(request_id, response) def dmi_data(): - sys_uuid = util.read_dmi_data("system-uuid") sys_type = util.read_dmi_data("system-product-name") - if not sys_uuid or not sys_type: + if not sys_type: return None - return (sys_uuid.lower(), sys_type) + return sys_type def write_boot_content(content, content_f, link=None, shebang=False, @@ -462,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False, except Exception as e: util.logexc(LOG, ("Failed to identify script type for %s" % - content_f, e)) + content_f, e)) if link: try: diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a21c08c2..6bf2c33b 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -32,6 +32,7 @@ from cloudinit import user_data as ud from cloudinit import util from cloudinit.filters import launch_index +from cloudinit.reporting import events DEP_FILESYSTEM = "FILESYSTEM" DEP_NETWORK = "NETWORK" @@ -69,6 +70,9 @@ class DataSource(object): self.ds_cfg = util.get_cfg_by_path(self.sys_cfg, ("datasource", name), {}) + if not self.ds_cfg: + self.ds_cfg = {} + if not ud_proc: self.ud_proc = ud.UserDataProcessor(self.paths) else: @@ -216,6 +220,14 @@ class DataSource(object): def get_package_mirror_info(self): return self.distro.get_package_mirror_info(data_source=self) + def check_instance_id(self, sys_cfg): + # quickly (local check only) if self.instance_id is still + return False + + @property + def network_config(self): + return None + def normalize_pubkey_data(pubkey_data): keys = [] @@ -246,17 +258,25 @@ def normalize_pubkey_data(pubkey_data): return keys -def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): +def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): ds_list = list_sources(cfg_list, ds_deps, pkg_list) ds_names = [type_utils.obj_name(f) for f in ds_list] - LOG.debug("Searching for data source in: %s", ds_names) - - for cls in ds_list: + mode = "network" if DEP_NETWORK in ds_deps else "local" + LOG.debug("Searching for %s data source in: %s", mode, ds_names) + + for name, cls in zip(ds_names, ds_list): + myrep = events.ReportEventStack( + name="search-%s" % name.replace("DataSource", ""), + description="searching for %s data from %s" % (mode, name), + message="no %s data found from %s" % (mode, name), + 
parent=reporter) try: - LOG.debug("Seeing if we can get any data from %s", cls) - s = cls(sys_cfg, distro, paths) - if s.get_data(): - return (s, type_utils.obj_name(cls)) + with myrep: + LOG.debug("Seeing if we can get any data from %s", cls) + s = cls(sys_cfg, distro, paths) + if s.get_data(): + myrep.message = "found %s data from %s" % (mode, name) + return (s, type_utils.obj_name(cls)) except Exception: util.logexc(LOG, "Getting data from %s failed", cls) @@ -290,6 +310,18 @@ def list_sources(cfg_list, depends, pkg_list): return src_list +def instance_id_matches_system_uuid(instance_id, field='system-uuid'): + # quickly (local check only) if self.instance_id is still valid + # we check kernel command line or files. + if not instance_id: + return False + + dmi_value = util.read_dmi_data(field) + if not dmi_value: + return False + return instance_id.lower() == dmi_value.lower() + + # 'depends' is a list of dependencies (DEP_FILESYSTEM) # ds_list is a list of 2 item lists # ds_list = [ diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 281d733e..018cac6d 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -79,12 +79,6 @@ class GoalState(object): './Container/RoleInstanceList/RoleInstance/InstanceId') @property - def shared_config_xml(self): - url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' - '/Configuration/SharedConfig') - return self.http_client.get(url).contents - - @property def certificates_xml(self): if self._certificates_xml is None: url = self._text_from_xpath( @@ -172,19 +166,6 @@ class OpenSSLManager(object): return keys -def iid_from_shared_config_content(content): - """ - find INSTANCE_ID in: - <?xml version="1.0" encoding="utf-8"?> - <SharedConfig version="1.0.0.0" goalStateIncarnation="1"> - <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0"> - <Service name="..." 
guid="{00000000-0000-0000-0000-000000000000}"/> - """ - root = ElementTree.fromstring(content) - depnode = root.find('Deployment') - return depnode.get('name') - - class WALinuxAgentShim(object): REPORT_READY_XML_TEMPLATE = '\n'.join([ @@ -216,6 +197,21 @@ class WALinuxAgentShim(object): self.openssl_manager.clean_up() @staticmethod + def get_ip_from_lease_value(lease_value): + unescaped_value = lease_value.replace('\\', '') + if len(unescaped_value) > 4: + hex_string = '' + for hex_pair in unescaped_value.split(':'): + if len(hex_pair) == 1: + hex_pair = '0' + hex_pair + hex_string += hex_pair + packed_bytes = struct.pack( + '>L', int(hex_string.replace(':', ''), 16)) + else: + packed_bytes = unescaped_value.encode('utf-8') + return socket.inet_ntoa(packed_bytes) + + @staticmethod def find_endpoint(): LOG.debug('Finding Azure endpoint...') content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') @@ -225,16 +221,7 @@ class WALinuxAgentShim(object): value = line.strip(' ').split(' ', 2)[-1].strip(';\n"') if value is None: raise Exception('No endpoint found in DHCP config.') - if ':' in value: - hex_string = '' - for hex_pair in value.split(':'): - if len(hex_pair) == 1: - hex_pair = '0' + hex_pair - hex_string += hex_pair - value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) - else: - value = value.encode('utf-8') - endpoint_ip_address = socket.inet_ntoa(value) + endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address @@ -263,8 +250,6 @@ class WALinuxAgentShim(object): public_keys = self.openssl_manager.parse_certificates( goal_state.certificates_xml) data = { - 'instance-id': iid_from_shared_config_content( - goal_state.shared_config_xml), 'public-keys': public_keys, } self._report_ready(goal_state, http_client) diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index bd93d22f..1aa6bbae 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -51,11 +51,13 @@ OS_LATEST = 'latest' OS_FOLSOM = '2012-08-10' OS_GRIZZLY = '2013-04-04' OS_HAVANA = '2013-10-17' +OS_LIBERTY = '2015-10-15' # keep this in chronological order. new supported versions go at the end. OS_VERSIONS = ( OS_FOLSOM, OS_GRIZZLY, OS_HAVANA, + OS_LIBERTY, ) @@ -229,6 +231,11 @@ class BaseReader(object): False, load_json_anytype, ) + files['networkdata'] = ( + self._path_join("openstack", version, 'network_data.json'), + False, + load_json_anytype, + ) return files results = { @@ -334,7 +341,7 @@ class ConfigDriveReader(BaseReader): path = self._path_join(self.base_path, 'openstack') found = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path))] - self._versions = found + self._versions = sorted(found) return self._versions def _read_ec2_metadata(self): diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py new file mode 100644 index 00000000..386225d5 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/__init__.py @@ -0,0 +1,13 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py new file mode 100644 index 00000000..386225d5 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/__init__.py @@ -0,0 +1,13 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py new file mode 100644 index 00000000..faba5887 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py @@ -0,0 +1,25 @@ +# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class BootProtoEnum:
+ """Specifies the NIC Boot Settings."""
+
+ DHCP = 'dhcp'
+ STATIC = 'static'
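These values feed the interfaces(5) stanzas that NicConfigurator.gen_ipv4, added later in this diff, writes out. A minimal sketch of the mapping; the device name eth0 is illustrative:

    from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProtoEnum

    # gen_ipv4 lower-cases nic.bootProto, so BootProtoEnum.DHCP produces:
    #   auto eth0
    #   iface eth0 inet dhcp
    # while BootProtoEnum.STATIC produces an 'inet static' stanza whose
    # address/netmask lines come from the NIC's staticIpv4 settings.
    assert BootProtoEnum.DHCP == 'dhcp'
    assert BootProtoEnum.STATIC == 'static'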
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py new file mode 100644 index 00000000..aebc12a0 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -0,0 +1,95 @@ +# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .nic import Nic
+
+
+class Config:
+ """
+ Stores the contents specified in the Customization
+ Specification file.
+ """
+
+ DNS = 'DNS|NAMESERVER|'
+ SUFFIX = 'DNS|SUFFIX|'
+ PASS = 'PASSWORD|-PASS'
+ TIMEZONE = 'DATETIME|TIMEZONE'
+ UTC = 'DATETIME|UTC'
+ HOSTNAME = 'NETWORK|HOSTNAME'
+ DOMAINNAME = 'NETWORK|DOMAINNAME'
+
+ def __init__(self, configFile):
+ self._configFile = configFile
+
+ @property
+ def host_name(self):
+ """Return the hostname."""
+ return self._configFile.get(Config.HOSTNAME, None)
+
+ @property
+ def domain_name(self):
+ """Return the domain name."""
+ return self._configFile.get(Config.DOMAINNAME, None)
+
+ @property
+ def timezone(self):
+ """Return the timezone."""
+ return self._configFile.get(Config.TIMEZONE, None)
+
+ @property
+ def utc(self):
+ """Retrieves whether to set time to UTC or Local."""
+ return self._configFile.get(Config.UTC, None)
+
+ @property
+ def admin_password(self):
+ """Return the root password to be set."""
+ return self._configFile.get(Config.PASS, None)
+
+ @property
+ def name_servers(self):
+ """Return the list of DNS servers."""
+ res = []
+ cnt = self._configFile.get_count_with_prefix(Config.DNS)
+ for i in range(1, cnt + 1):
+ key = Config.DNS + str(i)
+ res.append(self._configFile[key])
+
+ return res
+
+ @property
+ def dns_suffixes(self):
+ """Return the list of DNS Suffixes."""
+ res = []
+ cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
+ for i in range(1, cnt + 1):
+ key = Config.SUFFIX + str(i)
+ res.append(self._configFile[key])
+
+ return res
+
+ @property
+ def nics(self):
+ """Return the list of associated NICs."""
+ res = []
+ nics = self._configFile['NIC-CONFIG|NICS']
+ for nic in nics.split(','):
+ res.append(Nic(nic, self._configFile))
+
+ return res
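Note: ConfigFile (added below) flattens the INI-style customization specification into 'CATEGORY|key' entries, and Config reads those entries back through the properties above. A minimal sketch of the round trip, assuming a cust.cfg like the one delivered to /var/run/vmware-imc (the file contents here are illustrative):

    # cust.cfg:
    #   [NETWORK]
    #   HOSTNAME = myvm
    #   DOMAINNAME = example.com
    #   [DNS]
    #   NAMESERVER|1 = 10.0.0.2
    from cloudinit.sources.helpers.vmware.imc.config import Config
    from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile

    conf = Config(ConfigFile('/var/run/vmware-imc/cust.cfg'))
    conf.host_name     # 'myvm'
    conf.domain_name   # 'example.com'
    conf.name_servers  # ['10.0.0.2']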
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py new file mode 100644 index 00000000..bb9fb7dc --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_file.py @@ -0,0 +1,129 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi <stanguturi@vmware.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import logging + +try: + import configparser +except ImportError: + import ConfigParser as configparser + +from .config_source import ConfigSource + +logger = logging.getLogger(__name__) + + +class ConfigFile(ConfigSource, dict): + """ConfigFile module to load the content from a specified source.""" + + def __init__(self, filename): + self._loadConfigFile(filename) + pass + + def _insertKey(self, key, val): + """ + Inserts a Key Value pair. + + Keyword arguments: + key -- The key to insert + val -- The value to insert for the key + + """ + key = key.strip() + val = val.strip() + + if key.startswith('-') or '|-' in key: + canLog = False + else: + canLog = True + + # "sensitive" settings shall not be logged + if canLog: + logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val)) + else: + logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key) + + self[key] = val + + def _loadConfigFile(self, filename): + """ + Parses properties from the specified config file. + + Any previously available properties will be removed. + Sensitive data will not be logged in case the key starts + from '-'. + + Keyword arguments: + filename - The full path to the config file. + """ + logger.info('Parsing the config file %s.' % filename) + + config = configparser.ConfigParser() + config.optionxform = str + config.read(filename) + + self.clear() + + for category in config.sections(): + logger.debug("FOUND CATEGORY = '%s'" % category) + + for (key, value) in config.items(category): + self._insertKey(category + '|' + key, value) + + def should_keep_current_value(self, key): + """ + Determines whether a value for a property must be kept. + + If the propery is missing, it is treated as it should be not + changed by the engine. + + Keyword arguments: + key -- The key to search for. + """ + # helps to distinguish from "empty" value which is used to indicate + # "removal" + return key not in self + + def should_remove_current_value(self, key): + """ + Determines whether a value for the property must be removed. + + If the specified key is empty, it is treated as it should be + removed by the engine. + + Return true if the value can be removed, false otherwise. + + Keyword arguments: + key -- The key to search for. + """ + # helps to distinguish from "missing" value which is used to indicate + # "keeping unchanged" + if key in self: + return not bool(self[key]) + else: + return False + + def get_count_with_prefix(self, prefix): + """ + Return the total count of keys that start with the specified prefix. 
+ + Keyword arguments: + prefix -- prefix of the key + """ + return len([key for key in self if key.startswith(prefix)]) diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py new file mode 100644 index 00000000..7266b699 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py @@ -0,0 +1,25 @@ +# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .config_source import ConfigSource
+
+
+class ConfigNamespace(ConfigSource):
+ """Specifies the Config Namespace."""
+ pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py new file mode 100644 index 00000000..77098a05 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -0,0 +1,247 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2016 VMware INC. +# +# Author: Sankar Tanguturi <stanguturi@vmware.com> +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +import logging +import os +import re + +from cloudinit import util + +logger = logging.getLogger(__name__) + + +class NicConfigurator: + def __init__(self, nics): + """ + Initialize the Nic Configurator + @param nics (list) an array of nics to configure + """ + self.nics = nics + self.mac2Name = {} + self.ipv4PrimaryGateway = None + self.ipv6PrimaryGateway = None + self.find_devices() + self._primaryNic = self.get_primary_nic() + + def get_primary_nic(self): + """ + Retrieve the primary nic if it exists + @return (NicBase): the primary nic if exists, None otherwise + """ + primary_nics = [nic for nic in self.nics if nic.primary] + if not primary_nics: + return None + elif len(primary_nics) > 1: + raise Exception('There can only be one primary nic', + [nic.mac for nic in primary_nics]) + else: + return primary_nics[0] + + def find_devices(self): + """ + Create the mac2Name dictionary + The mac address(es) are in the lower case + """ + cmd = ['ip', 'addr', 'show'] + (output, err) = util.subp(cmd) + sections = re.split(r'\n\d+: ', '\n' + output)[1:] + + macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' + for section in sections: + match = re.search(macPat, section) + if not match: # Only keep info about nics + continue + mac = match.group(1).lower() + name = section.split(':', 1)[0] + self.mac2Name[mac] = name + + def gen_one_nic(self, nic): + """ + Return the lines needed to configure a nic + @return (str list): the string list to configure the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + name = self.mac2Name.get(nic.mac.lower()) + if not name: + raise ValueError('No known device has MACADDR: %s' % nic.mac) + + if nic.onboot: + lines.append('auto %s' % name) + + # Customize IPv4 + lines.extend(self.gen_ipv4(name, nic)) + + # Customize IPv6 + lines.extend(self.gen_ipv6(name, nic)) + + lines.append('') + + return lines + + def gen_ipv4(self, name, nic): + """ + Return the lines needed to configure the IPv4 setting of a nic + @return (str list): the string list to configure the gateways + @param name (str): name of the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + + bootproto = nic.bootProto.lower() + if nic.ipv4_mode.lower() == 'disabled': + bootproto = 'manual' + lines.append('iface %s inet %s' % (name, bootproto)) + + if bootproto != 'static': + return lines + + # Static Ipv4 + v4 = nic.staticIpv4 + if v4.ip: + lines.append(' address %s' % v4.ip) + if v4.netmask: + lines.append(' netmask %s' % v4.netmask) + + # Add the primary gateway + if nic.primary and 
v4.gateways: + self.ipv4PrimaryGateway = v4.gateways[0] + lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway) + return lines + + # Add routes if there is no primary nic + if not self._primaryNic: + lines.extend(self.gen_ipv4_route(nic, v4.gateways)) + + return lines + + def gen_ipv4_route(self, nic, gateways): + """ + Return the lines needed to configure additional Ipv4 route + @return (str list): the string list to configure the gateways + @param nic (NicBase): the nic to configure + @param gateways (str list): the list of gateways + """ + lines = [] + + for gateway in gateways: + lines.append(' up route add default gw %s metric 10000' % + gateway) + + return lines + + def gen_ipv6(self, name, nic): + """ + Return the lines needed to configure the gateways for a nic + @return (str list): the string list to configure the gateways + @param name (str): name of the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + + if not nic.staticIpv6: + return lines + + # Static Ipv6 + addrs = nic.staticIpv6 + lines.append('iface %s inet6 static' % name) + lines.append(' address %s' % addrs[0].ip) + lines.append(' netmask %s' % addrs[0].netmask) + + for addr in addrs[1:]: + lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip, + addr.netmask)) + # Add the primary gateway + if nic.primary: + for addr in addrs: + if addr.gateway: + self.ipv6PrimaryGateway = addr.gateway + lines.append(' gateway %s' % self.ipv6PrimaryGateway) + return lines + + # Add routes if there is no primary nic + if not self._primaryNic: + lines.extend(self._genIpv6Route(name, nic, addrs)) + + return lines + + def _genIpv6Route(self, name, nic, addrs): + lines = [] + + for addr in addrs: + lines.append(' up route -A inet6 add default gw ' + '%s metric 10000' % addr.gateway) + + return lines + + def generate(self): + """Return the lines that is needed to configure the nics""" + lines = [] + lines.append('iface lo inet loopback') + lines.append('auto lo') + lines.append('') + + for nic in self.nics: + lines.extend(self.gen_one_nic(nic)) + + return lines + + def clear_dhcp(self): + logger.info('Clearing DHCP leases') + + # Ignore the return code 1. + util.subp(["pkill", "dhclient"], rcs=[0, 1]) + util.subp(["rm", "-f", "/var/lib/dhcp/*"]) + + def if_down_up(self): + names = [] + for nic in self.nics: + name = self.mac2Name.get(nic.mac.lower()) + names.append(name) + + for name in names: + logger.info('Bring down interface %s' % name) + util.subp(["ifdown", "%s" % name]) + + self.clear_dhcp() + + for name in names: + logger.info('Bring up interface %s' % name) + util.subp(["ifup", "%s" % name]) + + def configure(self): + """ + Configure the /etc/network/intefaces + Make a back up of the original + """ + containingDir = '/etc/network' + + interfaceFile = os.path.join(containingDir, 'interfaces') + originalFile = os.path.join(containingDir, + 'interfaces.before_vmware_customization') + + if not os.path.exists(originalFile) and os.path.exists(interfaceFile): + os.rename(interfaceFile, originalFile) + + lines = self.generate() + with open(interfaceFile, 'w') as fp: + for line in lines: + fp.write('%s\n' % line) + + self.if_down_up() diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py new file mode 100644 index 00000000..a367e476 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_source.py @@ -0,0 +1,23 @@ +# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ConfigSource:
+ """Specifies a source for the Config Content."""
+ pass
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py new file mode 100644 index 00000000..1b04161f --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -0,0 +1,24 @@ +# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustErrorEnum:
+ """Specifies different errors of Guest Customization engine"""
+
+ GUESTCUST_ERROR_SUCCESS = 0
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py new file mode 100644 index 00000000..fc22568f --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py @@ -0,0 +1,27 @@ +# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustEventEnum:
+ """Specifies different types of Guest Customization Events"""
+
+ GUESTCUST_EVENT_CUSTOMIZE_FAILED = 100
+ GUESTCUST_EVENT_NETWORK_SETUP_FAILED = 101
+ GUESTCUST_EVENT_ENABLE_NICS = 103
+ GUESTCUST_EVENT_QUERY_NICS = 104
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
new file mode 100644
index 00000000..f255be5f
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustStateEnum:
+ """Specifies different states of Guest Customization engine"""
+
+ GUESTCUST_STATE_RUNNING = 4
+ GUESTCUST_STATE_DONE = 5
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
new file mode 100644
index 00000000..d39f0a65
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -0,0 +1,128 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import os
+import time
+
+from cloudinit import util
+
+from .guestcust_state import GuestCustStateEnum
+from .guestcust_event import GuestCustEventEnum
+
+logger = logging.getLogger(__name__)
+
+
+CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log"
+QUERY_NICS_SUPPORTED = "queryNicsSupported"
+NICS_STATUS_CONNECTED = "connected"
+
+
+# This will send an RPC command to the underlying
+# VMware Virtualization Platform.
+def send_rpc(rpc):
+    if not rpc:
+        return None
+
+    out = ""
+    err = "Error sending the RPC command"
+
+    try:
+        logger.debug("Sending RPC command: %s", rpc)
+        (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
+        # Remove the trailing newline in the output.
+        if out:
+            out = out.rstrip()
+    except Exception as e:
+        logger.debug("Failed to send RPC command")
+        logger.exception(e)
+
+    return (out, err)
+
+
+# This will send the customization status to the
+# underlying VMware Virtualization Platform.
+def set_customization_status(custstate, custerror, errormessage=None):
+ message = ""
+
+ if errormessage:
+ message = CLOUDINIT_LOG_FILE + "@" + errormessage
+ else:
+ message = CLOUDINIT_LOG_FILE
+
+ rpc = "deployPkg.update.state %d %d %s" % (custstate, custerror, message)
+ (out, err) = send_rpc(rpc)
+ return (out, err)
+
+
+# This will read the file nics.txt in the specified directory
+# and return the content
+def get_nics_to_enable(dirpath):
+    if not dirpath:
+        return None
+
+    NICS_SIZE = 1024
+    nicsfilepath = os.path.join(dirpath, "nics.txt")
+    if not os.path.exists(nicsfilepath):
+        return None
+
+    with open(nicsfilepath, 'r') as fp:
+        nics = fp.read(NICS_SIZE)
+
+    return nics
+
+
+# This will send an RPC command to the underlying VMware Virtualization
+# platform and enable NICs.
+def enable_nics(nics):
+    if not nics:
+        logger.warning("No NICs found")
+        return
+
+    enableNicsWaitRetries = 5
+    enableNicsWaitCount = 5
+    enableNicsWaitSeconds = 1
+
+    for attempt in range(0, enableNicsWaitRetries):
+        logger.debug("Trying to connect interfaces, attempt %d", attempt)
+        (out, err) = set_customization_status(
+            GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+            GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
+            nics)
+        if not out:
+            time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
+            continue
+
+        if out != QUERY_NICS_SUPPORTED:
+            logger.warning("NICs connection status query is not supported")
+            return
+
+        for count in range(0, enableNicsWaitCount):
+            (out, err) = set_customization_status(
+                GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+                GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
+                nics)
+            if out and out == NICS_STATUS_CONNECTED:
+                logger.info("NICs are connected after %d seconds", count)
+                return
+
+            time.sleep(enableNicsWaitSeconds)
+
+    logger.warning("Can't connect network interfaces after %d attempts",
+                   enableNicsWaitRetries)
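Taken together, these helpers implement a small status protocol with the
hypervisor: get_nics_to_enable() reads the NIC list recorded by the
customization engine, enable_nics() asks the platform to connect them, and
set_customization_status() reports progress using the enums above. A minimal
sketch of the intended flow (the directory path is a hypothetical example;
the functions and enums are the ones defined in this changeset):

    from cloudinit.sources.helpers.vmware.imc.guestcust_error import (
        GuestCustErrorEnum)
    from cloudinit.sources.helpers.vmware.imc.guestcust_state import (
        GuestCustStateEnum)
    from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
        enable_nics, get_nics_to_enable, set_customization_status)

    # Read nics.txt from the directory the customization engine used
    # (this path is an assumption for illustration).
    nics = get_nics_to_enable("/var/run/vmware-imc")
    if nics:
        # Poll the platform until the NICs report as connected.
        enable_nics(nics)

    # Finally, report successful completion of customization.
    set_customization_status(
        GuestCustStateEnum.GUESTCUST_STATE_DONE,
        GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)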
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
new file mode 100644
index 00000000..33f88726
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -0,0 +1,45 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class Ipv4ModeEnum:
+    """
+    The IPv4 configuration mode which directly represents the user's goal.
+
+    This mode effectively acts as a contract of the in-guest customization
+    engine. It must be set based on what the user has requested and should
+    not be changed by those layers. It's up to the in-guest engine to
+    interpret and materialize the user's request.
+    """
+
+    # The legacy mode which only allows dhcp/static based on whether IPv4
+    # addresses list is empty or not
+    IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+
+    # IPv4 must use static address. Reserved for future use
+    IPV4_MODE_STATIC = 'STATIC'
+
+    # IPv4 must use DHCPv4. Reserved for future use
+    IPV4_MODE_DHCP = 'DHCP'
+
+    # IPv4 must be disabled
+    IPV4_MODE_DISABLED = 'DISABLED'
+
+    # IPv4 settings should be left untouched. Reserved for future use
+    IPV4_MODE_AS_IS = 'AS_IS'
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
new file mode 100644
index 00000000..b5d704ea
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -0,0 +1,147 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .boot_proto import BootProtoEnum
+from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base
+
+
+class Nic(NicBase):
+    """
+    Holds the information about each NIC specified
+    in the customization specification file
+    """
+
+    def __init__(self, name, configFile):
+        self._name = name
+        self._configFile = configFile
+
+    def _get(self, what):
+        return self._configFile.get(self.name + '|' + what, None)
+
+    def _get_count_with_prefix(self, prefix):
+        return self._configFile.get_count_with_prefix(self.name + prefix)
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def mac(self):
+        return self._get('MACADDR').lower()
+
+    @property
+    def primary(self):
+        value = self._get('PRIMARY')
+        if value:
+            value = value.lower()
+            return value == 'yes' or value == 'true'
+        else:
+            return False
+
+    @property
+    def onboot(self):
+        value = self._get('ONBOOT')
+        if value:
+            value = value.lower()
+            return value == 'yes' or value == 'true'
+        else:
+            return False
+
+    @property
+    def bootProto(self):
+        value = self._get('BOOTPROTO')
+        if value:
+            return value.lower()
+        else:
+            return ""
+
+    @property
+    def ipv4_mode(self):
+        value = self._get('IPv4_MODE')
+        if value:
+            return value.lower()
+        else:
+            return ""
+
+    @property
+    def staticIpv4(self):
+        """
+        Checks the BOOTPROTO property and returns StaticIPv4Addr
+        configuration object if STATIC configuration is set.
+        """
+        if self.bootProto == BootProtoEnum.STATIC:
+            return [StaticIpv4Addr(self)]
+        else:
+            return None
+
+    @property
+    def staticIpv6(self):
+        cnt = self._get_count_with_prefix('|IPv6ADDR|')
+
+        if not cnt:
+            return None
+
+        result = []
+        for index in range(1, cnt + 1):
+            result.append(StaticIpv6Addr(self, index))
+
+        return result
+
+
+class StaticIpv4Addr(StaticIpv4Base):
+    """Static IPV4 Setting."""
+
+    def __init__(self, nic):
+        self._nic = nic
+
+    @property
+    def ip(self):
+        return self._nic._get('IPADDR')
+
+    @property
+    def netmask(self):
+        return self._nic._get('NETMASK')
+
+    @property
+    def gateways(self):
+        value = self._nic._get('GATEWAY')
+        if value:
+            return [x.strip() for x in value.split(',')]
+        else:
+            return None
+
+
+class StaticIpv6Addr(StaticIpv6Base):
+    """Static IPV6 Address."""
+
+    def __init__(self, nic, index):
+        self._nic = nic
+        self._index = index
+
+    @property
+    def ip(self):
+        return self._nic._get('IPv6ADDR|' + str(self._index))
+
+    @property
+    def netmask(self):
+        return self._nic._get('IPv6NETMASK|' + str(self._index))
+
+    @property
+    def gateway(self):
+        return self._nic._get('IPv6GATEWAY|' + str(self._index))
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
new file mode 100644
index 00000000..030ba311
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -0,0 +1,154 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class NicBase:
+    """
+    Defines what is expected of each nic.
+    The following properties should be provided in an implementation class.
+    """
+
+    @property
+    def mac(self):
+        """
+        Retrieves the mac address of the nic
+        @return (str) : the MACADDR setting
+        """
+        raise NotImplementedError('MACADDR')
+
+    @property
+    def primary(self):
+        """
+        Retrieves whether the nic is the primary nic.
+        Indicates whether this NIC will be used to define the default
+        gateway. If none of the NICs is configured to be primary, the
+        default gateway won't be set.
+        @return (bool): the PRIMARY setting
+        """
+        raise NotImplementedError('PRIMARY')
+
+    @property
+    def onboot(self):
+        """
+        Retrieves whether the nic should be up at boot time
+        @return (bool) : the ONBOOT setting
+        """
+        raise NotImplementedError('ONBOOT')
+
+    @property
+    def bootProto(self):
+        """
+        Retrieves the boot protocol of the nic
+        @return (str): the BOOTPROTO setting, valid values: dhcp and static.
+        """
+        raise NotImplementedError('BOOTPROTO')
+
+    @property
+    def ipv4_mode(self):
+        """
+        Retrieves the IPv4_MODE
+        @return (str): the IPv4_MODE setting, valid values:
+        backwards_compatible, static, dhcp, disabled, as_is
+        """
+        raise NotImplementedError('IPv4_MODE')
+
+    @property
+    def staticIpv4(self):
+        """
+        Retrieves the static IPv4 configuration of the nic
+        @return (StaticIpv4Base list): the static ipv4 setting
+        """
+        raise NotImplementedError('Static IPv4')
+
+    @property
+    def staticIpv6(self):
+        """
+        Retrieves the IPv6 configuration of the nic
+        @return (StaticIpv6Base list): the static ipv6 setting
+        """
+        raise NotImplementedError('Static Ipv6')
+
+    def validate(self):
+        """
+        Validate the object.
+        For example, the staticIpv4 property is required and should not be
+        empty when ipv4Mode is STATIC.
+        """
+        raise NotImplementedError('Check constraints on properties')
+
+
+class StaticIpv4Base:
+    """
+    Defines what is expected of a static IPv4 setting.
+    The following properties should be provided in an implementation class.
+    """
+
+    @property
+    def ip(self):
+        """
+        Retrieves the Ipv4 address
+        @return (str): the IPADDR setting
+        """
+        raise NotImplementedError('Ipv4 Address')
+
+    @property
+    def netmask(self):
+        """
+        Retrieves the Ipv4 NETMASK setting
+        @return (str): the NETMASK setting
+        """
+        raise NotImplementedError('Ipv4 NETMASK')
+
+    @property
+    def gateways(self):
+        """
+        Retrieves the gateways on this Ipv4 subnet
+        @return (str list): the GATEWAY setting
+        """
+        raise NotImplementedError('Ipv4 GATEWAY')
+
+
+class StaticIpv6Base:
+    """Defines what is expected of a static IPv6 setting.
+    The following properties should be provided in an implementation class.
+ """ + + @property + def ip(self): + """ + Retrieves the Ipv6 address + @return (str): the IPv6ADDR setting + """ + raise NotImplementedError('Ipv6 Address') + + @property + def netmask(self): + """ + Retrieves the Ipv6 NETMASK setting + @return (str): the IPv6NETMASK setting + """ + raise NotImplementedError('Ipv6 NETMASK') + + @property + def gateway(self): + """ + Retrieves the Ipv6 GATEWAY setting + @return (str): the IPv6GATEWAY setting + """ + raise NotImplementedError('Ipv6 GATEWAY') diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 9b2f5ed5..c74a7ae2 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -31,7 +31,8 @@ LOG = logging.getLogger(__name__) DEF_SSHD_CFG = "/etc/ssh/sshd_config" # taken from openssh source key.c/key_type_from_name -VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", +VALID_KEY_TYPES = ( + "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d28e765b..3fbb4443 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -43,9 +43,11 @@ from cloudinit import distros from cloudinit import helpers from cloudinit import importer from cloudinit import log as logging +from cloudinit import net from cloudinit import sources from cloudinit import type_utils from cloudinit import util +from cloudinit.reporting import events LOG = logging.getLogger(__name__) @@ -53,7 +55,7 @@ NULL_DATA_SOURCE = None class Init(object): - def __init__(self, ds_deps=None): + def __init__(self, ds_deps=None, reporter=None): if ds_deps is not None: self.ds_deps = ds_deps else: @@ -65,6 +67,12 @@ class Init(object): # Changed only when a fetch occurs self.datasource = NULL_DATA_SOURCE + if reporter is None: + reporter = events.ReportEventStack( + name="init-reporter", description="init-desc", + reporting_enabled=False) + self.reporter = reporter + def _reset(self, reset_ds=False): # Recreated on access self._cfg = None @@ -133,7 +141,7 @@ class Init(object): ] return initial_dirs - def purge_cache(self, rm_instance_lnk=True): + def purge_cache(self, rm_instance_lnk=False): rm_list = [] rm_list.append(self.paths.boot_finished) if rm_instance_lnk: @@ -186,40 +194,12 @@ class Init(object): # We try to restore from a current link and static path # by using the instance link, if purge_cache was called # the file wont exist. - pickled_fn = self.paths.get_ipath_cur('obj_pkl') - pickle_contents = None - try: - pickle_contents = util.load_file(pickled_fn, decode=False) - except Exception as e: - if os.path.isfile(pickled_fn): - LOG.warn("failed loading pickle in %s: %s" % (pickled_fn, e)) - pass - - # This is expected so just return nothing - # successfully loaded... 
-        if not pickle_contents:
-            return None
-        try:
-            return pickle.loads(pickle_contents)
-        except Exception:
-            util.logexc(LOG, "Failed loading pickled blob from %s", pickled_fn)
-            return None
+        return _pkl_load(self.paths.get_ipath_cur('obj_pkl'))
 
     def _write_to_cache(self):
         if self.datasource is NULL_DATA_SOURCE:
             return False
-        pickled_fn = self.paths.get_ipath_cur("obj_pkl")
-        try:
-            pk_contents = pickle.dumps(self.datasource)
-        except Exception:
-            util.logexc(LOG, "Failed pickling datasource %s", self.datasource)
-            return False
-        try:
-            util.write_file(pickled_fn, pk_contents, omode="wb", mode=0o400)
-        except Exception:
-            util.logexc(LOG, "Failed pickling datasource to %s", pickled_fn)
-            return False
-        return True
+        return _pkl_store(self.datasource, self.paths.get_ipath_cur("obj_pkl"))
 
     def _get_datasources(self):
         # Any config provided???
@@ -231,13 +211,30 @@ class Init(object):
         cfg_list = self.cfg.get('datasource_list') or []
         return (cfg_list, pkg_list)
 
-    def _get_data_source(self):
+    def _get_data_source(self, existing):
         if self.datasource is not NULL_DATA_SOURCE:
             return self.datasource
-        ds = self._restore_from_cache()
-        if ds:
-            LOG.debug("Restored from cache, datasource: %s", ds)
+
+        with events.ReportEventStack(
+                name="check-cache",
+                description="attempting to read from cache [%s]" % existing,
+                parent=self.reporter) as myrep:
+            ds = self._restore_from_cache()
+            if ds and existing == "trust":
+                myrep.description = "restored from cache: %s" % ds
+            elif ds and existing == "check":
+                if (hasattr(ds, 'check_instance_id') and
+                        ds.check_instance_id(self.cfg)):
+                    myrep.description = "restored from checked cache: %s" % ds
+                else:
+                    myrep.description = "cache invalid in datasource: %s" % ds
+                    ds = None
+            else:
+                myrep.description = "no cache found"
+            LOG.debug(myrep.description)
+
+        if not ds:
+            util.del_file(self.paths.instance_link)
             (cfg_list, pkg_list) = self._get_datasources()
             # Deep copy so that user-data handlers can not modify
             # (which will affect user-data handlers down the line...)
@@ -246,7 +243,7 @@ class Init(object):
                                            self.paths,
                                            copy.deepcopy(self.ds_deps),
                                            cfg_list,
-                                           pkg_list)
+                                           pkg_list, self.reporter)
         LOG.info("Loaded datasource %s - %s", dsname, ds)
         self.datasource = ds
         # Ensure we adjust our path members datasource
@@ -317,8 +314,8 @@ class Init(object):
             self._reset()
         return iid
 
-    def fetch(self):
-        return self._get_data_source()
+    def fetch(self, existing="check"):
+        return self._get_data_source(existing=existing)
 
     def instancify(self):
         return self._reflect_cur_instance()
@@ -327,7 +324,8 @@ class Init(object):
         # Form the needed options to cloudify our members
         return cloud.Cloud(self.datasource, self.paths, self.cfg,
-                           self.distro, helpers.Runners(self.paths))
+                           self.distro, helpers.Runners(self.paths),
+                           reporter=self.reporter)
 
     def update(self):
         if not self._write_to_cache():
@@ -493,8 +491,14 @@ class Init(object):
     def consume_data(self, frequency=PER_INSTANCE):
         # Consume the userdata first, because we need want to let the part
         # handlers run first (for merging stuff)
-        self._consume_userdata(frequency)
-        self._consume_vendordata(frequency)
+        with events.ReportEventStack("consume-user-data",
+                                     "reading and applying user-data",
+                                     parent=self.reporter):
+            self._consume_userdata(frequency)
+        with events.ReportEventStack("consume-vendor-data",
+                                     "reading and applying vendor-data",
+                                     parent=self.reporter):
+            self._consume_vendordata(frequency)
 
         # Perform post-consumption adjustments so that
         # modules that run during the init stage reflect
@@ -565,13 +569,53 @@ class Init(object):
         # Run the handlers
         self._do_handlers(user_data_msg, c_handlers_list, frequency)
 
+    def _find_networking_config(self):
+        disable_file = os.path.join(
+            self.paths.get_cpath('data'), 'upgraded-network')
+        if os.path.exists(disable_file):
+            return (None, disable_file)
+
+        cmdline_cfg = ('cmdline', net.read_kernel_cmdline_config())
+        dscfg = ('ds', None)
+        if self.datasource and hasattr(self.datasource, 'network_config'):
+            dscfg = ('ds', self.datasource.network_config)
+        sys_cfg = ('system_cfg', self.cfg.get('network'))
+
+        for loc, ncfg in (cmdline_cfg, dscfg, sys_cfg):
+            if net.is_disabled_cfg(ncfg):
+                LOG.debug("network config disabled by %s", loc)
+                return (None, loc)
+            if ncfg:
+                return (ncfg, loc)
+        return (net.generate_fallback_config(), "fallback")
+
+    def apply_network_config(self):
+        netcfg, src = self._find_networking_config()
+        if netcfg is None:
+            LOG.info("network config is disabled by %s", src)
+            return
+
+        LOG.info("Applying network configuration from %s: %s", src, netcfg)
+        try:
+            return self.distro.apply_network_config(netcfg)
+        except NotImplementedError:
+            LOG.warn("distro '%s' does not implement apply_network_config. "
+                     "networking may not be configured properly." %
+                     self.distro)
+            return
+
 
 class Modules(object):
-    def __init__(self, init, cfg_files=None):
+    def __init__(self, init, cfg_files=None, reporter=None):
         self.init = init
         self.cfg_files = cfg_files
         # Created on first use
         self._cached_cfg = None
+        if reporter is None:
+            reporter = events.ReportEventStack(
+                name="module-reporter", description="module-desc",
+                reporting_enabled=False)
+        self.reporter = reporter
 
     @property
     def cfg(self):
@@ -628,7 +672,7 @@ class Modules(object):
             else:
                 raise TypeError(("Failed to read '%s' item in config,"
                                  " unknown type %s") %
-                               (item, type_utils.obj_name(item)))
+                                (item, type_utils.obj_name(item)))
         return module_list
 
     def _fixup_modules(self, raw_mods):
@@ -681,7 +725,19 @@ class Modules(object):
                 which_ran.append(name)
                 # This name will affect the semaphore name created
                 run_name = "config-%s" % (name)
-                cc.run(run_name, mod.handle, func_args, freq=freq)
+
+                desc = "running %s with frequency %s" % (run_name, freq)
+                myrep = events.ReportEventStack(
+                    name=run_name, description=desc, parent=self.reporter)
+
+                with myrep:
+                    ran, _r = cc.run(run_name, mod.handle, func_args,
+                                     freq=freq)
+                    if ran:
+                        myrep.message = "%s ran successfully" % run_name
+                    else:
+                        myrep.message = "%s previously ran" % run_name
+
             except Exception as e:
                 util.logexc(LOG, "Running module %s (%s) failed", name, mod)
                 failures.append((name, e))
@@ -723,8 +779,8 @@ class Modules(object):
 
         if skipped:
             LOG.info("Skipping modules %s because they are not verified "
-                    "on distro '%s'. To run anyway, add them to "
-                    "'unverified_modules' in config.", skipped, d_name)
+                     "on distro '%s'. To run anyway, add them to "
+                     "'unverified_modules' in config.", skipped, d_name)
         if forced:
             LOG.info("running unverified_modules: %s", forced)
 
@@ -749,3 +805,36 @@ def fetch_base_config():
         base_cfgs.append(default_cfg)
 
     return util.mergemanydict(base_cfgs)
+
+
+def _pkl_store(obj, fname):
+    try:
+        pk_contents = pickle.dumps(obj)
+    except Exception:
+        util.logexc(LOG, "Failed pickling datasource %s", obj)
+        return False
+    try:
+        util.write_file(fname, pk_contents, omode="wb", mode=0o400)
+    except Exception:
+        util.logexc(LOG, "Failed pickling datasource to %s", fname)
+        return False
+    return True
+
+
+def _pkl_load(fname):
+    pickle_contents = None
+    try:
+        pickle_contents = util.load_file(fname, decode=False)
+    except Exception as e:
+        if os.path.isfile(fname):
+            LOG.warn("failed loading pickle in %s: %s" % (fname, e))
+        pass
+
+    # This is allowed so just return nothing successfully loaded...
+    if not pickle_contents:
+        return None
+    try:
+        return pickle.loads(pickle_contents)
+    except Exception:
+        util.logexc(LOG, "Failed loading pickled blob from %s", fname)
+        return None
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 0e65f431..936f7da5 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -20,12 +20,16 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
-import time
-
+import json
+import os
+import requests
 import six
+import time
 
-import requests
+from email.utils import parsedate
+from functools import partial
 from requests import exceptions
+import oauthlib.oauth1 as oauth1
 
 from six.moves.urllib.parse import (
     urlparse, urlunparse,
@@ -147,13 +151,14 @@ class UrlResponse(object):
 
 
 class UrlError(IOError):
-    def __init__(self, cause, code=None, headers=None):
+    def __init__(self, cause, code=None, headers=None, url=None):
         IOError.__init__(self, str(cause))
         self.cause = cause
         self.code = code
         self.headers = headers
         if self.headers is None:
             self.headers = {}
+        self.url = url
 
 
 def _get_ssl_args(url, ssl_details):
@@ -206,10 +211,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
     manual_tries = 1
     if retries:
         manual_tries = max(int(retries) + 1, 1)
-    if not headers:
-        headers = {
-            'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
-        }
+
+    def_headers = {
+        'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
+    }
+    if headers:
+        def_headers.update(headers)
+    headers = def_headers
+
     if not headers_cb:
         def _cb(url):
             return headers
@@ -243,18 +252,21 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             # attrs
            return UrlResponse(r)
        except exceptions.RequestException as e:
-            if (isinstance(e, (exceptions.HTTPError))
-                    and hasattr(e, 'response')  # This appeared in v 0.10.8
-                    and hasattr(e.response, 'status_code')):
+            if (isinstance(e, (exceptions.HTTPError)) and
+                    hasattr(e, 'response') and  # This appeared in v 0.10.8
+                    hasattr(e.response, 'status_code')):
                 excps.append(UrlError(e, code=e.response.status_code,
-                                      headers=e.response.headers))
+                                      headers=e.response.headers,
+                                      url=url))
             else:
-                excps.append(UrlError(e))
+                excps.append(UrlError(e, url=url))
             if SSL_ENABLED and isinstance(e, exceptions.SSLError):
                 # ssl exceptions are not going to get fixed by waiting a
                 # few seconds
                 break
-            if exception_cb and not exception_cb(req_args.copy(), excps[-1]):
+            if exception_cb and exception_cb(req_args.copy(), excps[-1]):
+                # if an exception callback was given, it should return None;
+                # a true-ish value means to break and re-raise the exception
                 break
             if i + 1 < manual_tries and sec_between > 0:
                 LOG.debug("Please wait %s seconds while we wait to try again",
@@ -333,11 +345,11 @@ def wait_for_url(urls, max_wait=None, timeout=None,
                 if not response.contents:
                     reason = "empty response [%s]" % (response.code)
                     url_exc = UrlError(ValueError(reason), code=response.code,
-                                       headers=response.headers)
+                                       headers=response.headers, url=url)
                 elif not response.ok():
                     reason = "bad status code [%s]" % (response.code)
                     url_exc = UrlError(ValueError(reason), code=response.code,
-                                       headers=response.headers)
+                                       headers=response.headers, url=url)
                 else:
                     return url
             except UrlError as e:
@@ -368,3 +380,129 @@ def wait_for_url(urls, max_wait=None, timeout=None,
             time.sleep(sleep_time)
 
     return False
+
+
+class OauthUrlHelper(object):
+    def __init__(self, consumer_key=None, token_key=None,
+                 token_secret=None, consumer_secret=None,
+                 skew_data_file="/run/oauth_skew.json"):
+        self.consumer_key = consumer_key
+        self.consumer_secret = consumer_secret or ""
+        self.token_key = token_key
+        self.token_secret = token_secret
+        self.skew_data_file = skew_data_file
+        self._do_oauth = True
+        self.skew_change_limit = 5
+        required = (self.token_key, self.token_secret, self.consumer_key)
+        if not any(required):
+            self._do_oauth = False
+        elif not all(required):
+            raise ValueError("all or none of token_key, token_secret, or "
+                             "consumer_key can be set")
+
+        old = self.read_skew_file()
+        self.skew_data = old or {}
+
+    def read_skew_file(self):
+        if self.skew_data_file and os.path.isfile(self.skew_data_file):
+            with open(self.skew_data_file, mode="r") as fp:
+                return json.load(fp)
+        return None
+
+    def update_skew_file(self, host, value):
+        # this is not atomic
+        if not self.skew_data_file:
+            return
+        cur = self.read_skew_file()
+        if cur is None:
+            cur = {}
+        cur[host] = value
+        with open(self.skew_data_file, mode="w") as fp:
+            fp.write(json.dumps(cur))
+
+    def exception_cb(self, msg, exception):
+        if not (isinstance(exception, UrlError) and
+                (exception.code == 403 or exception.code == 401)):
+            return
+
+        if 'date' not in exception.headers:
+            LOG.warn("Missing header 'date' in %s response", exception.code)
+            return
+
+        date = exception.headers['date']
+        try:
+            remote_time = time.mktime(parsedate(date))
+        except Exception as e:
+            LOG.warn("Failed to convert datetime '%s': %s", date, e)
+            return
+
+        skew = int(remote_time - time.time())
+        host = urlparse(exception.url).netloc
+        old_skew = self.skew_data.get(host, 0)
+        if abs(old_skew - skew) > self.skew_change_limit:
+            self.update_skew_file(host, skew)
+            LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
+        self.skew_data[host] = skew
+
+        return
+
+    def headers_cb(self, url):
+        if not self._do_oauth:
+            return {}
+
+        timestamp = None
+        host = urlparse(url).netloc
+        if self.skew_data and host in self.skew_data:
+            timestamp = int(time.time()) + self.skew_data[host]
+
+        return oauth_headers(
+            url=url, consumer_key=self.consumer_key,
+            token_key=self.token_key, token_secret=self.token_secret,
+            consumer_secret=self.consumer_secret, timestamp=timestamp)
+
+    def _wrapped(self, wrapped_func, args, kwargs):
+        kwargs['headers_cb'] = partial(
+            self._headers_cb, kwargs.get('headers_cb'))
+        kwargs['exception_cb'] = partial(
+            self._exception_cb, kwargs.get('exception_cb'))
+        return wrapped_func(*args, **kwargs)
+
+    def wait_for_url(self, *args, **kwargs):
+        return self._wrapped(wait_for_url, args, kwargs)
+
+    def readurl(self, *args, **kwargs):
+        return self._wrapped(readurl, args, kwargs)
+
+    def _exception_cb(self, extra_exception_cb, msg, exception):
+        ret = None
+        try:
+            if extra_exception_cb:
+                ret = extra_exception_cb(msg, exception)
+        finally:
+            self.exception_cb(msg, exception)
+        return ret
+
+    def _headers_cb(self, extra_headers_cb, url):
+        headers = {}
+        if extra_headers_cb:
+            headers = extra_headers_cb(url)
+        headers.update(self.headers_cb(url))
+        return headers
+
+
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
+                  timestamp=None):
+    if timestamp:
+        timestamp = str(timestamp)
+    else:
+        timestamp = None
+
+    client = oauth1.Client(
+        consumer_key,
+        client_secret=consumer_secret,
+        resource_owner_key=token_key,
+        resource_owner_secret=token_secret,
+        signature_method=oauth1.SIGNATURE_PLAINTEXT,
+        timestamp=timestamp)
+    uri, signed_headers, body = client.sign(url)
+    return signed_headers
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 02ba654a..0d21e11b 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -76,7 +76,11 @@ FALSE_STRINGS = ('off', '0', 'no', 'false')
 
 # Helper utils to see if running in a container
-CONTAINER_TESTS = ('running-in-container', 'lxc-is-container')
+CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
+                   ['running-in-container'],
+                   ['lxc-is-container'])
+
+PROC_CMDLINE = None
 
 
 def decode_binary(blob, encoding='utf-8'):
@@ -610,7 +614,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
 def make_url(scheme, host, port=None,
-            path='', params='', query='', fragment=''):
+             path='', params='', query='', fragment=''):
 
     pieces = []
     pieces.append(scheme or '')
@@ -782,7 +786,8 @@ def read_file_or_url(url, timeout=5, retries=10,
                 code = e.errno
                 if e.errno == errno.ENOENT:
                     code = url_helper.NOT_FOUND
-                raise url_helper.UrlError(cause=e, code=code, headers=None)
+                raise url_helper.UrlError(cause=e, code=code, headers=None,
+                                          url=url)
             return url_helper.FileResponse(file_path, contents=contents)
     else:
         return url_helper.readurl(url,
@@ -801,8 +806,8 @@ def load_yaml(blob, default=None, allowed=(dict,)):
         blob = decode_binary(blob)
     try:
         LOG.debug("Attempting to load yaml from string "
-                 "of length %s with allowed root types %s",
-                 len(blob), allowed)
+                  "of length %s with allowed root types %s",
+                  len(blob), allowed)
         converted = safeyaml.load(blob)
         if not isinstance(converted, allowed):
             # Yes this will just be caught, but thats ok for now...
@@ -875,7 +880,7 @@ def read_conf_with_confd(cfgfile):
         if not isinstance(confd, six.string_types):
             raise TypeError(("Config file %s contains 'conf_d' "
                              "with non-string type %s") %
-                           (cfgfile, type_utils.obj_name(confd)))
+                            (cfgfile, type_utils.obj_name(confd)))
         else:
             confd = str(confd).strip()
     elif os.path.isdir("%s.d" % cfgfile):
@@ -1038,7 +1043,8 @@ def is_resolvable(name):
         for iname in badnames:
             try:
                 result = socket.getaddrinfo(iname, None, 0, 0,
-                                            socket.SOCK_STREAM, socket.AI_CANONNAME)
+                                            socket.SOCK_STREAM,
+                                            socket.AI_CANONNAME)
                 badresults[iname] = []
                 for (_fam, _stype, _proto, cname, sockaddr) in result:
                     badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
@@ -1106,7 +1112,7 @@ def close_stdin():
 
 
 def find_devs_with(criteria=None, oformat='device',
-                    tag=None, no_cache=False, path=None):
+                   tag=None, no_cache=False, path=None):
     """
    find devices matching given criteria (via blkid)
    criteria can be *one* of:
@@ -1187,12 +1193,27 @@ def load_file(fname, read_cb=None, quiet=False, decode=True):
 
 def get_cmdline():
     if 'DEBUG_PROC_CMDLINE' in os.environ:
-        cmdline = os.environ["DEBUG_PROC_CMDLINE"]
+        return os.environ["DEBUG_PROC_CMDLINE"]
+
+    global PROC_CMDLINE
+    if PROC_CMDLINE is not None:
+        return PROC_CMDLINE
+
+    if is_container():
+        try:
+            contents = load_file("/proc/1/cmdline")
+            # replace nulls with space and drop trailing null
+            cmdline = contents.replace("\x00", " ")[:-1]
+        except Exception as e:
+            LOG.warn("failed reading /proc/1/cmdline: %s", e)
+            cmdline = ""
     else:
         try:
             cmdline = load_file("/proc/cmdline").strip()
         except:
            cmdline = ""
+
+    PROC_CMDLINE = cmdline
     return cmdline
 
 
@@ -1479,8 +1500,8 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
     mounted = mounts()
     with tempdir() as tmpd:
         umount = False
-        if device in mounted:
-            mountpoint = mounted[device]['mountpoint']
+        if os.path.realpath(device) in mounted:
+            mountpoint = mounted[os.path.realpath(device)]['mountpoint']
         else:
             failure_reason = None
             for mtype in mtypes:
@@ -1565,7 +1586,7 @@ def uptime():
     try:
         if os.path.exists("/proc/uptime"):
             method = '/proc/uptime'
-            contents = load_file("/proc/uptime").strip()
+            contents = load_file("/proc/uptime")
             if contents:
                 uptime_str = contents.split()[0]
             else:
@@ -1625,7 +1646,7 @@ def write_file(filename, content, mode=0o644, omode="wb"):
         content = decode_binary(content)
         write_type = 'characters'
     LOG.debug("Writing to %s - %s: [%s] %s %s",
-             filename, omode, mode, len(content), write_type)
+              filename, omode, mode, len(content), write_type)
     with SeLinuxGuard(path=filename):
         with open(filename, omode) as fh:
             fh.write(content)
@@ -1748,7 +1769,7 @@ def is_container():
         try:
             # try to run a helper program. if it returns true/zero
             # then we're inside a container. otherwise, no
-            subp([helper])
+            subp(helper)
             return True
         except (IOError, OSError):
             pass
@@ -2136,15 +2157,21 @@ def _read_dmi_syspath(key):
             LOG.debug("did not find %s", dmi_key_path)
             return None
 
-        key_data = load_file(dmi_key_path)
+        key_data = load_file(dmi_key_path, decode=False)
         if not key_data:
             LOG.debug("%s did not return any data", dmi_key_path)
             return None
 
-        LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
-        return key_data.strip()
+        # uninitialized dmi values show as all \xff and /sys appends a '\n'.
+        # in that event, return an empty string.
+        if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
+            key_data = b""
 
-    except Exception as e:
+        str_data = key_data.decode('utf8').strip()
+        LOG.debug("dmi data %s returned %s", dmi_key_path, str_data)
+        return str_data
+
+    except Exception:
         logexc(LOG, "failed read of %s", dmi_key_path)
         return None
 
@@ -2158,6 +2185,9 @@ def _call_dmidecode(key, dmidecode_path):
         cmd = [dmidecode_path, "--string", key]
         (result, _err) = subp(cmd)
         LOG.debug("dmidecode returned '%s' for '%s'", result, key)
+        result = result.strip()
+        if result.replace(".", "") == "":
+            return ""
         return result
     except (IOError, OSError) as _err:
         LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err.message)
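As a closing illustration, the uninitialized-DMI handling added to
_read_dmi_syspath above can be expressed as a standalone rule. This is a
hedged re-implementation for illustration only, not the util.py code itself:

    def normalize_dmi_bytes(key_data):
        # Uninitialized DMI values read from /sys as all 0xff bytes plus a
        # trailing newline; treat that case as "no data".
        if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
            key_data = b""
        return key_data.decode('utf8').strip()

    assert normalize_dmi_bytes(b'\xff\xff\xff\n') == ''
    assert normalize_dmi_bytes(b'ACME Inc.\n') == 'ACME Inc.'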