From 53f1938a1c33b4d9e333101d1d614803373a6bc5 Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Fri, 6 Dec 2013 21:25:04 +0000 Subject: new: FreeBSD module to support cloud-init on the FBSD10 platform. In its current form its still missing some modules though. Supported: -SSH-keys -growpart -growfs -adduser -powerstate --- cloudinit/config/cc_growpart.py | 40 +++++- cloudinit/config/cc_power_state_change.py | 32 ++++- cloudinit/config/cc_resizefs.py | 7 +- cloudinit/distros/__init__.py | 1 + cloudinit/distros/freebsd.py | 208 ++++++++++++++++++++++++++++++ cloudinit/netinfo.py | 46 ++++++- cloudinit/sources/__init__.py | 2 +- cloudinit/util.py | 61 +++++++-- 8 files changed, 370 insertions(+), 27 deletions(-) create mode 100644 cloudinit/distros/freebsd.py (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 0dd92a46..81de4c37 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -22,6 +22,7 @@ import os import os.path import re import stat +import sys from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS @@ -137,6 +138,35 @@ class ResizeGrowPart(object): return (before, get_size(partdev)) +class ResizeGpart(object): + def available(self): + if not os.path.exists('/usr/local/sbin/gpart'): + return False + return True + + def resize(self, diskdev, partnum, partdev): + """ + GPT disks store metadata at the beginning (primary) and at the + end (secondary) of the disk. When launching an image with a + larger disk compared to the original image, the secondary copy + is lost. Thus, the metadata will be marked CORRUPT, and need to + be recovered. + """ + try: + util.subp(["gpart", "recover", diskdev]) + except util.ProcessExecutionError as e: + if e.exit_code != 0: + util.logexc(LOG, "Failed: gpart recover %s", diskdev) + raise ResizeFailedException(e) + + before = get_size(partdev) + try: + util.subp(["gpart", "resize", "-i", partnum, diskdev]) + except util.ProcessExecutionError as e: + util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev) + raise ResizeFailedException(e) + + return (before, get_size(partdev)) def get_size(filename): fd = os.open(filename, os.O_RDONLY) @@ -156,6 +186,12 @@ def device_part_info(devpath): bname = os.path.basename(rpath) syspath = "/sys/class/block/%s" % bname + # FreeBSD doesn't know of sysfs so just get everything we need from + # the device, like /dev/vtbd0p2. 
+ if sys.platform.startswith('freebsd'): + m = re.search('^(/dev/.+)p([0-9])$', devpath) + return (m.group(1), m.group(2)) + if not os.path.exists(syspath): raise ValueError("%s had no syspath (%s)" % (devpath, syspath)) @@ -206,7 +242,7 @@ def resize_devices(resizer, devices): "stat of '%s' failed: %s" % (blockdev, e),)) continue - if not stat.S_ISBLK(statret.st_mode): + if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode): info.append((devent, RESIZE.SKIPPED, "device '%s' not a block device" % blockdev,)) continue @@ -281,4 +317,4 @@ def handle(_name, cfg, _cloud, log, _args): # LP: 1212444 FIXME re-order and favor ResizeParted #RESIZERS = (('growpart', ResizeGrowPart),) -RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted)) +RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted), ('gpart', ResizeGpart)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index e3150808..46ce5ba5 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -23,12 +23,34 @@ import errno import os import re import subprocess +import sys import time frequency = PER_INSTANCE EXIT_FAIL = 254 +# +# Returns the cmdline for the given process id. +# + +def givecmdline(pid): + # Check if this pid still exists by sending it the harmless 0 signal. + try: + os.kill(pid, 0) + except OSError: + return None + else: + # Example output from procstat -c 16357 + # PID COMM ARGS + # 1 init /bin/init -- + if sys.platform.startswith('freebsd'): + (output, _err) = util.subp(['procstat', '-c', str(pid)]) + line = output.splitlines()[1] + m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line) + return m.group(2) + else: + return util.load_file("/proc/%s/cmdline" % pid) def handle(_name, cfg, _cloud, log, _args): @@ -42,8 +64,8 @@ def handle(_name, cfg, _cloud, log, _args): return mypid = os.getpid() - cmdline = util.load_file("/proc/%s/cmdline" % mypid) + cmdline = givecmdline(mypid) if not cmdline: log.warn("power_state: failed to get cmdline of current process") return @@ -119,8 +141,6 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): msg = None end_time = time.time() + timeout - cmdline_f = "/proc/%s/cmdline" % pid - def fatal(msg): if log: log.warn(msg) @@ -134,16 +154,14 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): break try: - cmdline = "" - with open(cmdline_f) as fp: - cmdline = fp.read() + cmdline = givecmdline(pid) if cmdline != pidcmdline: msg = "cmdline changed for %s [now: %s]" % (pid, cmdline) break except IOError as ioerr: if ioerr.errno in known_errnos: - msg = "pidfile '%s' gone [%d]" % (cmdline_f, ioerr.errno) + msg = "pidfile gone [%d]" % ioerr.errno else: fatal("IOError during wait: %s" % ioerr) break diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 56040fdd..95bc7a4e 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -39,6 +39,10 @@ def _resize_ext(mount_point, devpth): # pylint: disable=W0613 def _resize_xfs(mount_point, devpth): # pylint: disable=W0613 return ('xfs_growfs', devpth) + +def _resize_ufs(mount_point, devpth): # pylint: disable=W0613 + return ('growfs', devpth) + # Do not use a dictionary as these commands should be able to be used # for multiple filesystem types if possible, e.g. one command for # ext2, ext3 and ext4. 
@@ -46,6 +50,7 @@ RESIZE_FS_PREFIXES_CMDS = [ ('btrfs', _resize_btrfs), ('ext', _resize_ext), ('xfs', _resize_xfs), + ('ufs', _resize_ufs), ] NOBLOCK = "noblock" @@ -91,7 +96,7 @@ def handle(name, cfg, _cloud, log, args): raise exc return - if not stat.S_ISBLK(statret.st_mode): + if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode): if util.is_container(): log.debug("device '%s' not a block device in container." " cannot resize: %s" % (devpth, info)) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 74e95797..46b67fa3 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -39,6 +39,7 @@ from cloudinit.distros.parsers import hosts OSFAMILIES = { 'debian': ['debian', 'ubuntu'], 'redhat': ['fedora', 'rhel'], + 'freebsd': ['freebsd'], 'suse': ['sles'] } diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py new file mode 100644 index 00000000..2d6cd924 --- /dev/null +++ b/cloudinit/distros/freebsd.py @@ -0,0 +1,208 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from cloudinit import distros +from cloudinit import helpers +from cloudinit import log as logging +from cloudinit import netinfo +from cloudinit import ssh_util +from cloudinit import util + +from cloudinit.settings import PER_INSTANCE + +LOG = logging.getLogger(__name__) + +class Distro(distros.Distro): + def __init__(self, name, cfg, paths): + distros.Distro.__init__(self, name, cfg, paths) + # This will be used to restrict certain + # calls from repeatly happening (when they + # should only happen say once per instance...) + self._runner = helpers.Runners(paths) + self.osfamily = 'freebsd' + + def updatercconf(self, key, value): + LOG.debug("updatercconf: %s => %s" % (key, value)) + conf = {} + configchanged = False + with open("/etc/rc.conf") as file: + for line in file: + tok = line.split('=') + # TODO: Handle keys with spaces, make this a bit more robust. 
+ if tok[0] == key: + if tok[1] != value: + conf[tok[0]] = value + LOG.debug("[rc.conf]: Value %s for key %s needs to be changed" % (value, key)) + configchanged = True + else: + conf[tok[0]] = tok[1].rstrip() + + if configchanged: + LOG.debug("Writing new /etc/rc.conf file") + with open ('/etc/rc.conf', 'w') as file: + for keyval in conf.items(): + file.write("%s=%s\n" % keyval) + + def _read_hostname(): + return + + def _read_system_hostname(): + return + + def _select_hostname(self, hostname, fqdn): + if not hostname: + return fqdn + return hostname + + def _write_hostname(self, your_hostname, out_fn): + self.updatercconf('hostname', your_hostname) + + def create_group(self, name, members): + group_add_cmd = ['pw', '-n', name] + if util.is_group(name): + LOG.warn("Skipping creation of existing group '%s'" % name) + else: + try: + util.subp(group_add_cmd) + LOG.info("Created new group %s" % name) + except Exception: + util.logexc("Failed to create group %s", name) + + if len(members) > 0: + for member in members: + if not util.is_user(member): + LOG.warn("Unable to add group member '%s' to group '%s'" + "; user does not exist.", member, name) + continue + util.subp(['pw', 'usermod', '-n', name, '-G', member]) + LOG.info("Added user '%s' to group '%s'" % (member, name)) + + def add_user(self, name, **kwargs): + if util.is_user(name): + LOG.info("User %s already exists, skipping." % name) + return False + + adduser_cmd = ['pw', 'useradd', '-n', name] + log_adduser_cmd = ['pw', 'useradd', '-n', name] + + adduser_opts = { + "homedir": '-d', + "gecos": '-c', + "primary_group": '-g', + "groups": '-G', + "passwd": '-h', + "shell": '-s', + "inactive": '-E', + } + adduser_flags = { + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', + } + + redact_opts = ['passwd'] + + for key, val in kwargs.iteritems(): + if key in adduser_opts and val and isinstance(val, str): + adduser_cmd.extend([adduser_opts[key], val]) + + # Redact certain fields from the logs + if key in redact_opts: + log_adduser_cmd.extend([adduser_opts[key], 'REDACTED']) + else: + log_adduser_cmd.extend([adduser_opts[key], val]) + + elif key in adduser_flags and val: + adduser_cmd.append(adduser_flags[key]) + log_adduser_cmd.append(adduser_flags[key]) + + if 'no_create_home' in kwargs or 'system' in kwargs: + adduser_cmd.append('-d/nonexistent') + log_adduser_cmd.append('-d/nonexistent') + else: + adduser_cmd.append('-d/usr/home/%s' % name) + adduser_cmd.append('-m') + log_adduser_cmd.append('-d/usr/home/%s' % name) + log_adduser_cmd.append('-m') + + # Run the command + LOG.info("Adding user %s", name) + try: + util.subp(adduser_cmd, logstring=log_adduser_cmd) + except Exception as e: + util.logexc(LOG, "Failed to create user %s", name) + raise e + + # TODO: + def set_passwd(self, name, **kwargs): + return False + + def lock_passwd(self, name): + try: + util.subp(['pw', 'usermod', name, '-h', '-']) + except Exception as e: + util.logexc(LOG, "Failed to lock user %s", name) + raise e + + # TODO: + def write_sudo_rules(self, name, rules, sudo_file=None): + LOG.debug("[write_sudo_rules] Name: %s" % name) + + def create_user(self, name, **kwargs): + self.add_user(name, **kwargs) + + # Set password if plain-text password provided and non-empty + if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']: + self.set_passwd(name, kwargs['plain_text_passwd']) + + # Default locking down the account. 'lock_passwd' defaults to True. + # lock account unless lock_password is False. 
+ if kwargs.get('lock_passwd', True): + self.lock_passwd(name) + + # Configure sudo access + if 'sudo' in kwargs: + self.write_sudo_rules(name, kwargs['sudo']) + + # Import SSH keys + if 'ssh_authorized_keys' in kwargs: + keys = set(kwargs['ssh_authorized_keys']) or [] + ssh_util.setup_user_keys(keys, name, options=None) + + def _write_network(self, settings): + return + + def apply_locale(): + return + + def install_packages(): + return + + def package_command(): + return + + def set_timezone(): + return + + def update_package_sources(): + return + diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index feba5a62..f5949122 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -34,6 +34,7 @@ def netdev_info(empty=""): continue if line[0] not in ("\t", " "): curdev = line.split()[0] + # TODO: up/down detection fails on FreeBSD devs[curdev] = {"up": False} for field in fields: devs[curdev][field] = "" @@ -46,21 +47,32 @@ def netdev_info(empty=""): fieldpost = "6" for i in range(len(toks)): - if toks[i] == "hwaddr": + if toks[i] == "hwaddr" or toks[i] == "ether": try: devs[curdev]["hwaddr"] = toks[i + 1] except IndexError: pass - for field in ("addr", "bcast", "mask"): + + """ + Couple the different items we're interested in with the correct field + since FreeBSD/CentOS/Fedora differ in the output. + """ + + ifconfigfields = { + "addr:":"addr", "inet":"addr", + "bcast:":"bcast", "broadcast":"bcast", + "mask:":"mask", "netmask":"mask" + } + for origfield, field in ifconfigfields.items(): target = "%s%s" % (field, fieldpost) if devs[curdev].get(target, ""): continue - if toks[i] == "%s:" % field: + if toks[i] == "%s" % origfield: try: devs[curdev][target] = toks[i + 1] except IndexError: pass - elif toks[i].startswith("%s:" % field): + elif toks[i].startswith("%s" % origfield): devs[curdev][target] = toks[i][len(field) + 1:] if empty != "": @@ -71,17 +83,38 @@ def netdev_info(empty=""): return devs +# +# Use netstat instead of route since that produces more portable output. 
+# def route_info(): - (route_out, _err) = util.subp(["route", "-n"]) + (route_out, _err) = util.subp(["netstat", "-rn"]) routes = [] entries = route_out.splitlines()[1:] for line in entries: if not line: continue toks = line.split() - if len(toks) < 8 or toks[0] == "Kernel" or toks[0] == "Destination": + + """ + FreeBSD shows 6 items in the routing table: + Destination Gateway Flags Refs Use Netif Expire + default 10.65.0.1 UGS 0 34920 vtnet0 + + Linux netstat shows 2 more: + Destination Gateway Genmask Flags MSS Window irtt Iface + 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 + """ + + if len(toks) < 6 or toks[0] == "Kernel" or toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Internet6" or toks[0] == "Routing": continue + + if len(toks) < 8: + toks.append("-") + toks.append("-") + toks[7] = toks[5] + toks[5] = "-" + entry = { 'destination': toks[0], 'gateway': toks[1], @@ -92,6 +125,7 @@ def route_info(): 'use': toks[6], 'iface': toks[7], } + routes.append(entry) return routes diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 7dc1fbde..d799a211 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -119,7 +119,7 @@ class DataSource(object): # when the kernel named them 'vda' or 'xvda' # we want to return the correct value for what will actually # exist in this instance - mappings = {"sd": ("vd", "xvd")} + mappings = {"sd": ("vd", "xvd", "vtb")} for (nfrom, tlist) in mappings.iteritems(): if not short_name.startswith(nfrom): continue diff --git a/cloudinit/util.py b/cloudinit/util.py index a8ddb390..8b77b163 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -26,6 +26,7 @@ from StringIO import StringIO import contextlib import copy as obj_copy +import ctypes import errno import glob import grp @@ -36,6 +37,7 @@ import os.path import platform import pwd import random +import re import shutil import socket import stat @@ -1300,11 +1302,25 @@ def mounts(): mounted = {} try: # Go through mounts to see what is already mounted - mount_locs = load_file("/proc/mounts").splitlines() + if os.path.exists("/proc/mounts"): + mount_locs = load_file("/proc/mounts").splitlines() + method = 'proc' + else: + (mountoutput, _err) = subp("mount") + mount_locs = mountoutput.splitlines() + method = 'mount' for mpline in mount_locs: - # Format at: man fstab + # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered) + # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates) try: - (dev, mp, fstype, opts, _freq, _passno) = mpline.split() + if method == 'proc' and len(mpline) == 6: + (dev, mp, fstype, opts, _freq, _passno) = mpline.split() + elif method == 'mount': + m = re.search('^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', mpline) + dev = m.group(1) + mp = m.group(2) + fstype = m.group(3) + opts = m.group(4) except: continue # If the name of the mount point contains spaces these @@ -1315,9 +1331,9 @@ def mounts(): 'mountpoint': mp, 'opts': opts, } - LOG.debug("Fetched %s mounts from %s", mounted, "/proc/mounts") + LOG.debug("Fetched %s mounts from %s", mounted, method) except (IOError, OSError): - logexc(LOG, "Failed fetching mount points from /proc/mounts") + logexc(LOG, "Failed fetching mount points") return mounted @@ -1403,11 +1419,22 @@ def time_rfc2822(): def uptime(): uptime_str = '??' 
try: - contents = load_file("/proc/uptime").strip() - if contents: - uptime_str = contents.split()[0] + if os.path.exists("/proc/uptime"): + contents = load_file("/proc/uptime").strip() + if contents: + uptime_str = contents.split()[0] + else: + libc = ctypes.CDLL('/lib/libc.so.7') + size = ctypes.c_size_t() + buf = ctypes.c_int() + size.value = ctypes.sizeof(buf) + libc.sysctlbyname("kern.boottime", ctypes.byref(buf), ctypes.byref(size), None, 0) + now = time.time() + bootup = buf.value + uptime_str = now - bootup + except: - logexc(LOG, "Unable to read uptime from /proc/uptime") + logexc(LOG, "Unable to read uptime") return uptime_str @@ -1746,6 +1773,18 @@ def parse_mtab(path): return None +def parse_mount(path): + (mountoutput, _err) = subp("mount") + mount_locs = mountoutput.splitlines() + for line in mount_locs: + m = re.search('^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line) + devpth = m.group(1) + mount_point = m.group(2) + fs_type = m.group(3) + if mount_point == path: + return devpth, fs_type, mount_point + return None + def get_mount_info(path, log=LOG): # Use /proc/$$/mountinfo to find the device where path is mounted. # This is done because with a btrfs filesystem using os.stat(path) @@ -1779,8 +1818,10 @@ def get_mount_info(path, log=LOG): if os.path.exists(mountinfo_path): lines = load_file(mountinfo_path).splitlines() return parse_mount_info(path, lines, log) - else: + elif os.path.exists("/etc/mtab"): return parse_mtab(path) + else: + return parse_mount(path) def which(program): -- cgit v1.2.3 From ab2bf49eecede2fe0ce4f7685f751c64b20dd390 Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Sat, 14 Dec 2013 12:50:01 +0000 Subject: fix: Fallback to check the interface state, specifically freebsd benefits of this. --- cloudinit/netinfo.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index f5949122..a9c3090e 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -21,6 +21,7 @@ # along with this program. If not, see . import cloudinit.util as util +import re from prettytable import PrettyTable @@ -34,13 +35,17 @@ def netdev_info(empty=""): continue if line[0] not in ("\t", " "): curdev = line.split()[0] - # TODO: up/down detection fails on FreeBSD devs[curdev] = {"up": False} for field in fields: devs[curdev][field] = "" toks = line.lower().strip().split() if toks[0] == "up": devs[curdev]['up'] = True + # If the output of ifconfig doesn't contain the required info in the + # obvious place, use a regex filter to be sure. + elif len(toks) > 1: + if re.search("flags=\d+ Date: Sat, 14 Dec 2013 17:38:04 +0000 Subject: fix: Log the used method as well. --- cloudinit/util.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/util.py b/cloudinit/util.py index 8b77b163..e24e6d8d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1418,12 +1418,15 @@ def time_rfc2822(): def uptime(): uptime_str = '??' 
+ method = 'unknown' try: if os.path.exists("/proc/uptime"): + method = '/proc/uptime' contents = load_file("/proc/uptime").strip() if contents: uptime_str = contents.split()[0] else: + method = 'ctypes' libc = ctypes.CDLL('/lib/libc.so.7') size = ctypes.c_size_t() buf = ctypes.c_int() @@ -1434,7 +1437,7 @@ def uptime(): uptime_str = now - bootup except: - logexc(LOG, "Unable to read uptime") + logexc(LOG, "Unable to read uptime using method: %s" % method) return uptime_str -- cgit v1.2.3 From 45a3ef2157c21155a7d0a286849db330e767608d Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Sat, 14 Dec 2013 18:58:31 +0000 Subject: change: Separate functions to load, read and write /etc/rc.conf keys and values. Use these right away to read and change the hostname. --- cloudinit/distros/freebsd.py | 54 ++++++++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 17 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 2d6cd924..55efbb7b 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -40,33 +40,53 @@ class Distro(distros.Distro): self._runner = helpers.Runners(paths) self.osfamily = 'freebsd' + # Updates a key in /etc/rc.conf. def updatercconf(self, key, value): LOG.debug("updatercconf: %s => %s" % (key, value)) - conf = {} + conf = self.loadrcconf() configchanged = False - with open("/etc/rc.conf") as file: - for line in file: - tok = line.split('=') - # TODO: Handle keys with spaces, make this a bit more robust. - if tok[0] == key: - if tok[1] != value: - conf[tok[0]] = value - LOG.debug("[rc.conf]: Value %s for key %s needs to be changed" % (value, key)) - configchanged = True - else: - conf[tok[0]] = tok[1].rstrip() + for item in conf: + if item == key and conf[item] != value: + conf[item] = value + LOG.debug("[rc.conf]: Value %s for key %s needs to be changed" % (value, key)) + configchanged = True if configchanged: LOG.debug("Writing new /etc/rc.conf file") - with open ('/etc/rc.conf', 'w') as file: + with open('/etc/rc.conf', 'w') as file: for keyval in conf.items(): file.write("%s=%s\n" % keyval) - def _read_hostname(): - return + # Load the contents of /etc/rc.conf and store all keys in a dict. + def loadrcconf(self): + conf = {} + with open("/etc/rc.conf") as file: + for line in file: + tok = line.split('=') + conf[tok[0]] = tok[1].rstrip() + return conf - def _read_system_hostname(): - return + def readrcconf(self, key): + conf = self.loadrcconf() + try: + val = conf[key] + except KeyError: + val = None + return val + + def _read_system_hostname(self): + sys_hostname = self._read_hostname() + return ('rc.conf', sys_hostname) + + def _read_hostname(self, default=None): + hostname = None + try: + hostname = self.readrcconf('hostname') + except IOError: + pass + if not hostname: + return default + return hostname def _select_hostname(self, hostname, fqdn): if not hostname: -- cgit v1.2.3 From 43b88392a14f6ab9395313353e28e60acc85ab75 Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Sat, 14 Dec 2013 19:11:47 +0000 Subject: change: Use util.system_info(). 
--- cloudinit/config/cc_growpart.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 81de4c37..07556f03 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -22,7 +22,6 @@ import os import os.path import re import stat -import sys from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS @@ -188,7 +187,7 @@ def device_part_info(devpath): # FreeBSD doesn't know of sysfs so just get everything we need from # the device, like /dev/vtbd0p2. - if sys.platform.startswith('freebsd'): + if util.system_info()["platform"].startswith('FreeBSD'): m = re.search('^(/dev/.+)p([0-9])$', devpath) return (m.group(1), m.group(2)) -- cgit v1.2.3 From bd96af406f268e3fe41537125be4cf2dfc9ea5bc Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Sat, 14 Dec 2013 19:14:18 +0000 Subject: new: Touch a reboot-required file to make clear we want a reboot after resizing the partition. --- cloudinit/config/cc_growpart.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 07556f03..29d8b49b 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -165,6 +165,10 @@ class ResizeGpart(object): util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev) raise ResizeFailedException(e) + # Since growing the FS requires a reboot, make sure we reboot + # first when this module has finished. + open('/var/run/reboot-required', 'a').close() + return (before, get_size(partdev)) def get_size(filename): -- cgit v1.2.3 From 8a2b80adfb66f9036dc617ff65b7f6ab4464ca5d Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Sat, 14 Dec 2013 19:16:05 +0000 Subject: change: Use util.system_info(). --- cloudinit/config/cc_power_state_change.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 46ce5ba5..8efef24b 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -23,7 +23,6 @@ import errno import os import re import subprocess -import sys import time frequency = PER_INSTANCE @@ -44,7 +43,7 @@ def givecmdline(pid): # Example output from procstat -c 16357 # PID COMM ARGS # 1 init /bin/init -- - if sys.platform.startswith('freebsd'): + if util.system_info()["platform"].startswith('FreeBSD'): (output, _err) = util.subp(['procstat', '-c', str(pid)]) line = output.splitlines()[1] m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line) -- cgit v1.2.3 From 76756d5985cac6d0d8eafbbc336dc140cb3ecb1d Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Sat, 14 Dec 2013 22:30:29 +0000 Subject: change: Use a proper signal instead of 'just' 0. --- cloudinit/config/cc_power_state_change.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 8efef24b..2797b3d9 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -22,6 +22,7 @@ from cloudinit import util import errno import os import re +import signal import subprocess import time @@ -36,7 +37,7 @@ EXIT_FAIL = 254 def givecmdline(pid): # Check if this pid still exists by sending it the harmless 0 signal. 
try: - os.kill(pid, 0) + os.kill(pid, signal.SIG_DFL) except OSError: return None else: -- cgit v1.2.3 From d6dcee2a818b97ccae8cd662cf108e954fc89e5c Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Sat, 14 Dec 2013 22:31:32 +0000 Subject: fix: Proper comment. --- cloudinit/config/cc_power_state_change.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 2797b3d9..50897b5f 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -41,7 +41,7 @@ def givecmdline(pid): except OSError: return None else: - # Example output from procstat -c 16357 + # Example output from procstat -c 1 # PID COMM ARGS # 1 init /bin/init -- if util.system_info()["platform"].startswith('FreeBSD'): -- cgit v1.2.3 From d5613a54c27f3b494c7012dbdd68635a112e1e57 Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Sat, 14 Dec 2013 22:49:32 +0000 Subject: change: Just run the required command and let the exception do the rest if the process died. Checking first if the process is still alive proofed to be quite error prone, atleast on a rather slow compute node. --- cloudinit/config/cc_power_state_change.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 50897b5f..6bd14b7e 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -31,16 +31,12 @@ frequency = PER_INSTANCE EXIT_FAIL = 254 # -# Returns the cmdline for the given process id. +# Returns the cmdline for the given process id. In Linux we can use procfs for +# this but on BSD there is /usr/bin/procstat. # def givecmdline(pid): - # Check if this pid still exists by sending it the harmless 0 signal. try: - os.kill(pid, signal.SIG_DFL) - except OSError: - return None - else: # Example output from procstat -c 1 # PID COMM ARGS # 1 init /bin/init -- @@ -51,6 +47,8 @@ def givecmdline(pid): return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) + except IOError: + return None def handle(_name, cfg, _cloud, log, _args): -- cgit v1.2.3 From 1781668dd65737a800c2c8fdbb79c6f1288d3ef2 Mon Sep 17 00:00:00 2001 From: Harm Weites Date: Wed, 18 Dec 2013 23:36:16 +0000 Subject: new: Apply the locale to the default login class. --- cloudinit/distros/freebsd.py | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 55efbb7b..fd66b901 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -210,10 +210,32 @@ class Distro(distros.Distro): def _write_network(self, settings): return - - def apply_locale(): - return - + + def apply_locale(self, locale, out_fn=None): + loginconf = '/etc/login.conf' + newloginconf = '/tmp/login.conf.new' + backupconf = '/etc/login.conf.orig' + + newconf = open(newloginconf, 'w') + origconf = open(loginconf, 'r') + + for line in origconf: + newconf.write(re.sub('^default:', r'default:lang=%s:' % locale, line)) + newconf.close() + origconf.close() + # Make a backup of login.conf. + copyfile(loginconf, backupconf) + # And copy the new login.conf. 
+ copyfile(newloginconf, loginconf) + + try: + util.logexc("Running cap_mkdb for %s", locale) + util.subp(['cap_mkdb', '/etc/login.conf']) + except: + # cap_mkdb failed, so restore the backup. + util.logexc("Failed to apply locale %s", locale) + copyfile(backupconf, loginconf) + def install_packages(): return -- cgit v1.2.3 From 66aa9826b818c3478516104b38039fecbd717b6b Mon Sep 17 00:00:00 2001 From: Paul Querna Date: Thu, 9 Jan 2014 21:14:51 +0000 Subject: Allow a Config Drive source on a partition, if the label matches. --- cloudinit/sources/DataSourceConfigDrive.py | 6 ++++-- tests/unittests/test_datasource/test_configdrive.py | 5 +++-- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 4f437244..2a244496 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -284,8 +284,10 @@ def find_candidate_devs(): # followed by fstype items, but with dupes removed combined = (by_label + [d for d in by_fstype if d not in by_label]) - # We are looking for block device (sda, not sda1), ignore partitions - combined = [d for d in combined if not util.is_partition(d)] + # We are looking for a block device or partition with necessary label or + # an unpartitioned block device. + combined = [d for d in combined + if d in by_label or not util.is_partition(d)] return combined diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index d5935294..3c1e8add 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -285,10 +285,11 @@ class TestConfigDriveDataSource(MockerTestCase): self.assertEqual(["/dev/vdb", "/dev/zdd"], ds.find_candidate_devs()) - # verify that partitions are not considered + # verify that partitions are considered, but only if they have a label. devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]} - self.assertEqual([], ds.find_candidate_devs()) + self.assertEqual(["/dev/vdb3"], + ds.find_candidate_devs()) finally: util.find_devs_with = orig_find_devs_with -- cgit v1.2.3 From 31ece5e92797bf20141878de5dd7cd91559cb336 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 22 Jan 2014 12:04:39 -0800 Subject: Split net-parsing into own module The ubuntu/debian networking file parsing function really is more generic than just a rhel utility function and can be used by others that want to use this functionality for there own purposes (say in writing down a freebsd network format instead) so moving this to its own module to encourage its usage outside of rhel. --- cloudinit/distros/net_util.py | 110 +++++++++++++++++++++++++++++++++++++++++ cloudinit/distros/rhel.py | 4 +- cloudinit/distros/rhel_util.py | 88 --------------------------------- cloudinit/distros/sles.py | 4 +- 4 files changed, 116 insertions(+), 90 deletions(-) create mode 100644 cloudinit/distros/net_util.py (limited to 'cloudinit') diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py new file mode 100644 index 00000000..4c9095be --- /dev/null +++ b/cloudinit/distros/net_util.py @@ -0,0 +1,110 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. 
+# +# Author: Scott Moser +# Author: Juerg Haefliger +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +# This is a util function to translate debian based distro interface blobs as +# given in /etc/network/interfaces to an *somewhat* agnostic format for +# distributions that use other formats. +# +# TODO(harlowja) remove when we have python-netcf active... +def translate_network(settings): + # Get the standard cmd, args from the ubuntu format + entries = [] + for line in settings.splitlines(): + line = line.strip() + if not line or line.startswith("#"): + continue + split_up = line.split(None, 1) + if len(split_up) <= 1: + continue + entries.append(split_up) + # Figure out where each iface section is + ifaces = [] + consume = {} + for (cmd, args) in entries: + if cmd == 'iface': + if consume: + ifaces.append(consume) + consume = {} + consume[cmd] = args + else: + consume[cmd] = args + # Check if anything left over to consume + absorb = False + for (cmd, args) in consume.iteritems(): + if cmd == 'iface': + absorb = True + if absorb: + ifaces.append(consume) + # Now translate + real_ifaces = {} + for info in ifaces: + if 'iface' not in info: + continue + iface_details = info['iface'].split(None) + dev_name = None + if len(iface_details) >= 1: + dev = iface_details[0].strip().lower() + if dev: + dev_name = dev + if not dev_name: + continue + iface_info = {} + if len(iface_details) >= 3: + proto_type = iface_details[2].strip().lower() + # Seems like this can be 'loopback' which we don't + # really care about + if proto_type in ['dhcp', 'static']: + iface_info['bootproto'] = proto_type + # These can just be copied over + for k in ['netmask', 'address', 'gateway', 'broadcast']: + if k in info: + val = info[k].strip().lower() + if val: + iface_info[k] = val + # Name server info provided?? + if 'dns-nameservers' in info: + iface_info['dns-nameservers'] = info['dns-nameservers'].split() + # Name server search info provided?? + if 'dns-search' in info: + iface_info['dns-search'] = info['dns-search'].split() + # Is any mac address spoofing going on?? 
+ if 'hwaddress' in info: + hw_info = info['hwaddress'].lower().strip() + hw_split = hw_info.split(None, 1) + if len(hw_split) == 2 and hw_split[0].startswith('ether'): + hw_addr = hw_split[1] + if hw_addr: + iface_info['hwaddress'] = hw_addr + real_ifaces[dev_name] = iface_info + # Check for those that should be started on boot via 'auto' + for (cmd, args) in entries: + if cmd == 'auto': + # Seems like auto can be like 'auto eth0 eth0:1' so just get the + # first part out as the device name + args = args.split(None) + if not args: + continue + dev_name = args[0].strip().lower() + if dev_name in real_ifaces: + real_ifaces[dev_name]['auto'] = True + return real_ifaces diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 30195384..6087929e 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -25,7 +25,9 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit.distros import net_util from cloudinit.distros import rhel_util + from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -63,7 +65,7 @@ class Distro(distros.Distro): def _write_network(self, settings): # TODO(harlowja) fix this... since this is the ubuntu format - entries = rhel_util.translate_network(settings) + entries = net_util.translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", settings, entries) # Make the intermediate format as the rhel format... diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py index 1aba58b8..063d536e 100644 --- a/cloudinit/distros/rhel_util.py +++ b/cloudinit/distros/rhel_util.py @@ -30,94 +30,6 @@ from cloudinit import util LOG = logging.getLogger(__name__) -# This is a util function to translate Debian based distro interface blobs as -# given in /etc/network/interfaces to an equivalent format for distributions -# that use ifcfg-* style (Red Hat and SUSE). -# TODO(harlowja) remove when we have python-netcf active... -def translate_network(settings): - # Get the standard cmd, args from the ubuntu format - entries = [] - for line in settings.splitlines(): - line = line.strip() - if not line or line.startswith("#"): - continue - split_up = line.split(None, 1) - if len(split_up) <= 1: - continue - entries.append(split_up) - # Figure out where each iface section is - ifaces = [] - consume = {} - for (cmd, args) in entries: - if cmd == 'iface': - if consume: - ifaces.append(consume) - consume = {} - consume[cmd] = args - else: - consume[cmd] = args - # Check if anything left over to consume - absorb = False - for (cmd, args) in consume.iteritems(): - if cmd == 'iface': - absorb = True - if absorb: - ifaces.append(consume) - # Now translate - real_ifaces = {} - for info in ifaces: - if 'iface' not in info: - continue - iface_details = info['iface'].split(None) - dev_name = None - if len(iface_details) >= 1: - dev = iface_details[0].strip().lower() - if dev: - dev_name = dev - if not dev_name: - continue - iface_info = {} - if len(iface_details) >= 3: - proto_type = iface_details[2].strip().lower() - # Seems like this can be 'loopback' which we don't - # really care about - if proto_type in ['dhcp', 'static']: - iface_info['bootproto'] = proto_type - # These can just be copied over - for k in ['netmask', 'address', 'gateway', 'broadcast']: - if k in info: - val = info[k].strip().lower() - if val: - iface_info[k] = val - # Name server info provided?? 
- if 'dns-nameservers' in info: - iface_info['dns-nameservers'] = info['dns-nameservers'].split() - # Name server search info provided?? - if 'dns-search' in info: - iface_info['dns-search'] = info['dns-search'].split() - # Is any mac address spoofing going on?? - if 'hwaddress' in info: - hw_info = info['hwaddress'].lower().strip() - hw_split = hw_info.split(None, 1) - if len(hw_split) == 2 and hw_split[0].startswith('ether'): - hw_addr = hw_split[1] - if hw_addr: - iface_info['hwaddress'] = hw_addr - real_ifaces[dev_name] = iface_info - # Check for those that should be started on boot via 'auto' - for (cmd, args) in entries: - if cmd == 'auto': - # Seems like auto can be like 'auto eth0 eth0:1' so just get the - # first part out as the device name - args = args.split(None) - if not args: - continue - dev_name = args[0].strip().lower() - if dev_name in real_ifaces: - real_ifaces[dev_name]['auto'] = True - return real_ifaces - - # Helper function to update a RHEL/SUSE /etc/sysconfig/* file def update_sysconfig_file(fn, adjustments, allow_empty=False): if not adjustments: diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index f2ac4efc..239e51b5 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -26,7 +26,9 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit.distros import net_util from cloudinit.distros import rhel_util + from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) @@ -54,7 +56,7 @@ class Distro(distros.Distro): def _write_network(self, settings): # Convert debian settings to ifcfg format - entries = rhel_util.translate_network(settings) + entries = net_util.translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", settings, entries) # Make the intermediate format as the suse format... -- cgit v1.2.3 From 5f14b91c2a17101e845ee18068c7bb8c3c7a0556 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 22 Jan 2014 12:39:37 -0800 Subject: Add comments as to format with example in/out --- cloudinit/distros/net_util.py | 52 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) (limited to 'cloudinit') diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index 4c9095be..8c781f9a 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -26,6 +26,58 @@ # distributions that use other formats. # # TODO(harlowja) remove when we have python-netcf active... +# +# The format is the following: +# { +# : { +# # All optional (if not existent in original format) +# "netmask": , +# "broadcast": , +# "gateway": , +# "address": , +# "bootproto": "static"|"dhcp", +# "dns-search": , +# "hwaddress": , +# "auto": True (or non-existent), +# "dns-nameservers": [, ...], +# } +# } +# +# Things to note, comments are removed, if a ubuntu/debian interface is +# marked as auto then only then first segment (?) is retained, ie +# 'auto eth0 eth0:1' just marks eth0 as auto (not eth0:1). 
+# +# Example input: +# +# auto lo +# iface lo inet loopback +# +# auto eth0 +# iface eth0 inet static +# address 10.0.0.1 +# netmask 255.255.252.0 +# broadcast 10.0.0.255 +# gateway 10.0.0.2 +# dns-nameservers 98.0.0.1 98.0.0.2 +# +# Example output: +# { +# "lo": { +# "auto": true +# }, +# "eth0": { +# "auto": true, +# "dns-nameservers": [ +# "98.0.0.1", +# "98.0.0.2" +# ], +# "broadcast": "10.0.0.255", +# "netmask": "255.255.252.0", +# "bootproto": "static", +# "address": "10.0.0.1", +# "gateway": "10.0.0.2" +# } +# } def translate_network(settings): # Get the standard cmd, args from the ubuntu format entries = [] -- cgit v1.2.3 From efe969756d0208433e953d1692ea85006a56abe3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 22 Jan 2014 12:47:51 -0800 Subject: Add a new line --- cloudinit/distros/net_util.py | 1 + 1 file changed, 1 insertion(+) (limited to 'cloudinit') diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index 8c781f9a..5f60666d 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -78,6 +78,7 @@ # "gateway": "10.0.0.2" # } # } + def translate_network(settings): # Get the standard cmd, args from the ubuntu format entries = [] -- cgit v1.2.3 From 84514cdff8ff025df052fe6301d2a7ed751d7d61 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Jan 2014 16:54:07 -0500 Subject: cc_resizefs: figure out what /dev/root means via kernel cmdline If mount_info says that the root filesystem is on /dev/root and /dev/root does not exist, then we'll try to glean that information from the linux kernel cmdline. This situation occurs at least when you boot without an initramfs for the current ppc64el cloud images: qemu-system-ppc64 ... -kernel my.kernel -append 'root=/dev/sda' When doing that, /proc/1/mountinfo will say '/dev/root' for '/'. --- ChangeLog | 2 ++ cloudinit/config/cc_resizefs.py | 33 +++++++++++++++++++++++++++++++-- 2 files changed, 33 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/ChangeLog b/ChangeLog index cb9586f0..32f2a8d0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -17,6 +17,8 @@ - drop dependency on boto for crawling ec2 metadata service. - add 'Requires' on sudo (for OpenNebula datasource) in rpm specs, and 'Recommends' in the debian/control.in [Vlastimil Holer] + - if mount_info reports /dev/root is a device path for /, then convert + that to a device via help of kernel cmdline. 0.7.4: - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a partitioned block device with target filesystem on ephemeral0.1. 
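The rootdev_from_cmdline() helper added in the cc_resizefs.py diff that follows maps the root= token from the kernel command line onto a device path, so that a /proc mountinfo entry of "/dev/root" can be resolved to a real device. A quick sketch of the mappings it performs; the cmdline strings here are made-up examples, not taken from the patch:

    rootdev_from_cmdline("ro root=/dev/sda1 quiet")        # -> "/dev/sda1"
    rootdev_from_cmdline("root=LABEL=cloudimg-rootfs ro")  # -> "/dev/disk/by-label/cloudimg-rootfs"
    rootdev_from_cmdline("root=UUID=1234-abcd ro")         # -> "/dev/disk/by-uuid/1234-abcd"
    rootdev_from_cmdline("BOOT_IMAGE=/vmlinuz root=vda1")  # -> "/dev/vda1"
    rootdev_from_cmdline("console=ttyS0")                  # -> None (handle() then warns and returns)
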
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 56040fdd..388ca66f 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -51,6 +51,25 @@ RESIZE_FS_PREFIXES_CMDS = [ NOBLOCK = "noblock" +def rootdev_from_cmdline(cmdline): + found = None + for tok in cmdline.split(): + if tok.startswith("root="): + found = tok[5:] + break + if found is None: + return None + + if found.startswith("/dev/"): + return found + if found.startswith("LABEL="): + return "/dev/disk/by-label/" + found[len("LABEL="):] + if found.startswith("UUID="): + return "/dev/disk/by-uuid/" + found[len("UUID="):] + + return "/dev/" + found + + def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] @@ -78,10 +97,20 @@ def handle(name, cfg, _cloud, log, args): info = "dev=%s mnt_point=%s path=%s" % (devpth, mount_point, resize_what) log.debug("resize_info: %s" % info) + container = util.is_container() + + if (devpth == "/dev/root" and not os.path.exists(devpth) and + not container): + devpth = rootdev_from_cmdline(util.get_cmdline()) + if devpth is None: + log.warn("Unable to find device '/dev/root'") + return + log.debug("Converted /dev/root to '%s' per kernel cmdline", devpth) + try: statret = os.stat(devpth) except OSError as exc: - if util.is_container() and exc.errno == errno.ENOENT: + if container and exc.errno == errno.ENOENT: log.debug("Device '%s' did not exist in container. " "cannot resize: %s" % (devpth, info)) elif exc.errno == errno.ENOENT: @@ -92,7 +121,7 @@ def handle(name, cfg, _cloud, log, args): return if not stat.S_ISBLK(statret.st_mode): - if util.is_container(): + if container: log.debug("device '%s' not a block device in container." " cannot resize: %s" % (devpth, info)) else: -- cgit v1.2.3 From c0ee33fc1d70b272a96430def658afd6f1867afa Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 14:06:13 -0500 Subject: remove some white space --- cloudinit/distros/rhel.py | 1 - cloudinit/distros/sles.py | 1 - 2 files changed, 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 6087929e..e8abf111 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -27,7 +27,6 @@ from cloudinit import util from cloudinit.distros import net_util from cloudinit.distros import rhel_util - from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index 239e51b5..9788a1ba 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -28,7 +28,6 @@ from cloudinit import util from cloudinit.distros import net_util from cloudinit.distros import rhel_util - from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -- cgit v1.2.3 From c833a84f08019ba4413937f2f1b1f12a4ffe5632 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 14:28:59 -0500 Subject: pep8 --- cloudinit/config/cc_debug.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py index cfd31fa1..7219b0f8 100644 --- a/cloudinit/config/cc_debug.py +++ b/cloudinit/config/cc_debug.py @@ -14,10 +14,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-from StringIO import StringIO -from cloudinit import util from cloudinit import type_utils +from cloudinit import util import copy +from StringIO import StringIO def _make_header(text): -- cgit v1.2.3 From 84ad0fd0a5471d650c039241286be17cc1163df6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 15:24:36 -0500 Subject: fix broken consumption of /proc/mounts this was checking that the lenghth of the mount line from /proc/mounts was 6, not the number of tokens. --- cloudinit/util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/util.py b/cloudinit/util.py index 6fe0e0e6..ce8dacbe 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1325,9 +1325,9 @@ def mounts(): # Linux: /dev/sda1 on /boot type ext4 (rw,relatime,data=ordered) # FreeBSD: /dev/vtbd0p2 on / (ufs, local, journaled soft-updates) try: - if method == 'proc' and len(mpline) == 6: + if method == 'proc': (dev, mp, fstype, opts, _freq, _passno) = mpline.split() - elif method == 'mount': + else: m = re.search('^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', mpline) dev = m.group(1) mp = m.group(2) -- cgit v1.2.3 From c2b41f399778213414aa8a9a7f39a03a15ed79df Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 15:35:12 -0500 Subject: pep8 --- cloudinit/netinfo.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index a9c3090e..63f720e4 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -64,11 +64,11 @@ def netdev_info(empty=""): """ ifconfigfields = { - "addr:":"addr", "inet":"addr", - "bcast:":"bcast", "broadcast":"bcast", - "mask:":"mask", "netmask":"mask" + "addr:": "addr", "inet": "addr", + "bcast:": "bcast", "broadcast": "bcast", + "mask:": "mask", "netmask": "mask" } - for origfield, field in ifconfigfields.items(): + for origfield, field in ifconfigfields.items(): target = "%s%s" % (field, fieldpost) if devs[curdev].get(target, ""): continue @@ -88,9 +88,6 @@ def netdev_info(empty=""): return devs -# -# Use netstat instead of route since that produces more portable output. 
-# def route_info(): (route_out, _err) = util.subp(["netstat", "-rn"]) @@ -105,11 +102,11 @@ def route_info(): FreeBSD shows 6 items in the routing table: Destination Gateway Flags Refs Use Netif Expire default 10.65.0.1 UGS 0 34920 vtnet0 - + Linux netstat shows 2 more: Destination Gateway Genmask Flags MSS Window irtt Iface 0.0.0.0 10.65.0.1 0.0.0.0 UG 0 0 0 eth0 - """ + """ if len(toks) < 6 or toks[0] == "Kernel" or toks[0] == "Destination" or toks[0] == "Internet" or toks[0] == "Internet6" or toks[0] == "Routing": continue -- cgit v1.2.3 From 5323320a7251b124c231ba6be25b8583535f1b62 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 16:13:09 -0500 Subject: pep8, use which rather than hard coded path --- cloudinit/config/cc_growpart.py | 6 ++++-- cloudinit/config/cc_power_state_change.py | 7 +++---- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 1d3a4412..b81951ad 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -113,9 +113,10 @@ class ResizeGrowPart(object): return (before, get_size(partdev)) + class ResizeGpart(object): def available(self): - if not os.path.exists('/usr/local/sbin/gpart'): + if not util.which('gpart'): return False return True @@ -138,7 +139,7 @@ class ResizeGpart(object): try: util.subp(["gpart", "resize", "-i", partnum, diskdev]) except util.ProcessExecutionError as e: - util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev) + util.logexc(LOG, "Failed: gpart resize -i %s %s", partnum, diskdev) raise ResizeFailedException(e) # Since growing the FS requires a reboot, make sure we reboot @@ -147,6 +148,7 @@ class ResizeGpart(object): return (before, get_size(partdev)) + def get_size(filename): fd = os.open(filename, os.O_RDONLY) try: diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 6bd14b7e..561c5abd 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -30,12 +30,10 @@ frequency = PER_INSTANCE EXIT_FAIL = 254 -# -# Returns the cmdline for the given process id. In Linux we can use procfs for -# this but on BSD there is /usr/bin/procstat. -# def givecmdline(pid): + # Returns the cmdline for the given process id. In Linux we can use procfs + # for this but on BSD there is /usr/bin/procstat. try: # Example output from procstat -c 1 # PID COMM ARGS @@ -50,6 +48,7 @@ def givecmdline(pid): except IOError: return None + def handle(_name, cfg, _cloud, log, _args): try: -- cgit v1.2.3 From eff68fbbb7eb9e5a9a9d9cab4ab357edd1476859 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 16:25:58 -0500 Subject: pep8/tab to 8 spaces --- cloudinit/distros/freebsd.py | 72 ++++++++++++++++++++++---------------------- 1 file changed, 36 insertions(+), 36 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index fd66b901..a62503f6 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -31,6 +31,7 @@ from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) + class Distro(distros.Distro): def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -42,9 +43,9 @@ class Distro(distros.Distro): # Updates a key in /etc/rc.conf. 
def updatercconf(self, key, value): - LOG.debug("updatercconf: %s => %s" % (key, value)) + LOG.debug("updatercconf: %s => %s" % (key, value)) conf = self.loadrcconf() - configchanged = False + configchanged = False for item in conf: if item == key and conf[item] != value: conf[item] = value @@ -54,8 +55,8 @@ class Distro(distros.Distro): if configchanged: LOG.debug("Writing new /etc/rc.conf file") with open('/etc/rc.conf', 'w') as file: - for keyval in conf.items(): - file.write("%s=%s\n" % keyval) + for keyval in conf.items(): + file.write("%s=%s\n" % keyval) # Load the contents of /etc/rc.conf and store all keys in a dict. def loadrcconf(self): @@ -70,7 +71,7 @@ class Distro(distros.Distro): conf = self.loadrcconf() try: val = conf[key] - except KeyError: + except KeyError: val = None return val @@ -94,7 +95,7 @@ class Distro(distros.Distro): return hostname def _write_hostname(self, your_hostname, out_fn): - self.updatercconf('hostname', your_hostname) + self.updatercconf('hostname', your_hostname) def create_group(self, name, members): group_add_cmd = ['pw', '-n', name] @@ -124,26 +125,26 @@ class Distro(distros.Distro): adduser_cmd = ['pw', 'useradd', '-n', name] log_adduser_cmd = ['pw', 'useradd', '-n', name] - adduser_opts = { - "homedir": '-d', - "gecos": '-c', - "primary_group": '-g', - "groups": '-G', - "passwd": '-h', - "shell": '-s', - "inactive": '-E', - } - adduser_flags = { - "no_user_group": '--no-user-group', - "system": '--system', - "no_log_init": '--no-log-init', - } - - redact_opts = ['passwd'] - - for key, val in kwargs.iteritems(): - if key in adduser_opts and val and isinstance(val, str): - adduser_cmd.extend([adduser_opts[key], val]) + adduser_opts = { + "homedir": '-d', + "gecos": '-c', + "primary_group": '-g', + "groups": '-G', + "passwd": '-h', + "shell": '-s', + "inactive": '-E', + } + adduser_flags = { + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', + } + + redact_opts = ['passwd'] + + for key, val in kwargs.iteritems(): + if key in adduser_opts and val and isinstance(val, str): + adduser_cmd.extend([adduser_opts[key], val]) # Redact certain fields from the logs if key in redact_opts: @@ -160,9 +161,9 @@ class Distro(distros.Distro): log_adduser_cmd.append('-d/nonexistent') else: adduser_cmd.append('-d/usr/home/%s' % name) - adduser_cmd.append('-m') + adduser_cmd.append('-m') log_adduser_cmd.append('-d/usr/home/%s' % name) - log_adduser_cmd.append('-m') + log_adduser_cmd.append('-m') # Run the command LOG.info("Adding user %s", name) @@ -174,7 +175,7 @@ class Distro(distros.Distro): # TODO: def set_passwd(self, name, **kwargs): - return False + return False def lock_passwd(self, name): try: @@ -185,7 +186,7 @@ class Distro(distros.Distro): # TODO: def write_sudo_rules(self, name, rules, sudo_file=None): - LOG.debug("[write_sudo_rules] Name: %s" % name) + LOG.debug("[write_sudo_rules] Name: %s" % name) def create_user(self, name, **kwargs): self.add_user(name, **kwargs) @@ -209,7 +210,7 @@ class Distro(distros.Distro): ssh_util.setup_user_keys(keys, name, options=None) def _write_network(self, settings): - return + return def apply_locale(self, locale, out_fn=None): loginconf = '/etc/login.conf' @@ -237,14 +238,13 @@ class Distro(distros.Distro): copyfile(backupconf, loginconf) def install_packages(): - return + return def package_command(): - return + return def set_timezone(): - return + return def update_package_sources(): - return - + return -- cgit v1.2.3 From 75d6f035bcd94e6420ba6de5a9d12c1f554771cf Mon Sep 
17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 16:31:40 -0500 Subject: fix freebsd new file header --- cloudinit/distros/freebsd.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index a62503f6..f1650a77 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -1,12 +1,8 @@ # vi: ts=4 expandtab # -# Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P. -# Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2014 Harm Weites # -# Author: Scott Moser -# Author: Juerg Haefliger -# Author: Joshua Harlow +# Author: Harm Weites # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as -- cgit v1.2.3 From 5aa7d4ccf984ac296f58fa355bdce17d175dcc7d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Jan 2014 16:48:12 -0500 Subject: fix util.which if PATH is not in environment This fixes a test case that failed because PATH was unset in the os.environ. --- cloudinit/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/util.py b/cloudinit/util.py index ce8dacbe..77f9ab36 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1850,7 +1850,7 @@ def which(program): if is_exe(program): return program else: - for path in os.environ["PATH"].split(os.pathsep): + for path in os.environ.get("PATH", "").split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): -- cgit v1.2.3 From ba84f51f0143a8ca1ca5113ae932505ce1bfe5e5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 23 Jan 2014 14:41:09 -0800 Subject: Skip retry and continued fetch of userdata when NOT_FOUND When a 404 http code comes back from the fetching of ec2 data, instead of retrying immediatly stop the fetching process and in the userdata fetching function handle this case as a special case of no userdata being fetched (an empty string in this case). --- cloudinit/ec2_utils.py | 27 ++++++++++++++++++++++++--- cloudinit/url_helper.py | 17 +++++++++-------- cloudinit/util.py | 5 +++-- tests/unittests/test_datasource/test_maas.py | 3 ++- tests/unittests/test_ec2_util.py | 8 ++++++++ 5 files changed, 46 insertions(+), 14 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 92a22747..cd94ad4c 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -16,6 +16,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +import httplib from urlparse import (urlparse, urlunparse) import functools @@ -23,9 +24,11 @@ import json import urllib from cloudinit import log as logging +from cloudinit import url_helper from cloudinit import util LOG = logging.getLogger(__name__) +SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND]) def maybe_json_object(text): @@ -138,20 +141,38 @@ class MetadataMaterializer(object): return joined +def _skip_retry_on_codes(status_codes, request_args, cause): + """Returns if a request should retry based on a given set of codes that + case retrying to be stopped/skipped. 
+ """ + if cause.code in status_codes: + return False + return True + + def get_instance_userdata(api_version='latest', metadata_address='http://169.254.169.254', ssl_details=None, timeout=5, retries=5): ud_url = combine_url(metadata_address, api_version) ud_url = combine_url(ud_url, 'user-data') + user_data = '' try: + # It is ok for userdata to not exist (thats why we are stopping if + # NOT_FOUND occurs) and just in that case returning an empty string. + exception_cb = functools.partial(_skip_retry_on_codes, + SKIP_USERDATA_CODES) response = util.read_file_or_url(ud_url, ssl_details=ssl_details, timeout=timeout, - retries=retries) - return str(response) + retries=retries, + exception_cb=exception_cb) + user_data = str(response) + except url_helper.UrlError as e: + if e.code not in SKIP_USERDATA_CODES: + util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) except Exception: util.logexc(LOG, "Failed fetching userdata from url %s", ud_url) - return '' + return user_data def get_instance_metadata(api_version='latest', diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 19a30409..42edf9cf 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -103,7 +103,7 @@ class UrlError(IOError): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True): + check_status=True, allow_redirects=True, exception_cb=None): url = _cleanurl(url) req_args = { 'url': url, @@ -163,14 +163,13 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # Handle retrying ourselves since the built-in support # doesn't handle sleeping between tries... for i in range(0, manual_tries): + req_args['headers'] = headers_cb(url) + filtered_req_args = {} + for (k, v) in req_args.items(): + if k == 'data': + continue + filtered_req_args[k] = v try: - req_args['headers'] = headers_cb(url) - filtered_req_args = {} - for (k, v) in req_args.items(): - if k == 'data': - continue - filtered_req_args[k] = v - LOG.debug("[%s/%s] open '%s' with %s configuration", i, manual_tries, url, filtered_req_args) @@ -196,6 +195,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # ssl exceptions are not going to get fixed by waiting a # few seconds break + if exception_cb and not exception_cb(filtered_req_args, e): + break if i + 1 < manual_tries and sec_between > 0: LOG.debug("Please wait %s seconds while we wait to try again", sec_between) diff --git a/cloudinit/util.py b/cloudinit/util.py index 77f9ab36..e1263f47 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -691,7 +691,7 @@ def fetch_ssl_details(paths=None): def read_file_or_url(url, timeout=5, retries=10, headers=None, data=None, sec_between=1, ssl_details=None, - headers_cb=None): + headers_cb=None, exception_cb=None): url = url.lstrip() if url.startswith("/"): url = "file://%s" % url @@ -708,7 +708,8 @@ def read_file_or_url(url, timeout=5, retries=10, headers_cb=headers_cb, data=data, sec_between=sec_between, - ssl_details=ssl_details) + ssl_details=ssl_details, + exception_cb=exception_cb) def load_yaml(blob, default=None, allowed=(dict,)): diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 2007a6df..ebfb24da 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -119,7 +119,8 @@ class TestMAASDataSource(mocker.MockerTestCase): mock_request(url, headers=None, timeout=mocker.ANY, 
data=mocker.ANY, sec_between=mocker.ANY, ssl_details=mocker.ANY, retries=mocker.ANY, - headers_cb=my_headers_cb) + headers_cb=my_headers_cb, + exception_cb=mocker.ANY) resp = valid.get(key) self.mocker.result(util.StringResponse(resp)) self.mocker.replay() diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index dd588aca..957dc3f2 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -33,6 +33,14 @@ class TestEc2Util(helpers.TestCase): userdata = eu.get_instance_userdata(self.VERSION, retries=0) self.assertEquals('', userdata) + @hp.activate + def test_userdata_fetch_fail_server_not_found(self): + hp.register_uri(hp.GET, + 'http://169.254.169.254/%s/user-data' % (self.VERSION), + status=404) + userdata = eu.get_instance_userdata(self.VERSION) + self.assertEquals('', userdata) + @hp.activate def test_metadata_fetch_no_keys(self): base_url = 'http://169.254.169.254/%s/meta-data' % (self.VERSION) -- cgit v1.2.3 From e6da32a91c59f33fd72bebc43f8e6beae73fbf39 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 23 Jan 2014 14:48:32 -0800 Subject: Remove pylint warning about unused request_args --- cloudinit/ec2_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index cd94ad4c..7f4c0443 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -141,7 +141,7 @@ class MetadataMaterializer(object): return joined -def _skip_retry_on_codes(status_codes, request_args, cause): +def _skip_retry_on_codes(status_codes, _request_args, cause): """Returns if a request should retry based on a given set of codes that case retrying to be stopped/skipped. """ -- cgit v1.2.3 From 31d855041901c86df143b90b7f964c387297f1ca Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 23 Jan 2014 17:36:18 -0800 Subject: Use the right exception --- cloudinit/url_helper.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 42edf9cf..227983f3 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -195,7 +195,7 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # ssl exceptions are not going to get fixed by waiting a # few seconds break - if exception_cb and not exception_cb(filtered_req_args, e): + if exception_cb and not exception_cb(filtered_req_args, excps[-1]): break if i + 1 < manual_tries and sec_between > 0: LOG.debug("Please wait %s seconds while we wait to try again", -- cgit v1.2.3 From fb55c1079375454d2a2a2f82c6c1812759eeb1f1 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Fri, 24 Jan 2014 12:29:04 -0700 Subject: Fixes for SmartOS datasource (LP: #1272115): 1. fixed conflation of user-data and cloud-init user-data. Cloud-init user-data is now namespaced as 'cloud-init:user-data'. 2. user-scripts are now fetched from the meta-data service each boot and executed as in the scripts directory 3. datacenter name is now namespaced as sdc:datacenter 4. 
user-scripts should be shebanged if there is no file magic --- cloudinit/sources/DataSourceSmartOS.py | 45 +++++++- cloudinit/util.py | 72 ++++++++++++ doc/sources/smartos/README.rst | 92 ++++++++++++--- tests/unittests/test_datasource/test_smartos.py | 145 ++++++++++++++++++++++-- 4 files changed, 322 insertions(+), 32 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 6593ce6e..6bd4a5c7 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -25,7 +25,9 @@ # requests on the console. For example, to get the hostname, you # would send "GET hostname" on /dev/ttyS1. # - +# Certain behavior is defined by the DataDictionary +# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html +# Comments with "@datadictionary" are snippets of the definition import base64 from cloudinit import log as logging @@ -43,10 +45,11 @@ SMARTOS_ATTRIB_MAP = { 'local-hostname': ('hostname', True), 'public-keys': ('root_authorized_keys', True), 'user-script': ('user-script', False), - 'user-data': ('user-data', False), + 'legacy-user-data': ('user-data', False), + 'user-data': ('cloud-init:user-data', False), 'iptables_disable': ('iptables_disable', True), 'motd_sys_info': ('motd_sys_info', True), - 'availability_zone': ('datacenter_name', True), + 'availability_zone': ('sdc:datacenter_name', True), 'vendordata': ('sdc:operator-script', False), } @@ -71,7 +74,11 @@ BUILTIN_DS_CONFIG = { 'seed_timeout': 60, 'no_base64_decode': ['root_authorized_keys', 'motd_sys_info', - 'iptables_disable'], + 'iptables_disable', + 'user-data', + 'user-script', + 'sdc:datacenter_name', + ], 'base64_keys': [], 'base64_all': False, 'disk_aliases': {'ephemeral0': '/dev/vdb'}, @@ -88,6 +95,11 @@ BUILTIN_CLOUD_CONFIG = { 'device': 'ephemeral0'}], } +# @datadictionary: this is legacy path for placing files from metadata +# per the SmartOS location. It is not preferable, but is done for +# legacy reasons +LEGACY_USER_D = "/var/db" + class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): @@ -107,6 +119,9 @@ class DataSourceSmartOS(sources.DataSource): self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode') self.b64_keys = self.ds_cfg.get('base64_keys') self.b64_all = self.ds_cfg.get('base64_all') + self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) + self.user_script_d = os.path.join(self.paths.get_cpath("scripts"), + 'per-boot') def __str__(self): root = sources.DataSource.__str__(self) @@ -144,14 +159,32 @@ class DataSourceSmartOS(sources.DataSource): smartos_noun, strip = attribute md[ci_noun] = self.query(smartos_noun, strip=strip) + # @datadictionary: This key has no defined format, but its value + # is written to the file /var/db/mdata-user-data on each boot prior + # to the phase that runs user-script. This file is not to be executed. + # This allows a configuration file of some kind to be injected into + # the machine to be consumed by the user-script when it runs. + u_script = md.get('user-script') + u_script_f = "%s/99_user_script" % self.user_script_d + u_script_l = "%s/user-script" % LEGACY_USER_D + util.write_content(u_script, u_script_f, link=u_script_l, + executable=True) + + # @datadictionary: This key may contain a program that is written + # to a file in the filesystem of the guest on each boot and then + # executed. It may be of any format that would be considered + # executable in the guest instance. 
+ u_data = md.get('legacy-user-data') + u_data_f = "%s/mdata-user-data" % LEGACY_USER_D + util.write_content(u_data, u_data_f) + + # Handle the cloud-init regular meta if not md['local-hostname']: md['local-hostname'] = system_uuid ud = None if md['user-data']: ud = md['user-data'] - elif md['user-script']: - ud = md['user-script'] self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud diff --git a/cloudinit/util.py b/cloudinit/util.py index 77f9ab36..5f64cb69 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1904,3 +1904,75 @@ def expand_dotted_devname(dotted): return toks else: return (dotted, None) + + +def write_executable_content(script, script_f): + """ + This writes executable content and ensures that the shebang + exists. + """ + write_file(script_f, script, mode=0700) + try: + cmd = ["file", "--brief", "--mime-type", script_f] + (f_type, _err) = subp(cmd) + + LOG.debug("script %s mime type is %s" % (script_f, f_type)) + + # if the magic is text/plain, re-write with the shebang + if f_type.strip() == "text/plain": + with open(script_f, 'w') as f: + f.write("#!/bin/bash\n") + f.write(script) + LOG.debug("added shebang to file %s" % script_f) + + except ProcessExecutionError as e: + logexc(LOG, "Failed to identify script type for %s" % script_f, e) + return False + + except IOError as e: + logexc(LOG, "Failed to add shebang to file %s" % script_f, e) + return False + + return True + + +def write_content(content, content_f, link=None, + executable=False): + """ + Write the content to content_f. Under the following rules: + 1. Backup previous content_f + 2. Write the contente + 3. If no content, remove the file + 4. If there is a link, create it + + @param content: what to write + @param content_f: the file name + @param backup_d: the directory to save the backup at + @param link: if defined, location to create a symlink to + @param executable: is the file executable + """ + + if content: + if not executable: + write_file(content_f, content, mode=0400) + else: + w = write_executable_content(content, content_f) + if not w: + LOG.debug("failed to write file to %s" % content_f) + return False + + if not content and os.path.exists(content_f): + os.unlink(content_f) + + if link: + try: + if os.path.islink(link): + os.unlink(link) + if content and os.path.exists(content_f): + ensure_dir(os.path.dirname(link)) + os.symlink(content_f, link) + except IOError as e: + logexc(LOG, "failed establishing content link", e) + return False + + return True diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst index 8b63e520..e63f311f 100644 --- a/doc/sources/smartos/README.rst +++ b/doc/sources/smartos/README.rst @@ -16,11 +16,35 @@ responds with the status and if "SUCCESS" returns until a single ".\n". New versions of the SmartOS tooling will include support for base64 encoded data. -Userdata --------- - -In SmartOS parlance, user-data is a actually meta-data. This userdata can be -provided as key-value pairs. +Meta-data channels +------------------ + +Cloud-init supports three modes of delivering user/meta-data via the flexible +channels of SmartOS. + +* user-data is written to /var/db/user-data + - per the spec, user-data is for consumption by the end-user, not provisioning + tools + - cloud-init entirely ignores this channel other than writting it to disk + - removal of the meta-data key means that /var/db/user-data gets removed + - a backup of previous meta-data is maintained as /var/db/user-data. 
+ - is the epoch time when cloud-init ran + +* user-script is written to /var/lib/cloud/scripts/per-boot/99_user_data + - this is executed each boot + - a link is created to /var/db/user-script + - previous versions of the user-script is written to + /var/lib/cloud/scripts/per-boot.backup/99_user_script.. + - is the epoch time when cloud-init ran. + - when the 'user-script' meta-data key goes missing, the user-script is + removed from the file system, although a backup is maintained. + - if the script is not shebanged (i.e. starts with #!), then + or is not an executable, cloud-init will add a shebang of "#!/bin/bash" + +* cloud-init:user-data is treated like on other Clouds. + - this channel is used for delivering _all_ cloud-init instructions + - scripts delivered over this channel must be well formed (i.e. must have + a shebang) Cloud-init supports reading the traditional meta-data fields supported by the SmartOS tools. These are: @@ -32,19 +56,49 @@ SmartOS tools. These are: Note: At this time iptables_disable and enable_motd_sys_info are read but are not actioned. -user-script ------------ - -SmartOS traditionally supports sending over a user-script for execution at the -rc.local level. Cloud-init supports running user-scripts as if they were -cloud-init user-data. In this sense, anything with a shell interpreter -directive will run. - -user-data and user-script -------------------------- - -In the event that a user defines the meta-data key of "user-data" it will -always supersede any user-script data. This is for consistency. +disabling user-script +--------------------- + +Cloud-init uses the per-boot script functionality to handle the execution +of the user-script. If you want to prevent this use a cloud-config of: + +#cloud-config +cloud_final_modules: + - scripts-per-once + - scripts-per-instance + - scripts-user + - ssh-authkey-fingerprints + - keys-to-console + - phone-home + - final-message + - power-state-change + +Alternatively you can use the json patch method +#cloud-config-jsonp +[ + { "op": "replace", + "path": "/cloud_final_modules", + "value": ["scripts-per-once", + "scripts-per-instance", + "scripts-user", + "ssh-authkey-fingerprints", + "keys-to-console", + "phone-home", + "final-message", + "power-state-change"] + } +] + +The default cloud-config includes "script-per-boot". Cloud-init will still +ingest and write the user-data but will not execute it, when you disable +the per-boot script handling. + +Note: Unless you have an explicit use-case, it is recommended that you not + disable the per-boot script execution, especially if you are using + any of the life-cycle management features of SmartOS. + +The cloud-config needs to be delivered over the cloud-init:user-data channel +in order for cloud-init to ingest it. base64 ------ @@ -54,6 +108,8 @@ are provided by SmartOS: * root_authorized_keys * enable_motd_sys_info * iptables_disable + * user-data + * user-script This list can be changed through system config of variable 'no_base64_decode'. 
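As a rough illustration (an editor's sketch, not part of the patch) of overriding that list through system configuration — mirroring how the unit tests below construct sys_cfg['datasource']['SmartOS'] — the decode exclusions might be supplied like this; the key names are taken from the datasource defaults shown earlier, while the surrounding dict shape is assumed:

    # hypothetical system config fragment; keys mirror BUILTIN_DS_CONFIG above
    sys_cfg = {
        'datasource': {
            'SmartOS': {
                'no_base64_decode': ['root_authorized_keys', 'motd_sys_info',
                                     'iptables_disable', 'user-data',
                                     'user-script'],
            },
        },
    }
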
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 956767d8..ae427bb5 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -27,6 +27,10 @@ from cloudinit import helpers from cloudinit.sources import DataSourceSmartOS from mocker import MockerTestCase +import os +import os.path +import re +import stat import uuid MOCK_RETURNS = { @@ -35,7 +39,11 @@ MOCK_RETURNS = { 'disable_iptables_flag': None, 'enable_motd_sys_info': None, 'test-var1': 'some data', - 'user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), + 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), + 'sdc:datacenter_name': 'somewhere2', + 'sdc:operator-script': '\n'.join(['bin/true', '']), + 'user-data': '\n'.join(['something', '']), + 'user-script': '\n'.join(['/bin/true', '']), } DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc') @@ -101,6 +109,7 @@ class TestSmartOSDataSource(MockerTestCase): def setUp(self): # makeDir comes from MockerTestCase self.tmp = self.makeDir() + self.legacy_user_d = self.makeDir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty self.paths = helpers.Paths({'cloud_dir': self.tmp}) @@ -138,6 +147,7 @@ class TestSmartOSDataSource(MockerTestCase): sys_cfg['datasource'] = sys_cfg.get('datasource', {}) sys_cfg['datasource']['SmartOS'] = ds_cfg + self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)]) self.apply_patches([(mod, 'get_serial', _get_serial)]) self.apply_patches([(mod, 'dmi_data', _dmi_data)]) dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None, @@ -194,7 +204,7 @@ class TestSmartOSDataSource(MockerTestCase): # metadata provided base64_all of true my_returns = MOCK_RETURNS.copy() my_returns['base64_all'] = "true" - for k in ('hostname', 'user-data'): + for k in ('hostname', 'cloud-init:user-data'): my_returns[k] = base64.b64encode(my_returns[k]) dsrc = self._get_ds(mockdata=my_returns) @@ -202,7 +212,7 @@ class TestSmartOSDataSource(MockerTestCase): self.assertTrue(ret) self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) - self.assertEquals(MOCK_RETURNS['user-data'], + self.assertEquals(MOCK_RETURNS['cloud-init:user-data'], dsrc.userdata_raw) self.assertEquals(MOCK_RETURNS['root_authorized_keys'], dsrc.metadata['public-keys']) @@ -213,9 +223,9 @@ class TestSmartOSDataSource(MockerTestCase): def test_b64_userdata(self): my_returns = MOCK_RETURNS.copy() - my_returns['b64-user-data'] = "true" + my_returns['b64-cloud-init:user-data'] = "true" my_returns['b64-hostname'] = "true" - for k in ('hostname', 'user-data'): + for k in ('hostname', 'cloud-init:user-data'): my_returns[k] = base64.b64encode(my_returns[k]) dsrc = self._get_ds(mockdata=my_returns) @@ -223,7 +233,8 @@ class TestSmartOSDataSource(MockerTestCase): self.assertTrue(ret) self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) - self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['cloud-init:user-data'], + dsrc.userdata_raw) self.assertEquals(MOCK_RETURNS['root_authorized_keys'], dsrc.metadata['public-keys']) @@ -238,13 +249,131 @@ class TestSmartOSDataSource(MockerTestCase): self.assertTrue(ret) self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) - self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['cloud-init:user-data'], + dsrc.userdata_raw) def test_userdata(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = 
dsrc.get_data() self.assertTrue(ret) - self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['user-data'], + dsrc.metadata['legacy-user-data']) + self.assertEquals(MOCK_RETURNS['cloud-init:user-data'], + dsrc.userdata_raw) + + def test_sdc_scripts(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['user-script'], + dsrc.metadata['user-script']) + + legacy_script_f = "%s/user-script" % self.legacy_user_d + self.assertTrue(os.path.exists(legacy_script_f)) + self.assertTrue(os.path.islink(legacy_script_f)) + user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] + self.assertEquals(user_script_perm, '700') + + def test_scripts_shebanged(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['user-script'], + dsrc.metadata['user-script']) + + legacy_script_f = "%s/user-script" % self.legacy_user_d + self.assertTrue(os.path.exists(legacy_script_f)) + self.assertTrue(os.path.islink(legacy_script_f)) + shebang = None + with open(legacy_script_f, 'r') as f: + shebang = f.readlines()[0].strip() + self.assertEquals(shebang, "#!/bin/bash") + user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] + self.assertEquals(user_script_perm, '700') + + def test_scripts_shebang_not_added(self): + """ + Test that the SmartOS requirement that plain text scripts + are executable. This test makes sure that plain texts scripts + with out file magic have it added appropriately by cloud-init. + """ + + my_returns = MOCK_RETURNS.copy() + my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl', + 'print("hi")', '']) + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(my_returns['user-script'], + dsrc.metadata['user-script']) + + legacy_script_f = "%s/user-script" % self.legacy_user_d + self.assertTrue(os.path.exists(legacy_script_f)) + self.assertTrue(os.path.islink(legacy_script_f)) + shebang = None + with open(legacy_script_f, 'r') as f: + shebang = f.readlines()[0].strip() + self.assertEquals(shebang, "#!/usr/bin/perl") + + def test_scripts_removed(self): + """ + Since SmartOS requires that the user script is fetched + each boot, we want to make sure that the information + is backed-up for user-review later. + + This tests the behavior of when a script is removed. It makes + sure that a) the previous script is backed-up; and 2) that + there is no script remaining. + """ + + script_d = os.path.join(self.tmp, "scripts", "per-boot") + os.makedirs(script_d) + + test_script_f = "%s/99_user_script" % script_d + with open(test_script_f, 'w') as f: + f.write("TEST DATA") + + my_returns = MOCK_RETURNS.copy() + del my_returns['user-script'] + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertFalse(dsrc.metadata['user-script']) + self.assertFalse(os.path.exists(test_script_f)) + + def test_userdata_removed(self): + """ + User-data in the SmartOS world is supposed to be written to a file + each and every boot. This tests to make sure that in the event the + legacy user-data is removed, the existing user-data is backed-up and + there is no /var/db/user-data left. 
+ """ + + user_data_f = "%s/mdata-user-data" % self.legacy_user_d + with open(user_data_f, 'w') as f: + f.write("PREVIOUS") + + my_returns = MOCK_RETURNS.copy() + del my_returns['user-data'] + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertFalse(dsrc.metadata.get('legacy-user-data')) + + found_new = False + for root, _dirs, files in os.walk(self.legacy_user_d): + for name in files: + name_f = os.path.join(root, name) + permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] + if re.match(r'.*\/mdata-user-data$', name_f): + found_new = True + print name_f + self.assertEquals(permissions, '400') + + self.assertFalse(found_new) def test_disable_iptables_flag(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) -- cgit v1.2.3 From c92cd051a1d598f83de03c4135c800b17fd46a9a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Jan 2014 14:47:28 -0500 Subject: pep8/pylint fixes tools/run-pep8 wasn't checking all python files. tools/run-pylint wasnt checking bin/cloud-init fixed resultant pep8 issues after finding them. --- cloudinit/distros/freebsd.py | 2 +- cloudinit/distros/net_util.py | 18 +++++++++--------- doc/rtd/conf.py | 3 ++- setup.py | 6 +++--- tools/run-pep8 | 11 +---------- tools/run-pylint | 2 +- 6 files changed, 17 insertions(+), 25 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index f1650a77..d28860eb 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -2,7 +2,7 @@ # # Copyright (C) 2014 Harm Weites # -# Author: Harm Weites +# Author: Harm Weites # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index 5f60666d..b9bcfd8b 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -51,7 +51,7 @@ # # auto lo # iface lo inet loopback -# +# # auto eth0 # iface eth0 inet static # address 10.0.0.1 @@ -64,17 +64,17 @@ # { # "lo": { # "auto": true -# }, +# }, # "eth0": { -# "auto": true, +# "auto": true, # "dns-nameservers": [ -# "98.0.0.1", +# "98.0.0.1", # "98.0.0.2" -# ], -# "broadcast": "10.0.0.255", -# "netmask": "255.255.252.0", -# "bootproto": "static", -# "address": "10.0.0.1", +# ], +# "broadcast": "10.0.0.255", +# "netmask": "255.255.252.0", +# "bootproto": "static", +# "address": "10.0.0.1", # "gateway": "10.0.0.2" # } # } diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index c9ae79f4..52a8f92b 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -1,4 +1,5 @@ -import sys, os +import os +import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the diff --git a/setup.py b/setup.py index 8d18b97e..9118e5f6 100755 --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ def tiny_p(cmd, capture=True): (out, err) = sp.communicate() ret = sp.returncode # pylint: disable=E1101 if ret not in [0]: - raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" + raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" % (cmd, ret, out, err)) return (out, err) @@ -102,7 +102,7 @@ class InitsysInstallData(install): " specifying a init system!") % (", ".join(INITSYS_TYPES))) elif self.init_system: self.distribution.data_files.append( - (INITSYS_ROOTS[self.init_system], + (INITSYS_ROOTS[self.init_system], INITSYS_FILES[self.init_system])) # Force that command to reinitalize (with new file list) self.distribution.reinitialize_command('install_data', True) @@ -134,7 +134,7 @@ setuptools.setup(name='cloud-init', [f for f in glob('doc/examples/seed/*') if is_f(f)]), ], install_requires=read_requires(), - cmdclass = { + cmdclass={ # Use a subclass for install that handles # adding on the right init system configuration files 'install': InitsysInstallData, diff --git a/tools/run-pep8 b/tools/run-pep8 index 20e594bc..cfce5edd 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -1,15 +1,7 @@ #!/bin/bash -ci_files='cloudinit/*.py cloudinit/config/*.py' -test_files=$(find tests -name "*.py") -def_files="$ci_files $test_files" - if [ $# -eq 0 ]; then - files=( ) - for f in $def_files; do - [ -f "$f" ] || { echo "failed, $f not a file" 1>&2; exit 1; } - files[${#files[@]}]=${f} - done + files=( bin/cloud-init $(find * -name "*.py" -type f) ) else files=( "$@" ); fi @@ -44,4 +36,3 @@ cmd=( echo -e "\nRunning 'cloudinit' pep8:" echo "${cmd[@]}" "${cmd[@]}" - diff --git a/tools/run-pylint b/tools/run-pylint index b74efda9..0b7c16d4 100755 --- a/tools/run-pylint +++ b/tools/run-pylint @@ -1,7 +1,7 @@ #!/bin/bash if [ $# -eq 0 ]; then - files=( $(find * -name "*.py" -type f) ) + files=( bin/cloud-init $(find * -name "*.py" -type f) ) else files=( "$@" ); fi -- cgit v1.2.3 From 7079fac4646380db1e064a433d7843473bda1542 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Fri, 24 Jan 2014 12:52:04 -0700 Subject: Fixed flip-flopped comment --- cloudinit/distros/freebsd.py | 2 +- cloudinit/distros/net_util.py | 18 +++++++++--------- cloudinit/sources/DataSourceSmartOS.py | 18 +++++++++--------- 3 files changed, 19 insertions(+), 19 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index f1650a77..d28860eb 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -2,7 +2,7 @@ # # Copyright (C) 2014 Harm Weites # -# Author: Harm Weites +# Author: Harm Weites # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index 5f60666d..b9bcfd8b 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -51,7 +51,7 @@ # # auto lo # iface lo inet loopback -# +# # auto eth0 # iface eth0 inet static # address 10.0.0.1 @@ -64,17 +64,17 @@ # { # "lo": { # "auto": true -# }, +# }, # "eth0": { -# "auto": true, +# "auto": true, # "dns-nameservers": [ -# "98.0.0.1", +# "98.0.0.1", # "98.0.0.2" -# ], -# "broadcast": "10.0.0.255", -# "netmask": "255.255.252.0", -# "bootproto": "static", -# "address": "10.0.0.1", +# ], +# "broadcast": "10.0.0.255", +# "netmask": "255.255.252.0", +# "bootproto": "static", +# 
"address": "10.0.0.1", # "gateway": "10.0.0.2" # } # } diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 6bd4a5c7..73dd2ba0 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -159,21 +159,21 @@ class DataSourceSmartOS(sources.DataSource): smartos_noun, strip = attribute md[ci_noun] = self.query(smartos_noun, strip=strip) - # @datadictionary: This key has no defined format, but its value - # is written to the file /var/db/mdata-user-data on each boot prior - # to the phase that runs user-script. This file is not to be executed. - # This allows a configuration file of some kind to be injected into - # the machine to be consumed by the user-script when it runs. + # @datadictionary: This key may contain a program that is written + # to a file in the filesystem of the guest on each boot and then + # executed. It may be of any format that would be considered + # executable in the guest instance. u_script = md.get('user-script') u_script_f = "%s/99_user_script" % self.user_script_d u_script_l = "%s/user-script" % LEGACY_USER_D util.write_content(u_script, u_script_f, link=u_script_l, executable=True) - # @datadictionary: This key may contain a program that is written - # to a file in the filesystem of the guest on each boot and then - # executed. It may be of any format that would be considered - # executable in the guest instance. + # @datadictionary: This key has no defined format, but its value + # is written to the file /var/db/mdata-user-data on each boot prior + # to the phase that runs user-script. This file is not to be executed. + # This allows a configuration file of some kind to be injected into + # the machine to be consumed by the user-script when it runs. u_data = md.get('legacy-user-data') u_data_f = "%s/mdata-user-data" % LEGACY_USER_D util.write_content(u_data, u_data_f) -- cgit v1.2.3 From 4919cd124e57e82ecfcdaa9bfcbc051c719708e6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Jan 2014 15:29:09 -0500 Subject: pylint and long line fixes. This fixes up many long lines to be < 80 chars and some other pylint issues. pylint 1.1 (in trusty) is now complaining about the lazy logging, so I'll clean that up when I touch things. --- cloudinit/config/cc_growpart.py | 3 +- cloudinit/distros/freebsd.py | 47 +++++++++++----------- cloudinit/netinfo.py | 30 ++++++-------- cloudinit/user_data.py | 3 +- cloudinit/util.py | 12 +++--- .../unittests/test_datasource/test_configdrive.py | 2 +- 6 files changed, 49 insertions(+), 48 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index b81951ad..f52c41f0 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -223,7 +223,8 @@ def resize_devices(resizer, devices): "stat of '%s' failed: %s" % (blockdev, e),)) continue - if not stat.S_ISBLK(statret.st_mode) and not stat.S_ISCHR(statret.st_mode): + if (not stat.S_ISBLK(statret.st_mode) and + not stat.S_ISCHR(statret.st_mode)): info.append((devent, RESIZE.SKIPPED, "device '%s' not a block device" % blockdev,)) continue diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index d28860eb..4c0c6d29 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -16,15 +16,14 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+import re + from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging -from cloudinit import netinfo from cloudinit import ssh_util from cloudinit import util -from cloudinit.settings import PER_INSTANCE - LOG = logging.getLogger(__name__) @@ -39,26 +38,27 @@ class Distro(distros.Distro): # Updates a key in /etc/rc.conf. def updatercconf(self, key, value): - LOG.debug("updatercconf: %s => %s" % (key, value)) + LOG.debug("updatercconf: %s => %s", key, value) conf = self.loadrcconf() configchanged = False for item in conf: if item == key and conf[item] != value: conf[item] = value - LOG.debug("[rc.conf]: Value %s for key %s needs to be changed" % (value, key)) + LOG.debug("[rc.conf]: Value %s for key %s needs to be changed", + value, key) configchanged = True if configchanged: LOG.debug("Writing new /etc/rc.conf file") - with open('/etc/rc.conf', 'w') as file: + with open('/etc/rc.conf', 'w') as fp: for keyval in conf.items(): - file.write("%s=%s\n" % keyval) + fp.write("%s=%s\n" % keyval) # Load the contents of /etc/rc.conf and store all keys in a dict. def loadrcconf(self): conf = {} - with open("/etc/rc.conf") as file: - for line in file: + with open("/etc/rc.conf") as fp: + for line in fp: tok = line.split('=') conf[tok[0]] = tok[1].rstrip() return conf @@ -75,7 +75,7 @@ class Distro(distros.Distro): sys_hostname = self._read_hostname() return ('rc.conf', sys_hostname) - def _read_hostname(self, default=None): + def _read_hostname(self, filename, default=None): hostname = None try: hostname = self.readrcconf('hostname') @@ -90,17 +90,17 @@ class Distro(distros.Distro): return fqdn return hostname - def _write_hostname(self, your_hostname, out_fn): - self.updatercconf('hostname', your_hostname) + def _write_hostname(self, hostname, filename): + self.updatercconf('hostname', hostname) def create_group(self, name, members): group_add_cmd = ['pw', '-n', name] if util.is_group(name): - LOG.warn("Skipping creation of existing group '%s'" % name) + LOG.warn("Skipping creation of existing group '%s'", name) else: try: util.subp(group_add_cmd) - LOG.info("Created new group %s" % name) + LOG.info("Created new group %s", name) except Exception: util.logexc("Failed to create group %s", name) @@ -111,11 +111,11 @@ class Distro(distros.Distro): "; user does not exist.", member, name) continue util.subp(['pw', 'usermod', '-n', name, '-G', member]) - LOG.info("Added user '%s' to group '%s'" % (member, name)) + LOG.info("Added user '%s' to group '%s'", member, name) def add_user(self, name, **kwargs): if util.is_user(name): - LOG.info("User %s already exists, skipping." 
% name) + LOG.info("User %s already exists, skipping.", name) return False adduser_cmd = ['pw', 'useradd', '-n', name] @@ -170,7 +170,7 @@ class Distro(distros.Distro): raise e # TODO: - def set_passwd(self, name, **kwargs): + def set_passwd(self, user, passwd, hashed=False): return False def lock_passwd(self, name): @@ -182,7 +182,7 @@ class Distro(distros.Distro): # TODO: def write_sudo_rules(self, name, rules, sudo_file=None): - LOG.debug("[write_sudo_rules] Name: %s" % name) + LOG.debug("[write_sudo_rules] Name: %s", name) def create_user(self, name, **kwargs): self.add_user(name, **kwargs) @@ -217,7 +217,8 @@ class Distro(distros.Distro): origconf = open(loginconf, 'r') for line in origconf: - newconf.write(re.sub('^default:', r'default:lang=%s:' % locale, line)) + newconf.write(re.sub(r'^default:', + r'default:lang=%s:' % locale, line)) newconf.close() origconf.close() # Make a backup of login.conf. @@ -233,14 +234,14 @@ class Distro(distros.Distro): util.logexc("Failed to apply locale %s", locale) copyfile(backupconf, loginconf) - def install_packages(): + def install_packages(self, pkglist): return - def package_command(): + def package_command(self, cmd, args=None, pkgs=None): return - def set_timezone(): + def set_timezone(self, tz): return - def update_package_sources(): + def update_package_sources(self): return diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py index 63f720e4..ac3c011f 100644 --- a/cloudinit/netinfo.py +++ b/cloudinit/netinfo.py @@ -44,7 +44,7 @@ def netdev_info(empty=""): # If the output of ifconfig doesn't contain the required info in the # obvious place, use a regex filter to be sure. elif len(toks) > 1: - if re.search("flags=\d+ %r" % (link, source)) + LOG.debug("Creating symbolic link from %r => %r", link, source) os.symlink(source, link) @@ -1444,7 +1445,8 @@ def uptime(): size = ctypes.c_size_t() buf = ctypes.c_int() size.value = ctypes.sizeof(buf) - libc.sysctlbyname("kern.boottime", ctypes.byref(buf), ctypes.byref(size), None, 0) + libc.sysctlbyname("kern.boottime", ctypes.byref(buf), + ctypes.byref(size), None, 0) now = time.time() bootup = buf.value uptime_str = now - bootup @@ -1793,7 +1795,7 @@ def parse_mount(path): (mountoutput, _err) = subp("mount") mount_locs = mountoutput.splitlines() for line in mount_locs: - m = re.search('^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line) + m = re.search(r'^(/dev/[\S]+) on (/.*) \((.+), .+, (.+)\)$', line) devpth = m.group(1) mount_point = m.group(2) fs_type = m.group(3) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 3c1e8add..1f4a0a0b 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -285,7 +285,7 @@ class TestConfigDriveDataSource(MockerTestCase): self.assertEqual(["/dev/vdb", "/dev/zdd"], ds.find_candidate_devs()) - # verify that partitions are considered, but only if they have a label. + # verify that partitions are considered, that have correct label. devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]} self.assertEqual(["/dev/vdb3"], -- cgit v1.2.3 From 93c0bcf6a048e278ead6b4392d3507c40441b7bb Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Fri, 24 Jan 2014 15:28:55 -0700 Subject: Make SmartOS script handling self-contained in datasource. 
--- cloudinit/sources/DataSourceSmartOS.py | 63 +++++++++++++++++++++++++++-- cloudinit/util.py | 72 ---------------------------------- 2 files changed, 60 insertions(+), 75 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 73dd2ba0..b0fabe05 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -36,6 +36,7 @@ from cloudinit import util import os import os.path import serial +import subprocess LOG = logging.getLogger(__name__) @@ -166,8 +167,8 @@ class DataSourceSmartOS(sources.DataSource): u_script = md.get('user-script') u_script_f = "%s/99_user_script" % self.user_script_d u_script_l = "%s/user-script" % LEGACY_USER_D - util.write_content(u_script, u_script_f, link=u_script_l, - executable=True) + write_boot_content(u_script, u_script_f, link=u_script_l, shebang=True, + mode=0700) # @datadictionary: This key has no defined format, but its value # is written to the file /var/db/mdata-user-data on each boot prior @@ -176,7 +177,7 @@ class DataSourceSmartOS(sources.DataSource): # the machine to be consumed by the user-script when it runs. u_data = md.get('legacy-user-data') u_data_f = "%s/mdata-user-data" % LEGACY_USER_D - util.write_content(u_data, u_data_f) + write_boot_content(u_data, u_data_f) # Handle the cloud-init regular meta if not md['local-hostname']: @@ -312,6 +313,62 @@ def dmi_data(): return (sys_uuid.lower().strip(), sys_type.strip()) +def write_boot_content(content, content_f, link=None, shebang=False, mode=0400): + """ + Write the content to content_f. Under the following rules: + 1. If no content, remove the file + 2. Write the content + 3. If executable and no file magic, add it + 4. If there is a link, create it + + @param content: what to write + @param content_f: the file name + @param backup_d: the directory to save the backup at + @param link: if defined, location to create a symlink to + @param shebang: if no file magic, set shebang + @param mode: file mode + + Becuase of the way that Cloud-init executes scripts (no shell), + a script will fail to execute if does not have a magic bit (shebang) set + for the file. If shebang=True, then the script will be checked for a magic + bit and to the SmartOS default of assuming that bash. + """ + + if not content and os.path.exists(content_f): + os.unlink(content_f) + if link and os.path.islink(link): + os.unlink(link) + if not content: + return + + util.write_file(content_f, content, mode=mode) + + if shebang: + try: + cmd = ["file", "--brief", "--mime-type", content_f] + (f_type, _err) = util.subp(cmd) + LOG.debug("script %s mime type is %s" % (content_f, f_type)) + line_one = content.splitlines()[0] + if f_type.strip() == "text/plain" and "#!" 
not in line_one: + new_content = "\n".join(["#!/bin/bash", content]) + util.write_file(content_f, new_content, mode=mode) + LOG.debug("added shebang to file %s" % content_f) + + except Exception as e: + util.logexc(LOG, ("Failed to identify script type for %s" % + content_f, e)) + + if link: + try: + if os.path.islink(link): + os.unlink(link) + if content and os.path.exists(content_f): + util.ensure_dir(os.path.dirname(link)) + os.symlink(content_f, link) + except IOError as e: + util.logexc(LOG, "failed establishing content link", e) + + # Used to match classes to dependencies datasources = [ (DataSourceSmartOS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), diff --git a/cloudinit/util.py b/cloudinit/util.py index bf4006cb..d350ba08 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1907,75 +1907,3 @@ def expand_dotted_devname(dotted): return toks else: return (dotted, None) - - -def write_executable_content(script, script_f): - """ - This writes executable content and ensures that the shebang - exists. - """ - write_file(script_f, script, mode=0700) - try: - cmd = ["file", "--brief", "--mime-type", script_f] - (f_type, _err) = subp(cmd) - - LOG.debug("script %s mime type is %s" % (script_f, f_type)) - - # if the magic is text/plain, re-write with the shebang - if f_type.strip() == "text/plain": - with open(script_f, 'w') as f: - f.write("#!/bin/bash\n") - f.write(script) - LOG.debug("added shebang to file %s" % script_f) - - except ProcessExecutionError as e: - logexc(LOG, "Failed to identify script type for %s" % script_f, e) - return False - - except IOError as e: - logexc(LOG, "Failed to add shebang to file %s" % script_f, e) - return False - - return True - - -def write_content(content, content_f, link=None, - executable=False): - """ - Write the content to content_f. Under the following rules: - 1. Backup previous content_f - 2. Write the contente - 3. If no content, remove the file - 4. If there is a link, create it - - @param content: what to write - @param content_f: the file name - @param backup_d: the directory to save the backup at - @param link: if defined, location to create a symlink to - @param executable: is the file executable - """ - - if content: - if not executable: - write_file(content_f, content, mode=0400) - else: - w = write_executable_content(content, content_f) - if not w: - LOG.debug("failed to write file to %s" % content_f) - return False - - if not content and os.path.exists(content_f): - os.unlink(content_f) - - if link: - try: - if os.path.islink(link): - os.unlink(link) - if content and os.path.exists(content_f): - ensure_dir(os.path.dirname(link)) - os.symlink(content_f, link) - except IOError as e: - logexc(LOG, "failed establishing content link", e) - return False - - return True -- cgit v1.2.3 From eb1cc91597656414642ca7b66d5912882acd7ffc Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 24 Jan 2014 15:20:56 -0800 Subject: Freebsd cleanups - Remove direct usage of open() and use the corresponding helpers instead. - Fix the non-existence of the copyfile routine and just use the ones that do exist in the utils module to do the file backup. - Use class level constants for the various file names read, this matches the same usage in the other distro types. 
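The gist of this cleanup, sketched by the editor rather than quoted from the patch (helper names such as util.copy and util.write_file are the ones the diff below actually uses; the exact call sites shown here are illustrative):

    # before: direct file handling plus a copyfile() that does not exist
    with open('/etc/login.conf', 'w') as fp:
        fp.write(new_conf)
    copyfile(loginconf, backupconf)

    # after: cloudinit.util helpers and class-level path constants
    util.copy(self.login_conf_fn, self.login_conf_fn_bak)
    util.write_file(self.login_conf_fn, new_conf)
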
--- cloudinit/distros/freebsd.py | 62 ++++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 28 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 4c0c6d29..d9d51dea 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -16,6 +16,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from StringIO import StringIO + import re from cloudinit import distros @@ -28,6 +30,10 @@ LOG = logging.getLogger(__name__) class Distro(distros.Distro): + rc_conf_fn = "/etc/rc.conf" + login_conf_fn = '/etc/login.conf' + login_conf_fn_bak = '/etc/login.conf.orig' + def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) # This will be used to restrict certain @@ -40,27 +46,28 @@ class Distro(distros.Distro): def updatercconf(self, key, value): LOG.debug("updatercconf: %s => %s", key, value) conf = self.loadrcconf() - configchanged = False + config_changed = False for item in conf: if item == key and conf[item] != value: conf[item] = value LOG.debug("[rc.conf]: Value %s for key %s needs to be changed", value, key) - configchanged = True + config_changed = True - if configchanged: - LOG.debug("Writing new /etc/rc.conf file") - with open('/etc/rc.conf', 'w') as fp: - for keyval in conf.items(): - fp.write("%s=%s\n" % keyval) + if config_changed: + LOG.debug("Writing new %s file", self.rc_conf_fn) + buf = StringIO() + for keyval in conf.items(): + buf.write("%s=%s\n" % keyval) + util.write_file(self.rc_conf_fn, buf.getvalue()) # Load the contents of /etc/rc.conf and store all keys in a dict. def loadrcconf(self): conf = {} - with open("/etc/rc.conf") as fp: - for line in fp: - tok = line.split('=') - conf[tok[0]] = tok[1].rstrip() + lines = util.load_file(self.rc_conf_fn).splitlines() + for line in lines: + tok = line.split('=') + conf[tok[0]] = tok[1].rstrip() return conf def readrcconf(self, key): @@ -139,7 +146,7 @@ class Distro(distros.Distro): redact_opts = ['passwd'] for key, val in kwargs.iteritems(): - if key in adduser_opts and val and isinstance(val, str): + if key in adduser_opts and val and isinstance(val, basestring): adduser_cmd.extend([adduser_opts[key], val]) # Redact certain fields from the logs @@ -209,30 +216,29 @@ class Distro(distros.Distro): return def apply_locale(self, locale, out_fn=None): - loginconf = '/etc/login.conf' - newloginconf = '/tmp/login.conf.new' - backupconf = '/etc/login.conf.orig' - - newconf = open(newloginconf, 'w') - origconf = open(loginconf, 'r') - - for line in origconf: + # Adjust the locals value to the new value + newconf = StringIO() + for line in util.load_file(self.login_conf_fn).splitlines(): newconf.write(re.sub(r'^default:', r'default:lang=%s:' % locale, line)) - newconf.close() - origconf.close() + newconf.write("\n") + # Make a backup of login.conf. - copyfile(loginconf, backupconf) - # And copy the new login.conf. - copyfile(newloginconf, loginconf) + util.copy(self.login_conf_fn, self.login_conf_fn_bak) + + # And write the new login.conf. + util.write_file(self.login_conf_fn, newconf.getvalue()) try: util.logexc("Running cap_mkdb for %s", locale) - util.subp(['cap_mkdb', '/etc/login.conf']) - except: + util.subp(['cap_mkdb', self.login_conf_fn]) + except util.ProcessExecutionError: # cap_mkdb failed, so restore the backup. 
util.logexc("Failed to apply locale %s", locale) - copyfile(backupconf, loginconf) + try: + util.copy(self.login_conf_fn_bak, self.login_conf_fn) + except IOError: + pass def install_packages(self, pkglist): return -- cgit v1.2.3 From d32091e5016924e15a956d264f5d44ccc8613e2a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 24 Jan 2014 15:23:57 -0800 Subject: Log failure to restore backup locale file --- cloudinit/distros/freebsd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index d9d51dea..ab5e334a 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -238,7 +238,7 @@ class Distro(distros.Distro): try: util.copy(self.login_conf_fn_bak, self.login_conf_fn) except IOError: - pass + util.logexc("Failed to restore %s backup", self.login_conf_fn) def install_packages(self, pkglist): return -- cgit v1.2.3 From 9876ad7d74f90f7c7433fb4dc1fa07e664ff92bc Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Jan 2014 20:13:38 -0500 Subject: minor changes for pylint, write_boot_content improvement. if write_boot_content is given somethign that starts with #!, then there isn't a reason to invoke 'file' to tell us that it starts with shebang. This way, we only run file in 2 cases: a.) binary content (don't really know if that is supported or not) b.) magic "user meant to run this with /bin/bash but couldn't be bothered to type that" --- cloudinit/sources/DataSourceSmartOS.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index b0fabe05..140c7814 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -36,7 +36,6 @@ from cloudinit import util import os import os.path import serial -import subprocess LOG = logging.getLogger(__name__) @@ -313,7 +312,8 @@ def dmi_data(): return (sys_uuid.lower().strip(), sys_type.strip()) -def write_boot_content(content, content_f, link=None, shebang=False, mode=0400): +def write_boot_content(content, content_f, link=None, shebang=False, + mode=0400): """ Write the content to content_f. Under the following rules: 1. If no content, remove the file @@ -343,16 +343,15 @@ def write_boot_content(content, content_f, link=None, shebang=False, mode=0400): util.write_file(content_f, content, mode=mode) - if shebang: + if shebang and not content.startswith("#!"): try: cmd = ["file", "--brief", "--mime-type", content_f] (f_type, _err) = util.subp(cmd) - LOG.debug("script %s mime type is %s" % (content_f, f_type)) - line_one = content.splitlines()[0] - if f_type.strip() == "text/plain" and "#!" not in line_one: + LOG.debug("script %s mime type is %s", content_f, f_type) + if f_type.strip() == "text/plain": new_content = "\n".join(["#!/bin/bash", content]) util.write_file(content_f, new_content, mode=mode) - LOG.debug("added shebang to file %s" % content_f) + LOG.debug("added shebang to file %s", content_f) except Exception as e: util.logexc(LOG, ("Failed to identify script type for %s" % -- cgit v1.2.3 From 8c7aecbb695f50514ae1bea9c105176b6345fb95 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 24 Jan 2014 17:34:52 -0800 Subject: Fix logexc usage in freebsd distro - There appeared to be a few logexc calls that did not pass the logger in, fix those locations where this occured. 
- When a group member adding fails, log the error and try the next member instead of failing adding any more members --- cloudinit/distros/freebsd.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index ab5e334a..0f1656c3 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -109,16 +109,20 @@ class Distro(distros.Distro): util.subp(group_add_cmd) LOG.info("Created new group %s", name) except Exception: - util.logexc("Failed to create group %s", name) + util.logexc(LOG, "Failed to create group %s", name) if len(members) > 0: for member in members: if not util.is_user(member): LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) + "; user does not exist.", member, name) continue - util.subp(['pw', 'usermod', '-n', name, '-G', member]) - LOG.info("Added user '%s' to group '%s'", member, name) + try: + util.subp(['pw', 'usermod', '-n', name, '-G', member]) + LOG.info("Added user '%s' to group '%s'", member, name) + except Exception: + util.logexc(LOG, "Failed to add user '%s' to group '%s'", + member, name) def add_user(self, name, **kwargs): if util.is_user(name): @@ -230,15 +234,16 @@ class Distro(distros.Distro): util.write_file(self.login_conf_fn, newconf.getvalue()) try: - util.logexc("Running cap_mkdb for %s", locale) + LOG.debug("Running cap_mkdb for %s", locale) util.subp(['cap_mkdb', self.login_conf_fn]) except util.ProcessExecutionError: # cap_mkdb failed, so restore the backup. - util.logexc("Failed to apply locale %s", locale) + util.logexc(LOG, "Failed to apply locale %s", locale) try: util.copy(self.login_conf_fn_bak, self.login_conf_fn) except IOError: - util.logexc("Failed to restore %s backup", self.login_conf_fn) + util.logexc(LOG, "Failed to restore %s backup", + self.login_conf_fn) def install_packages(self, pkglist): return -- cgit v1.2.3 From 15ebe2a01c0bcdaae43054e0e3559871a99f72aa Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 24 Jan 2014 17:39:51 -0800 Subject: Don't try to create members if group creation fails --- cloudinit/distros/freebsd.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 0f1656c3..afb502c9 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -108,8 +108,9 @@ class Distro(distros.Distro): try: util.subp(group_add_cmd) LOG.info("Created new group %s", name) - except Exception: + except Exception as e: util.logexc(LOG, "Failed to create group %s", name) + raise e if len(members) > 0: for member in members: -- cgit v1.2.3 From 0be4922d92e874b2e3300bdde65829cdb6569524 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Jan 2014 22:31:28 -0500 Subject: read_file_or_url: raise UrlError with 404 on ENOENT This makes it easier to call read_file_or_url and handle file or url errors. Now read_file_or_url will raise a UrlError in either case on errors. 
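To make the effect of this change concrete, here is a rough sketch (an editor's illustration, not part of the patch) of the calling pattern it enables; UrlError, NOT_FOUND and read_file_or_url come from the diff below, while fetch_optional itself is hypothetical:

    from cloudinit import url_helper
    from cloudinit import util

    def fetch_optional(source):
        # 'source' may be a local path ("/..." or "file://...") or an http URL;
        # a missing file (ENOENT) and an http 404 now both surface as UrlError
        # with code == url_helper.NOT_FOUND, so one except clause covers both.
        try:
            return str(util.read_file_or_url(source, retries=0))
        except url_helper.UrlError as e:
            if e.code == url_helper.NOT_FOUND:
                return None
            raise
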
--- cloudinit/url_helper.py | 28 ++++++++++++++++++++ cloudinit/util.py | 38 ++++++++-------------------- tests/unittests/test__init__.py | 6 ++--- tests/unittests/test_datasource/test_maas.py | 2 +- 4 files changed, 42 insertions(+), 32 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 227983f3..97ed75ad 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -20,6 +20,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +import httplib import time import requests @@ -32,6 +33,8 @@ from cloudinit import version LOG = logging.getLogger(__name__) +NOT_FOUND = httplib.NOT_FOUND + # Check if requests has ssl support (added in requests >= 0.8.8) SSL_ENABLED = False CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0) @@ -58,6 +61,31 @@ def _cleanurl(url): return urlunparse(parsed_url) +# Made to have same accessors as UrlResponse so that the +# read_file_or_url can return this or that object and the +# 'user' of those objects will not need to know the difference. +class StringResponse(object): + def __init__(self, contents, code=200): + self.code = code + self.headers = {} + self.contents = contents + self.url = None + + def ok(self, *args, **kwargs): # pylint: disable=W0613 + if self.code != 200: + return False + return True + + def __str__(self): + return self.contents + + +class FileResponse(StringResponse): + def __init__(self, path, contents, code=200): + StringResponse.__init__(self, contents, code=code) + self.url = path + + class UrlResponse(object): def __init__(self, response): self._response = response diff --git a/cloudinit/util.py b/cloudinit/util.py index d350ba08..b3332acd 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -74,31 +74,6 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters) CONTAINER_TESTS = ['running-in-container', 'lxc-is-container'] -# Made to have same accessors as UrlResponse so that the -# read_file_or_url can return this or that object and the -# 'user' of those objects will not need to know the difference. 
-class StringResponse(object): - def __init__(self, contents, code=200): - self.code = code - self.headers = {} - self.contents = contents - self.url = None - - def ok(self, *args, **kwargs): # pylint: disable=W0613 - if self.code != 200: - return False - return True - - def __str__(self): - return self.contents - - -class FileResponse(StringResponse): - def __init__(self, path, contents, code=200): - StringResponse.__init__(self, contents, code=code) - self.url = path - - class ProcessExecutionError(IOError): MESSAGE_TMPL = ('%(description)s\n' @@ -651,8 +626,8 @@ def read_optional_seed(fill, base="", ext="", timeout=5): fill['user-data'] = ud fill['meta-data'] = md return True - except IOError as e: - if e.errno == errno.ENOENT: + except url_helper.UrlError as e: + if e.code == url_helper.NOT_FOUND: return False raise @@ -699,7 +674,14 @@ def read_file_or_url(url, timeout=5, retries=10, if data: LOG.warn("Unable to post data to file resource %s", url) file_path = url[len("file://"):] - return FileResponse(file_path, contents=load_file(file_path)) + try: + contents = load_file(file_path) + except IOError as e: + code = e.errno + if e.errno == errno.ENOENT: + code = url_helper.NOT_FOUND + raise url_helper.UrlError(cause=e, code=code, headers=None) + return url_helper.FileResponse(file_path, contents=contents) else: return url_helper.readurl(url, timeout=timeout, diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index b4b20e51..8c41c1ca 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -196,7 +196,7 @@ class TestCmdlineUrl(MockerTestCase): mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False) mock_readurl(url, ARGS, KWARGS) - self.mocker.result(util.StringResponse(payload)) + self.mocker.result(url_helper.StringResponse(payload)) self.mocker.replay() self.assertEqual((key, url, None), @@ -212,7 +212,7 @@ class TestCmdlineUrl(MockerTestCase): mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False) mock_readurl(url, ARGS, KWARGS) - self.mocker.result(util.StringResponse(payload)) + self.mocker.result(url_helper.StringResponse(payload)) self.mocker.replay() self.assertEqual((key, url, payload), @@ -225,7 +225,7 @@ class TestCmdlineUrl(MockerTestCase): cmdline = "ro %s=%s bar=1" % (key, url) self.mocker.replace(url_helper.readurl, passthrough=False) - self.mocker.result(util.StringResponse("")) + self.mocker.result(url_helper.StringResponse("")) self.mocker.replay() self.assertEqual((None, None, None), diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index ebfb24da..bd5d23fd 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -122,7 +122,7 @@ class TestMAASDataSource(mocker.MockerTestCase): headers_cb=my_headers_cb, exception_cb=mocker.ANY) resp = valid.get(key) - self.mocker.result(util.StringResponse(resp)) + self.mocker.result(url_helper.StringResponse(resp)) self.mocker.replay() (userdata, metadata) = DataSourceMAAS.read_maas_seed_url(my_seed, -- cgit v1.2.3 From 50c744fc4ebc0de5fc7fdfee6a874cb9cc62bba8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 27 Jan 2014 13:05:11 -0500 Subject: add 'pathprefix2dict' utility for use by DataSourceNoCloud --- cloudinit/util.py | 25 +++++++++++++++++++++ tests/unittests/test_pathprefix2dict.py | 40 +++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 tests/unittests/test_pathprefix2dict.py (limited 
to 'cloudinit') diff --git a/cloudinit/util.py b/cloudinit/util.py index b3332acd..f36e2733 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1889,3 +1889,28 @@ def expand_dotted_devname(dotted): return toks else: return (dotted, None) + + +def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep): + # return a dictionary populated with keys in 'required' and 'optional' + # by reading files in prefix + delim + entry + if required is None: + required = [] + if optional is None: + optional = [] + + missing = [] + ret = {} + for f in required + optional: + try: + ret[f] = load_file(base + delim + f, quiet=False) + except IOError as e: + if e.errno != errno.ENOENT: + raise + if f in required: + missing.append(f) + + if len(missing): + raise ValueError("Missing required files: %s", ','.join(missing)) + + return ret diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py new file mode 100644 index 00000000..c68c263c --- /dev/null +++ b/tests/unittests/test_pathprefix2dict.py @@ -0,0 +1,40 @@ +from cloudinit import util + +from mocker import MockerTestCase +from tests.unittests.helpers import populate_dir + + +class TestPathPrefix2Dict(MockerTestCase): + + def setUp(self): + self.tmp = self.makeDir() + + def test_required_only(self): + dirdata = {'f1': 'f1content', 'f2': 'f2content'} + populate_dir(self.tmp, dirdata) + + ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2']) + self.assertEqual(dirdata, ret) + + def test_required_missing(self): + dirdata = {'f1': 'f1content'} + populate_dir(self.tmp, dirdata) + kwargs = {'required': ['f1', 'f2']} + self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs) + + def test_no_required_and_optional(self): + dirdata = {'f1': 'f1c', 'f2': 'f2c'} + populate_dir(self.tmp, dirdata) + + ret = util.pathprefix2dict(self.tmp, required=None, + optional=['f1', 'f2']) + self.assertEqual(dirdata, ret) + + def test_required_and_optional(self): + dirdata = {'f1': 'f1c', 'f2': 'f2c'} + populate_dir(self.tmp, dirdata) + + ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2']) + self.assertEqual(dirdata, ret) + +# vi: ts=4 expandtab -- cgit v1.2.3 From 7659f06379122218e11ef445cd1f435ea5e74f40 Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Tue, 28 Jan 2014 19:09:46 +0100 Subject: Allow zeros when detecting IPv4 address (e.g. 192.168.0.1) --- cloudinit/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit') diff --git a/cloudinit/util.py b/cloudinit/util.py index b3332acd..61bcdeb1 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -369,7 +369,7 @@ def is_ipv4(instr): return False try: - toks = [x for x in toks if (int(x) < 256 and int(x) > 0)] + toks = [x for x in toks if (int(x) < 256 and int(x) >= 0)] except: return False -- cgit v1.2.3 From c1253945761c33bfa89289a63cdb8799fc18d019 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Jan 2014 14:03:20 -0500 Subject: DataSourceNoCloud: support reading vendor-data Here we add the ability to read vendor-data from a file named vendor-data at the same location as the user-data and meta-data files. At the moment, vendor-data is not read at all from 'seedfrom'. 
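As an illustration (not part of the patch), the seed-directory lookup this commit builds on reduces to a single pathprefix2dict call; the directory path below is hypothetical:

    from cloudinit import util

    # user-data and meta-data must exist; vendor-data is picked up
    # only when present.
    seed_dir = '/var/lib/cloud/seed/nocloud'  # example path
    seeded = util.pathprefix2dict(seed_dir,
                                  required=['user-data', 'meta-data'],
                                  optional=['vendor-data'])
    # 'seeded' maps each filename found to its raw contents; a missing
    # required file raises ValueError instead.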
--- cloudinit/sources/DataSourceNoCloud.py | 92 ++++++++++++++++++++++------------ 1 file changed, 60 insertions(+), 32 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 4ef92a56..cbaac29f 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -50,40 +50,47 @@ class DataSourceNoCloud(sources.DataSource): } found = [] - md = {} - ud = "" + mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""} try: # Parse the kernel command line, getting data passed in + md = {} if parse_cmdline_data(self.cmdline_id, md): found.append("cmdline") + mydata.update(md) except: util.logexc(LOG, "Unable to parse command line data") return False # Check to see if the seed dir has data. - seedret = {} - if util.read_optional_seed(seedret, base=self.seed_dir + "/"): - md = util.mergemanydict([md, seedret['meta-data']]) - ud = seedret['user-data'] + pp2d_kwargs = {'required': ['user-data', 'meta-data'], + 'optional': ['vendor-data']} + + try: + seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs) found.append(self.seed_dir) - LOG.debug("Using seeded cache data from %s", self.seed_dir) + LOG.debug("Using seeded data from %s", self.seed_dir) + except ValueError as e: + pass + + if self.seed_dir in found: + mydata = _merge_new_seed(mydata, seeded) # If the datasource config had a 'seedfrom' entry, then that takes # precedence over a 'seedfrom' that was found in a filesystem # but not over external media - if 'seedfrom' in self.ds_cfg and self.ds_cfg['seedfrom']: - found.append("ds_config") - md["seedfrom"] = self.ds_cfg['seedfrom'] + if self.ds_cfg.get('seedfrom'): + found.append("ds_config_seedfrom") + mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom'] - # if ds_cfg has 'user-data' and 'meta-data' + # fields appropriately named can also just come from the datasource + # config (ie, 'user-data', 'meta-data', 'vendor-data' there) if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg: - if self.ds_cfg['user-data']: - ud = self.ds_cfg['user-data'] - if self.ds_cfg['meta-data'] is not False: - md = util.mergemanydict([md, self.ds_cfg['meta-data']]) - if 'ds_config' not in found: - found.append("ds_config") + mydata = _merge_new_seed(mydata, self.ds_cfg) + found.append("ds_config") + + def _pp2d_callback(mp, data): + util.pathprefix2dict(mp, **data) label = self.ds_cfg.get('fs_label', "cidata") if label is not None: @@ -102,15 +109,21 @@ class DataSourceNoCloud(sources.DataSource): try: LOG.debug("Attempting to use data from %s", dev) - (newmd, newud) = util.mount_cb(dev, util.read_seeded) - md = util.mergemanydict([newmd, md]) - ud = newud + try: + seeded = util.mount_cb(dev, _pp2d_callback) + except ValueError as e: + if dev in label_list: + LOG.warn("device %s with label=%s not a" + "valid seed.", dev, label) + continue + + mydata = _merge_new_seed(mydata, seeded) # For seed from a device, the default mode is 'net'. # that is more likely to be what is desired. If they want # dsmode of local, then they must specify that. 
- if 'dsmode' not in md: - md['dsmode'] = "net" + if 'dsmode' not in mydata['meta-data']: + mydata['meta-data'] = "net" LOG.debug("Using data from %s", dev) found.append(dev) @@ -133,8 +146,8 @@ class DataSourceNoCloud(sources.DataSource): # attempt to seed the userdata / metadata from its value # its primarily value is in allowing the user to type less # on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg - if "seedfrom" in md: - seedfrom = md["seedfrom"] + if "seedfrom" in mydata['meta-data']: + seedfrom = mydata['meta-data']["seedfrom"] seedfound = False for proto in self.supported_seed_starts: if seedfrom.startswith(proto): @@ -144,7 +157,7 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False - if 'network-interfaces' in md: + if 'network-interfaces' in mydata['meta-data']: seeded_interfaces = self.dsmode # This could throw errors, but the user told us to do it @@ -153,25 +166,30 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Using seeded cache data from %s", seedfrom) # Values in the command line override those from the seed - md = util.mergemanydict([md, md_seed]) + mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], + md_seed]) + mydata['user-data'] = ud found.append(seedfrom) # Now that we have exhausted any other places merge in the defaults - md = util.mergemanydict([md, defaults]) + mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], + defaults]) # Update the network-interfaces if metadata had 'network-interfaces' # entry and this is the local datasource, or 'seedfrom' was used # and the source of the seed was self.dsmode # ('local' for NoCloud, 'net' for NoCloudNet') - if ('network-interfaces' in md and + if ('network-interfaces' in mydata['meta-data'] and (self.dsmode in ("local", seeded_interfaces))): LOG.debug("Updating network interfaces from %s", self) - self.distro.apply_network(md['network-interfaces']) + self.distro.apply_network( + mydata['meta-data']['network-interfaces']) - if md['dsmode'] == self.dsmode: + if mydata['meta-data']['dsmode'] == self.dsmode: self.seed = ",".join(found) - self.metadata = md - self.userdata_raw = ud + self.metadata = mydata['meta-data'] + self.userdata_raw = mydata['user-data'] + self.vendordata = mydata['vendor-data'] return True LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode']) @@ -222,6 +240,16 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): return True +def _merge_new_seed(cur, seeded): + ret = cur.copy() + ret['meta-data'] = util.mergemanydict([cur['meta-data'], + util.load_yaml(seeded['meta-data'])]) + ret['user-data'] = seeded['user-data'] + if 'vendor-data' in seeded: + ret['vendor-data'] = seeded['vendor-data'] + return ret + + class DataSourceNoCloudNet(DataSourceNoCloud): def __init__(self, sys_cfg, distro, paths): DataSourceNoCloud.__init__(self, sys_cfg, distro, paths) -- cgit v1.2.3 From 2083bc757515e92982497ba4ce27b07cea4409ce Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Jan 2014 14:04:06 -0500 Subject: cloudinit/util.py: fix pylint complaints --- cloudinit/util.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit') diff --git a/cloudinit/util.py b/cloudinit/util.py index f36e2733..c0121848 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -369,11 +369,11 @@ def is_ipv4(instr): return False try: - toks = [x for x in toks if (int(x) < 256 and int(x) > 0)] + toks = [x for x in toks if int(x) < 256 and int(x) > 0] except: return False - 
return (len(toks) == 4) + return len(toks) == 4 def get_cfg_option_bool(yobj, key, default=False): @@ -972,7 +972,7 @@ def gethostbyaddr(ip): def is_resolvable_url(url): """determine if this url is resolvable (existing or ip).""" - return (is_resolvable(urlparse.urlparse(url).hostname)) + return is_resolvable(urlparse.urlparse(url).hostname) def search_for_mirror(candidates): -- cgit v1.2.3
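For illustration only (these calls appear in no patch above), the earlier "Allow zeros when detecting IPv4 address" change makes is_ipv4 accept zero octets while still rejecting out-of-range or non-numeric ones:

    from cloudinit import util

    util.is_ipv4('192.168.0.1')   # True once zero octets are allowed
    util.is_ipv4('10.0.0.256')    # False: 256 is out of range
    util.is_ipv4('not.an.ip')     # False: octets must be integers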