From 2487e4f4498492f6b7bb0b144264bf7bd0544025 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 10 Aug 2012 18:58:14 -0700 Subject: Add the translation from the ubuntu format for nameservers and dns search servers and add in the writing of /etc/resolv.conf in rhel from that translation. --- cloudinit/distros/rhel.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 7fa69f03..700a98a4 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -68,12 +68,26 @@ class Distro(distros.Distro): def install_packages(self, pkglist): self.package_command('install', pkglist) + def _write_resolve(self, dns_servers, search_servers): + contents = [] + if dns_servers: + for s in dns_servers: + contents.append("nameserver %s" % (s)) + if search_servers: + contents.append("search %s" % (" ".join(search_servers))) + if contents: + resolve_rw_fn = self._paths.join(False, "/etc/resolv.conf") + contents.insert(0, '# Created by cloud-init') + util.write_file(resolve_rw_fn, "\n".join(contents), 0644) + def _write_network(self, settings): # TODO fix this... since this is the ubuntu format entries = translate_network(settings) LOG.debug("Translated ubuntu style network settings %s into %s", settings, entries) # Make the intermediate format as the rhel format... + nameservers = [] + searchservers = [] for (dev, info) in entries.iteritems(): net_fn = NETWORK_FN_TPL % (dev) net_ro_fn = self._paths.join(True, net_fn) @@ -102,11 +116,17 @@ class Distro(distros.Distro): if mac_addr: net_cfg["MACADDR"] = mac_addr lines = net_cfg.write() + if 'dns-nameservers' in info: + nameservers.extend(info['dns-nameservers']) + if 'dns-search' in info: + searchservers.extend(info['dns-search']) if not prev_exist: lines.insert(0, '# Created by cloud-init') w_contents = "\n".join(lines) net_rw_fn = self._paths.join(False, net_fn) util.write_file(net_rw_fn, w_contents, 0644) + if nameservers or searchservers: + self._write_resolve(nameservers, searchservers) def set_hostname(self, hostname): out_fn = self._paths.join(False, '/etc/sysconfig/network') @@ -314,6 +334,12 @@ def translate_network(settings): val = info[k].strip().lower() if val: iface_info[k] = val + # Name server info provided?? + if 'dns-nameservers' in info: + iface_info['dns-nameservers'] = info['dns-nameservers'].split() + # Name server search info provided?? + if 'dns-search' in info: + iface_info['dns-search'] = info['dns-search'].split() # Is any mac address spoofing going on?? 
if 'hwaddress' in info: hw_info = info['hwaddress'].lower().strip() -- cgit v1.2.3 From c76daa63ecfa0901769f1cb8211465c06b33770c Mon Sep 17 00:00:00 2001 From: Thomas Hervé Date: Wed, 5 Sep 2012 18:55:58 +0200 Subject: Check if the config was specified --- cloudinit/config/cc_landscape.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index d351d941..7cfb8296 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -79,7 +79,8 @@ def handle(_name, cfg, cloud, log, _args): util.write_file(lsc_client_fn, contents.getvalue()) log.debug("Wrote landscape config file to %s", lsc_client_fn) - util.write_file(LS_DEFAULT_FILE, "RUN=1\n") + if ls_cloudcfg: + util.write_file(LS_DEFAULT_FILE, "RUN=1\n") def merge_together(objs): -- cgit v1.2.3 From 01ad8fc5b1928c01367b32022ceab09f1dc5dc26 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 6 Sep 2012 18:12:21 -0700 Subject: Add capturing of basic signal handlers to handle those signals more gracefully and with better messaging than what comes builtin. --- bin/cloud-init | 4 +++ cloudinit/signal_handler.py | 65 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 cloudinit/signal_handler.py diff --git a/bin/cloud-init b/bin/cloud-init index 1f017475..a6ad14a6 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -35,6 +35,7 @@ if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")): from cloudinit import log as logging from cloudinit import netinfo +from cloudinit import signal_handler from cloudinit import sources from cloudinit import stages from cloudinit import templater @@ -494,6 +495,9 @@ def main(): if args.debug: logging.setupBasicLogging() + # Setup signal handlers before running + signal_handler.attach_handlers() + (name, functor) = args.action return functor(name, args) diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py new file mode 100644 index 00000000..244293b4 --- /dev/null +++ b/cloudinit/signal_handler.py @@ -0,0 +1,65 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Scott Moser +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import inspect +import signal +import sys + +from StringIO import StringIO + +from cloudinit import util +from cloudinit import version as vr + + +EXIT_FOR = { + signal.SIGINT: ('Cloud-init %(version)s interrupted, exiting...', 1), + signal.SIGTERM: ('Cloud-init %(version)s terminated, exiting...', 1), + # signal.SIGKILL: ('Cloud-init killed, exiting...', 1), - Can't be caught... 
+ signal.SIGABRT: ('Cloud-init %(version)s aborted, exiting...', 1), +} + + +def _pprint_frame(frame, depth=1, max_depth=3): + if depth > max_depth or not frame: + return + frame_info = inspect.getframeinfo(frame) + prefix = " " * (depth * 2) + contents = StringIO() + contents.write("%sFilename: %s\n" % (prefix, frame_info.filename)) + contents.write("%sFunction: %s\n" % (prefix, frame_info.function)) + contents.write("%sLine number: %s\n" % (prefix, frame_info.lineno)) + util.multi_log(contents.getvalue()) + _pprint_frame(frame.f_back, depth + 1, max_depth) + + +def _handle_exit(signum, frame): + (msg, rc) = EXIT_FOR[signum] + msg = msg % ({'version': vr.version()}) + util.multi_log("%s\n" % (msg)) + _pprint_frame(frame) + sys.exit(rc) + + +def attach_handlers(): + sigs_attached = 0 + for signum in EXIT_FOR.keys(): + signal.signal(signum, _handle_exit) + sigs_attached += len(EXIT_FOR) + return sigs_attached -- cgit v1.2.3 From 0f385445522cf8fe76391c0c0c855b6fa7ad9f50 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 6 Sep 2012 18:14:03 -0700 Subject: Fix pylintness. --- cloudinit/signal_handler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py index 244293b4..96b618f3 100644 --- a/cloudinit/signal_handler.py +++ b/cloudinit/signal_handler.py @@ -31,7 +31,8 @@ from cloudinit import version as vr EXIT_FOR = { signal.SIGINT: ('Cloud-init %(version)s interrupted, exiting...', 1), signal.SIGTERM: ('Cloud-init %(version)s terminated, exiting...', 1), - # signal.SIGKILL: ('Cloud-init killed, exiting...', 1), - Can't be caught... + # Can't be caught... + # signal.SIGKILL: ('Cloud-init killed, exiting...', 1), signal.SIGABRT: ('Cloud-init %(version)s aborted, exiting...', 1), } -- cgit v1.2.3 From 67bf7722398278e7089098dfd683389ef4e418c5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Sep 2012 14:01:04 -0400 Subject: add cloud-init-container.conf This changes the way that we avoid cloud-init-nonet hanging in a container. Previously, under LP: #800824 we tried 'start networking', but that caused issues described in LP: #1031065. Here, we emit the net-device-added for any devices that have not yet been seen. LP: #1031065 --- upstart/cloud-init-container.conf | 41 +++++++++++++++++++++++++++++++++++++++ upstart/cloud-init-nonet.conf | 2 -- 2 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 upstart/cloud-init-container.conf diff --git a/upstart/cloud-init-container.conf b/upstart/cloud-init-container.conf new file mode 100644 index 00000000..b3123721 --- /dev/null +++ b/upstart/cloud-init-container.conf @@ -0,0 +1,41 @@ +# in a lxc container, events for network interfaces do not +# get created or may be missed. This helps cloud-init-nonet along +# by emitting those events if they have not been emitted. + +start on container +stop on static-network-up +task + +emits net-device-added + +console output + +script + # if we are inside a container, then we may have to emit the ifup + # events for 'auto' network devices. + set -f + + # from /etc/network/if-up.d/upstart + MARK_DEV_PREFIX="/run/network/ifup." + MARK_STATIC_NETWORK_EMITTED="/run/network/static-network-up-emitted" + # if the all static network interfaces are already up, nothing to do + [ -f "$MARK_STATIC_NETWORK_EMITTED" ] && exit 0 + + # get list of all 'auto' interfaces. if there are none, nothing to do. 
+ auto_list=$(ifquery --list --allow auto 2>/dev/null) || : + [ -z "$auto_list" ] && exit 0 + set -- ${auto_list} + [ "$*" = "lo" ] && exit 0 + + to_emit="" + for iface in "$@"; do + [ "$iface" = "lo" ] && continue + # skip interfaces that are already up + [ -f "${MARK_DEV_PREFIX}${iface}" ] && continue + initctl emit --no-wait net-device-added "INTERFACE=$iface" && + emitted="${emitted} ${iface}" || : + done + + [ -z "${emitted# }" ] || + echo "${UPSTART_JOB}: emitted ifup for ${emitted# }" +end script diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf index 7b69e584..118ffc1c 100644 --- a/upstart/cloud-init-nonet.conf +++ b/upstart/cloud-init-nonet.conf @@ -18,8 +18,6 @@ script [ -f /var/lib/cloud/instance/obj.pkl ] && exit 0 - start networking >/dev/null - short=10; long=120; sleep ${short} echo $UPSTART_JOB "waiting ${long} seconds for a network device." -- cgit v1.2.3 From d2f00fd2af9417a91c78e9cab5747cd6e218aa39 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Sep 2012 20:02:37 -0400 Subject: only emit if there is a device present. --- upstart/cloud-init-container.conf | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/upstart/cloud-init-container.conf b/upstart/cloud-init-container.conf index b3123721..497e75c8 100644 --- a/upstart/cloud-init-container.conf +++ b/upstart/cloud-init-container.conf @@ -27,13 +27,23 @@ script set -- ${auto_list} [ "$*" = "lo" ] && exit 0 - to_emit="" + # we only want to emit for interfaces that do not exist, so filter + # out anything that does not exist. for iface in "$@"; do [ "$iface" = "lo" ] && continue # skip interfaces that are already up [ -f "${MARK_DEV_PREFIX}${iface}" ] && continue + + if [ -d /sys/net ]; then + # if /sys is mounted, and there is no /sys/net/iface, then no device + [ -e "/sys/net/$iface" ] && continue + else + # sys wasn't mounted, so just check via 'ifconfig' + ifconfig "$iface" >/dev/null 2>&1 || continue + fi initctl emit --no-wait net-device-added "INTERFACE=$iface" && - emitted="${emitted} ${iface}" || : + emitted="$emitted $iface" || + echo "warn: ${UPSTART_JOB} failed to emit net-device-added INTERFACE=$iface" done [ -z "${emitted# }" ] || -- cgit v1.2.3 From 18cbd9ae7872f83b4eaccdbdf2cf5023b92d7812 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 13 Sep 2012 20:07:18 -0400 Subject: remove trailing whitespace --- upstart/cloud-init-container.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/upstart/cloud-init-container.conf b/upstart/cloud-init-container.conf index 497e75c8..051c6e50 100644 --- a/upstart/cloud-init-container.conf +++ b/upstart/cloud-init-container.conf @@ -40,7 +40,7 @@ script else # sys wasn't mounted, so just check via 'ifconfig' ifconfig "$iface" >/dev/null 2>&1 || continue - fi + fi initctl emit --no-wait net-device-added "INTERFACE=$iface" && emitted="$emitted $iface" || echo "warn: ${UPSTART_JOB} failed to emit net-device-added INTERFACE=$iface" -- cgit v1.2.3 From acdc3c45a3deefaf599627eee889d11299525c4c Mon Sep 17 00:00:00 2001 From: Andy Grimm Date: Fri, 14 Sep 2012 11:50:11 -0400 Subject: Fix hostname derived from IP. 
(LP: 1050962) --- cloudinit/sources/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 3f611d44..6f126091 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -173,7 +173,7 @@ class DataSource(object): # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx lhost = self.metadata['local-hostname'] if util.is_ipv4(lhost): - toks = "ip-%s" % lhost.replace(".", "-") + toks = [ "ip-%s" % lhost.replace(".", "-") ] else: toks = lhost.split(".") -- cgit v1.2.3 From 2899c638c04907461024dff0d32c1cc57ae4b6d4 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 14 Sep 2012 14:32:31 -0700 Subject: Add a basic signal test and write out all the signal information in one block instead of many. --- cloudinit/signal_handler.py | 28 +++++++++++++++++++------ tests/unittests/test_signal.py | 46 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 6 deletions(-) create mode 100644 tests/unittests/test_signal.py diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py index 96b618f3..b6ea592b 100644 --- a/cloudinit/signal_handler.py +++ b/cloudinit/signal_handler.py @@ -24,10 +24,14 @@ import sys from StringIO import StringIO +from cloudinit import log as logging from cloudinit import util from cloudinit import version as vr +LOG = logging.getLogger(__name__) + +BACK_FRAME_TRACE_DEPTH = 3 EXIT_FOR = { signal.SIGINT: ('Cloud-init %(version)s interrupted, exiting...', 1), signal.SIGTERM: ('Cloud-init %(version)s terminated, exiting...', 1), @@ -37,24 +41,36 @@ EXIT_FOR = { } -def _pprint_frame(frame, depth=1, max_depth=3): +def _pprint_frame(frame, depth, max_depth, contents): if depth > max_depth or not frame: return frame_info = inspect.getframeinfo(frame) prefix = " " * (depth * 2) - contents = StringIO() contents.write("%sFilename: %s\n" % (prefix, frame_info.filename)) contents.write("%sFunction: %s\n" % (prefix, frame_info.function)) contents.write("%sLine number: %s\n" % (prefix, frame_info.lineno)) - util.multi_log(contents.getvalue()) - _pprint_frame(frame.f_back, depth + 1, max_depth) + _pprint_frame(frame.f_back, depth + 1, max_depth, contents) def _handle_exit(signum, frame): (msg, rc) = EXIT_FOR[signum] + # Reset logging so that only the basic logging + # is active since the state of syslog or other + # logging processes is unknown if we are being + # signaled by a reboot process which is external and + # killing other processes while this process is being + # finished off... 
+ try: + logging.resetLogging() + logging.setupBasicLogging() + except: + pass msg = msg % ({'version': vr.version()}) - util.multi_log("%s\n" % (msg)) - _pprint_frame(frame) + contents = StringIO() + contents.write("%s\n" % (msg)) + _pprint_frame(frame, 1, BACK_FRAME_TRACE_DEPTH, contents) + util.multi_log(contents.getvalue(), + console=True, stderr=False, log=LOG) sys.exit(rc) diff --git a/tests/unittests/test_signal.py b/tests/unittests/test_signal.py new file mode 100644 index 00000000..02fd1ef1 --- /dev/null +++ b/tests/unittests/test_signal.py @@ -0,0 +1,46 @@ +"""Tests for handling of signals within cloud init.""" + +import os +import subprocess +import sys +import time + +from StringIO import StringIO + +from mocker import MockerTestCase + + +class TestSignal(MockerTestCase): + + def test_signal_output(self): + + # This is done since nose/unittest is actually setting up + # output capturing, signal handling itself, and its easier + # to just call out to cloudinit with a loop and see what the result is + run_what = [sys.executable, + '-c', ('import time; from cloudinit import signal_handler;' + 'signal_handler.attach_handlers(); time.sleep(120)')] + + pc_info = subprocess.Popen(run_what, stderr=subprocess.PIPE, stdout=subprocess.PIPE) + + # Let it start up + time.sleep(0.5) + dead = None + while dead is None: + pc_info.terminate() + # Ok not dead yet. try again + time.sleep(0.5) + dead = pc_info.poll() + + outputs = StringIO() + if pc_info.stdout: + outputs.write(pc_info.stdout.read()) + if pc_info.stderr: + outputs.write(pc_info.stderr.read()) + val = outputs.getvalue() + print val + + # Check some of the outputs that should of happened + self.assertEquals(1, pc_info.wait()) + self.assertTrue(len(val) != 0) + self.assertTrue(val.find("terminated") != -1) -- cgit v1.2.3 From 5f33046a1e059f23ce1f4aceeec33813c57a7b70 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 14 Sep 2012 14:35:32 -0700 Subject: Cleanup pylint warnings. 
--- tests/unittests/test_signal.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/unittests/test_signal.py b/tests/unittests/test_signal.py index 02fd1ef1..8cb31fd7 100644 --- a/tests/unittests/test_signal.py +++ b/tests/unittests/test_signal.py @@ -1,6 +1,5 @@ """Tests for handling of signals within cloud init.""" -import os import subprocess import sys import time @@ -17,11 +16,14 @@ class TestSignal(MockerTestCase): # This is done since nose/unittest is actually setting up # output capturing, signal handling itself, and its easier # to just call out to cloudinit with a loop and see what the result is - run_what = [sys.executable, - '-c', ('import time; from cloudinit import signal_handler;' - 'signal_handler.attach_handlers(); time.sleep(120)')] + run_what = [sys.executable, + '-c', + ('import time; from cloudinit import signal_handler;' + 'signal_handler.attach_handlers(); time.sleep(120)')] - pc_info = subprocess.Popen(run_what, stderr=subprocess.PIPE, stdout=subprocess.PIPE) + pc_info = subprocess.Popen(run_what, + stderr=subprocess.PIPE, + stdout=subprocess.PIPE) # Let it start up time.sleep(0.5) @@ -38,7 +40,6 @@ class TestSignal(MockerTestCase): if pc_info.stderr: outputs.write(pc_info.stderr.read()) val = outputs.getvalue() - print val # Check some of the outputs that should of happened self.assertEquals(1, pc_info.wait()) -- cgit v1.2.3 From c558334c6564341f97ad024f2687044534efc17f Mon Sep 17 00:00:00 2001 From: Garrett Holmstrom Date: Mon, 17 Sep 2012 11:23:51 -0700 Subject: Fill in basic info for the Fedora distro --- cloudinit/distros/fedora.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py index c777845d..9f76a116 100644 --- a/cloudinit/distros/fedora.py +++ b/cloudinit/distros/fedora.py @@ -28,4 +28,5 @@ LOG = logging.getLogger(__name__) class Distro(rhel.Distro): - pass + distro_name = 'fedora' + default_user = 'ec2-user' -- cgit v1.2.3 From 11db1e91ddc047728b0161eb1da30e54084ae5eb Mon Sep 17 00:00:00 2001 From: Garrett Holmstrom Date: Tue, 18 Sep 2012 10:27:41 -0700 Subject: Add support for useradd --selinux-user'' --- cloudinit/distros/__init__.py | 1 + doc/examples/cloud-config-user-groups.txt | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 40c6aa4f..3e9d934d 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -239,6 +239,7 @@ class Distro(object): "shell": '--shell', "expiredate": '--expiredate', "inactive": '--inactive', + "selinux_user": '--selinux-user', } adduser_opts_flags = { diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index d0b3e2ff..1da0d717 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -12,6 +12,7 @@ users: gecos: Foo B. Bar primary-group: foobar groups: users + selinux-user: staff_u expiredate: 2012-09-01 ssh-import-id: foobar lock-passwd: false @@ -38,6 +39,9 @@ users: # primary-group: define the primary group. Defaults to a new group created # named after the user. # groups: Optional. Additional groups to add the user to. Defaults to none +# selinux-user: Optional. The SELinux user for the user's login, such as +# "staff_u". When this is omitted the system will select the default +# SELinux user. # lock-passwd: Defaults to true. 
Lock the password to disable password login # inactive: Create the user as inactive # passwd: The hash -- not the password itself -- of the password you want -- cgit v1.2.3 From a32af38b525e4bb7a1d8317e136073d7a9323f3e Mon Sep 17 00:00:00 2001 From: Garrett Holmstrom Date: Tue, 18 Sep 2012 10:55:12 -0700 Subject: Make update_package_sources stop upgrading packages on RHEL --- cloudinit/distros/rhel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index d81ee5fb..7f90cd8d 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -208,7 +208,7 @@ class Distro(distros.Distro): def update_package_sources(self): self._runner.run("update-sources", self.package_command, - ["update"], freq=PER_INSTANCE) + ["clean", "expire-cache"], freq=PER_INSTANCE) # This class helps adjust the configobj -- cgit v1.2.3 From 8be10579588d4b2ebb425fa58f01856947c8d3c9 Mon Sep 17 00:00:00 2001 From: Robie Basak Date: Wed, 19 Sep 2012 10:07:24 +0100 Subject: Fix ports.ubuntu.com mirror locations --- config/cloud.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/cloud.cfg b/config/cloud.cfg index d5079721..b3411d11 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -88,6 +88,6 @@ system_info: security: [] - arches: [armhf, armel, default] failsafe: - primary: http://ports.ubuntu.com/ubuntu - security: http://ports.ubuntu.com/ubuntu + primary: http://ports.ubuntu.com/ubuntu-ports + security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -- cgit v1.2.3 From 91f1fc58e134fea2ac3ff04301bb0ea51f8797b1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Sep 2012 11:31:06 -0400 Subject: write trailing newlines on generated files This adds trailing newlines to /etc/default/locale, /etc/hostname, /etc/timezone. --- cloudinit/distros/debian.py | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index da8c1a5b..da27f780 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -46,11 +46,8 @@ class Distro(distros.Distro): out_fn = self._paths.join(False, '/etc/default/locale') util.subp(['locale-gen', locale], capture=False) util.subp(['update-locale', locale], capture=False) - contents = [ - "# Created by cloud-init", - 'LANG="%s"' % (locale), - ] - util.write_file(out_fn, "\n".join(contents)) + lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""] + util.write_file(out_fn, "\n".join(lines)) def install_packages(self, pkglist): self.update_package_sources() @@ -69,11 +66,9 @@ class Distro(distros.Distro): util.subp(['hostname', hostname]) def _write_hostname(self, hostname, out_fn): - lines = [] - lines.append("# Created by cloud-init") - lines.append(str(hostname)) - contents = "\n".join(lines) - util.write_file(out_fn, contents, 0644) + # "" gives trailing newline. 
+ lines = ["# Created by cloud-init", str(hostname), ""] + util.write_file(out_fn, '\n'.join(lines), 0644) def update_hostname(self, hostname, prev_fn): hostname_prev = self._read_hostname(prev_fn) @@ -123,13 +118,10 @@ class Distro(distros.Distro): if not os.path.isfile(tz_file): raise RuntimeError(("Invalid timezone %s," " no file found at %s") % (tz, tz_file)) - tz_lines = [ - "# Created by cloud-init", - str(tz), - ] - tz_contents = "\n".join(tz_lines) + # "" provides trailing newline during join + tz_lines = ["# Created by cloud-init", str(tz), ""] tz_fn = self._paths.join(False, "/etc/timezone") - util.write_file(tz_fn, tz_contents) + util.write_file(tz_fn, "\n".join(tz_lines)) util.copy(tz_file, self._paths.join(False, "/etc/localtime")) def package_command(self, command, args=None): -- cgit v1.2.3 From 49f5833d03b41663ccd705521136718f6230eb4b Mon Sep 17 00:00:00 2001 From: Garrett Holmstrom Date: Wed, 19 Sep 2012 09:42:08 -0700 Subject: Actually download yum metadata with update_package_sources --- cloudinit/distros/rhel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 7f90cd8d..9b95d0d4 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -208,7 +208,7 @@ class Distro(distros.Distro): def update_package_sources(self): self._runner.run("update-sources", self.package_command, - ["clean", "expire-cache"], freq=PER_INSTANCE) + ["makecache"], freq=PER_INSTANCE) # This class helps adjust the configobj -- cgit v1.2.3 From b2be60ef19ffa95546dd64ea91913c8fe0096719 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Sep 2012 15:07:03 -0400 Subject: do not write a comment in /etc/hostname. Network Manager (LP: #1053048), and apparently fedora/redhat do not like comments in this file. LP: #1052664 --- cloudinit/distros/debian.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index da27f780..5b4aa9f8 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -67,8 +67,7 @@ class Distro(distros.Distro): def _write_hostname(self, hostname, out_fn): # "" gives trailing newline. - lines = ["# Created by cloud-init", str(hostname), ""] - util.write_file(out_fn, '\n'.join(lines), 0644) + util.write_file(out_fn, "%s\n" % str(hostname), 0644) def update_hostname(self, hostname, prev_fn): hostname_prev = self._read_hostname(prev_fn) -- cgit v1.2.3 From f9926d88d6396b2432ed3f9333aedfa44a670ff1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 19 Sep 2012 13:11:51 -0700 Subject: Adding a dual fallback log handler that will be monkey patched in to replace the base handler. That patching isn't quite there yet but WIP. --- cloudinit/monkey_patch.py | 74 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 cloudinit/monkey_patch.py diff --git a/cloudinit/monkey_patch.py b/cloudinit/monkey_patch.py new file mode 100644 index 00000000..24b472ba --- /dev/null +++ b/cloudinit/monkey_patch.py @@ -0,0 +1,74 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Scott Moser +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import logging +import sys + +FALL_FORMAT = 'FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: %(message)s' + +Handler = logging.Handler + +class QuietStreamHandler(Handler): + def handleError(self, record): + pass + + +class FallbackHandler(Handler): + def __init__(self, level=logging.NOTSET, fb_handler=None): + super(FallbackHandler, self).__init__(level) + if not fb_handler: + self.fallback_handler = QuietStreamHandler(sys.stderr) + else: + self.fallback_handler = fb_handler + self.fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT)) + self.fallback_handler.setLevel(level) + + def flush(self): + super(FallbackHandler, self).flush() + self.fallback_handler.flush() + + def close(self): + super(FallbackHandler, self).close(self) + self.fallback_handler.close() + + def setLevel(self, level): + super(FallbackHandler, self).setLevel(self, level) + self.fallback_logger.setLevel(level) + + def handleError(self, record): + try: + self.fallback_logger.handle(record) + # Always ensure this one is flushed... + self.fallback_logger.flush() + except: + pass + + +def _patch_logging(): + # Replace handler with one that will be more + # tolerant of errors in that it can avoid + # re-notifying on exceptions and when errors + # do occur, it can at least try to write to + # sys.stderr using a fallback logger + logging.Handler = FallbackHandler + + +def patch(): + _patch_logging() -- cgit v1.2.3 From a637b5f74948324173219a2e7817c7f19d864281 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Sep 2012 16:26:45 -0400 Subject: change exit messages to include signal name --- cloudinit/signal_handler.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py index b6ea592b..34e075a2 100644 --- a/cloudinit/signal_handler.py +++ b/cloudinit/signal_handler.py @@ -33,11 +33,11 @@ LOG = logging.getLogger(__name__) BACK_FRAME_TRACE_DEPTH = 3 EXIT_FOR = { - signal.SIGINT: ('Cloud-init %(version)s interrupted, exiting...', 1), - signal.SIGTERM: ('Cloud-init %(version)s terminated, exiting...', 1), + signal.SIGINT: ('Cloud-init %(version)s received SIGINT, exiting...', 1), + signal.SIGTERM: ('Cloud-init %(version)s received SIGTERM, exiting...', 1), # Can't be caught... # signal.SIGKILL: ('Cloud-init killed, exiting...', 1), - signal.SIGABRT: ('Cloud-init %(version)s aborted, exiting...', 1), + signal.SIGABRT: ('Cloud-init %(version)s received SIGABRT, exiting...', 1), } -- cgit v1.2.3 From 24d2457c2eab89f135e2b483f05d52eee0169af7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Sep 2012 16:26:58 -0400 Subject: remove unit test. the unit test required access to /dev/console due to the logging. 
--- tests/unittests/test_signal.py | 47 ------------------------------------------ 1 file changed, 47 deletions(-) delete mode 100644 tests/unittests/test_signal.py diff --git a/tests/unittests/test_signal.py b/tests/unittests/test_signal.py deleted file mode 100644 index 8cb31fd7..00000000 --- a/tests/unittests/test_signal.py +++ /dev/null @@ -1,47 +0,0 @@ -"""Tests for handling of signals within cloud init.""" - -import subprocess -import sys -import time - -from StringIO import StringIO - -from mocker import MockerTestCase - - -class TestSignal(MockerTestCase): - - def test_signal_output(self): - - # This is done since nose/unittest is actually setting up - # output capturing, signal handling itself, and its easier - # to just call out to cloudinit with a loop and see what the result is - run_what = [sys.executable, - '-c', - ('import time; from cloudinit import signal_handler;' - 'signal_handler.attach_handlers(); time.sleep(120)')] - - pc_info = subprocess.Popen(run_what, - stderr=subprocess.PIPE, - stdout=subprocess.PIPE) - - # Let it start up - time.sleep(0.5) - dead = None - while dead is None: - pc_info.terminate() - # Ok not dead yet. try again - time.sleep(0.5) - dead = pc_info.poll() - - outputs = StringIO() - if pc_info.stdout: - outputs.write(pc_info.stdout.read()) - if pc_info.stderr: - outputs.write(pc_info.stderr.read()) - val = outputs.getvalue() - - # Check some of the outputs that should of happened - self.assertEquals(1, pc_info.wait()) - self.assertTrue(len(val) != 0) - self.assertTrue(val.find("terminated") != -1) -- cgit v1.2.3 From cef4e846797ed7aee29e18b00810887c67591abc Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Sep 2012 16:33:56 -0400 Subject: do not disable logging on signal I had previously asked for this, but we're hoping to handle it in a more generic way. Just because we receive a signal doesn't mean that all logging is broken. The more general solution we'll chase is to catch a failure of a log message and fall back if necessary across the board. That way cloud-init will still send logging to the right places on a user interupt. --- cloudinit/signal_handler.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py index 34e075a2..40b0c94c 100644 --- a/cloudinit/signal_handler.py +++ b/cloudinit/signal_handler.py @@ -54,17 +54,6 @@ def _pprint_frame(frame, depth, max_depth, contents): def _handle_exit(signum, frame): (msg, rc) = EXIT_FOR[signum] - # Reset logging so that only the basic logging - # is active since the state of syslog or other - # logging processes is unknown if we are being - # signaled by a reboot process which is external and - # killing other processes while this process is being - # finished off... - try: - logging.resetLogging() - logging.setupBasicLogging() - except: - pass msg = msg % ({'version': vr.version()}) contents = StringIO() contents.write("%s\n" % (msg)) -- cgit v1.2.3 From d4ca178d801983bcb43bafd18084c144b74a4c9a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 19 Sep 2012 15:33:36 -0700 Subject: Get the fallback working. 
--- cloudinit/monkey_patch.py | 74 ----------------------------------------------- cloudinit/patcher.py | 56 +++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 74 deletions(-) delete mode 100644 cloudinit/monkey_patch.py create mode 100644 cloudinit/patcher.py diff --git a/cloudinit/monkey_patch.py b/cloudinit/monkey_patch.py deleted file mode 100644 index 24b472ba..00000000 --- a/cloudinit/monkey_patch.py +++ /dev/null @@ -1,74 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Scott Moser -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import logging -import sys - -FALL_FORMAT = 'FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: %(message)s' - -Handler = logging.Handler - -class QuietStreamHandler(Handler): - def handleError(self, record): - pass - - -class FallbackHandler(Handler): - def __init__(self, level=logging.NOTSET, fb_handler=None): - super(FallbackHandler, self).__init__(level) - if not fb_handler: - self.fallback_handler = QuietStreamHandler(sys.stderr) - else: - self.fallback_handler = fb_handler - self.fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT)) - self.fallback_handler.setLevel(level) - - def flush(self): - super(FallbackHandler, self).flush() - self.fallback_handler.flush() - - def close(self): - super(FallbackHandler, self).close(self) - self.fallback_handler.close() - - def setLevel(self, level): - super(FallbackHandler, self).setLevel(self, level) - self.fallback_logger.setLevel(level) - - def handleError(self, record): - try: - self.fallback_logger.handle(record) - # Always ensure this one is flushed... - self.fallback_logger.flush() - except: - pass - - -def _patch_logging(): - # Replace handler with one that will be more - # tolerant of errors in that it can avoid - # re-notifying on exceptions and when errors - # do occur, it can at least try to write to - # sys.stderr using a fallback logger - logging.Handler = FallbackHandler - - -def patch(): - _patch_logging() diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py new file mode 100644 index 00000000..8921a79a --- /dev/null +++ b/cloudinit/patcher.py @@ -0,0 +1,56 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Scott Moser +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import imp +import logging +import sys + +# Default fallback format +FALL_FORMAT = 'FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: %(message)s' + + +class QuietStreamHandler(logging.StreamHandler): + def handleError(self, record): + pass + + +def _patch_logging(): + # Replace 'handleError' with one that will be more + # tolerant of errors in that it can avoid + # re-notifying on exceptions and when errors + # do occur, it can at least try to write to + # sys.stderr using a fallback logger + fallback_handler = QuietStreamHandler(sys.stderr) + fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT)) + def handleError(self, record): + try: + fallback_handler.handle(record) + fallback_handler.flush() + except IOError: + pass + setattr(logging.Handler, 'handleError', handleError) + + +def patch(): + imp.acquire_lock() + try: + _patch_logging() + finally: + imp.release_lock() -- cgit v1.2.3 From 5eef154928c75499c545c8c242bcf11d9ccf70d1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 19 Sep 2012 16:10:15 -0700 Subject: First thing that we do after we can start importing is to patch the functionality before it gets reimported. --- bin/cloud-init | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bin/cloud-init b/bin/cloud-init index 1f017475..b2a6c3ea 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -33,6 +33,9 @@ possible_topdir = os.path.normpath(os.path.join(os.path.abspath( if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")): sys.path.insert(0, possible_topdir) +from cloudinit import patcher +patcher.patch() + from cloudinit import log as logging from cloudinit import netinfo from cloudinit import sources -- cgit v1.2.3 From 3912209cdb075b7af8f87c1e41170fd8614ca520 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Sep 2012 20:19:15 -0400 Subject: doc: document 'tags' as comma delimited strings LP: #1042764 --- doc/examples/cloud-config-landscape.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt index 12c8101f..e4d23cc9 100644 --- a/doc/examples/cloud-config-landscape.txt +++ b/doc/examples/cloud-config-landscape.txt @@ -4,10 +4,12 @@ # will be basically rendered into a ConfigObj formated file # under the '[client]' section of /etc/landscape/client.conf # +# Note: 'tags' should be specified as a comma delimited string +# rather than a list. landscape: client: url: "https://landscape.canonical.com/message-system" ping_url: "http://landscape.canonical.com/ping" data_path: "/var/lib/landscape/client" http_proxy: "http://my.proxy.com/foobar" - tags: [ server, cloud ] + tags: "server,cloud" -- cgit v1.2.3 From 6208fe41e4e73d1f14fd4afc152565c8908684a2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 19 Sep 2012 18:40:20 -0700 Subject: Fix the ifup so that if a list of devices is provided then each interface is brought up individually instead of using the '--all' which isn't on rhel. The default debian behavior will be to use this still though as it overrides the new bring up interfaces function for this case. 
--- cloudinit/distros/__init__.py | 29 +++++++++++++++-------------- cloudinit/distros/debian.py | 6 ++++++ cloudinit/distros/fedora.py | 1 - cloudinit/distros/rhel.py | 2 ++ cloudinit/distros/ubuntu.py | 1 - 5 files changed, 23 insertions(+), 16 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3e9d934d..442e75b4 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -34,12 +34,6 @@ from cloudinit import log as logging from cloudinit import ssh_util from cloudinit import util -# TODO(harlowja): Make this via config?? -IFACE_ACTIONS = { - 'up': ['ifup', '--all'], - 'down': ['ifdown', '--all'], -} - LOG = logging.getLogger(__name__) @@ -134,10 +128,10 @@ class Distro(object): def apply_network(self, settings, bring_up=True): # Write it out - self._write_network(settings) + dev_names = self._write_network(settings) # Now try to bring them up if bring_up: - return self._interface_action('up') + return self._bring_up_interfaces(dev_names) return False @abc.abstractmethod @@ -189,13 +183,11 @@ class Distro(object): util.write_file(self._paths.join(False, "/etc/hosts"), contents, mode=0644) - def _interface_action(self, action): - if action not in IFACE_ACTIONS: - raise NotImplementedError("Unknown interface action %s" % (action)) - cmd = IFACE_ACTIONS[action] + def _bring_up_interface(self, device_name): + cmd = ['ifup', device_name] + LOG.debug("Attempting to run bring up interface %s using command %s", + device_name, cmd) try: - LOG.debug("Attempting to run %s interface action using command %s", - action, cmd) (_out, err) = util.subp(cmd) if len(err): LOG.warn("Running %s resulted in stderr output: %s", cmd, err) @@ -204,6 +196,15 @@ class Distro(object): util.logexc(LOG, "Running interface command %s failed", cmd) return False + def _bring_up_interfaces(self, device_names): + am_failed = 0 + for d in device_names: + if not self._bring_up_interface(d): + am_failed += 1 + if am_failed == 0: + return True + return False + def isuser(self, name): try: if pwd.getpwnam(name): diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 5b4aa9f8..777c8530 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -56,6 +56,12 @@ class Distro(distros.Distro): def _write_network(self, settings): net_fn = self._paths.join(False, "/etc/network/interfaces") util.write_file(net_fn, settings) + return [] + + def _bring_up_interfaces(self, device_names): + if not device_names: + device_names = ['--all'] + return distros.Distro._bring_up_interfaces(self, device_names) def set_hostname(self, hostname): out_fn = self._paths.join(False, "/etc/hostname") diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py index 9f76a116..f65a820d 100644 --- a/cloudinit/distros/fedora.py +++ b/cloudinit/distros/fedora.py @@ -28,5 +28,4 @@ LOG = logging.getLogger(__name__) class Distro(rhel.Distro): - distro_name = 'fedora' default_user = 'ec2-user' diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index ec4dc2cc..0e451b02 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -88,6 +88,7 @@ class Distro(distros.Distro): # Make the intermediate format as the rhel format... 
nameservers = [] searchservers = [] + dev_names = entries.keys() for (dev, info) in entries.iteritems(): net_fn = NETWORK_FN_TPL % (dev) net_ro_fn = self._paths.join(True, net_fn) @@ -127,6 +128,7 @@ class Distro(distros.Distro): util.write_file(net_rw_fn, w_contents, 0644) if nameservers or searchservers: self._write_resolve(nameservers, searchservers) + return dev_names def set_hostname(self, hostname): out_fn = self._paths.join(False, '/etc/sysconfig/network') diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index 22f8c2c5..4e697f82 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -29,7 +29,6 @@ LOG = logging.getLogger(__name__) class Distro(debian.Distro): - distro_name = 'ubuntu' default_user = 'ubuntu' default_user_groups = ("adm,audio,cdrom,dialout,floppy,video," "plugdev,dip,netdev,sudo") -- cgit v1.2.3 From 355b0e372aec58360d4fb728088edefec27f86a1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 19 Sep 2012 20:57:30 -0700 Subject: Rework the rhel sysconfig writing/updating so that it goes through a single function which helps ensure correctness. Also write to /etc/sysconfig/network when we have written out devices to ensure that networking is on. --- cloudinit/distros/rhel.py | 123 ++++++++++++++++++++++++---------------------- 1 file changed, 64 insertions(+), 59 deletions(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index ec4dc2cc..c00627c6 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -26,6 +26,7 @@ from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit import version from cloudinit.settings import PER_INSTANCE @@ -55,6 +56,12 @@ D_QUOTE_CHARS = { '`': '\`', } +def _make_sysconfig_bool(val): + if val: + return 'yes' + else: + return 'no' + class Distro(distros.Distro): @@ -76,9 +83,8 @@ class Distro(distros.Distro): if search_servers: contents.append("search %s" % (" ".join(search_servers))) if contents: - resolve_rw_fn = self._paths.join(False, "/etc/resolv.conf") contents.insert(0, '# Created by cloud-init') - util.write_file(resolve_rw_fn, "\n".join(contents), 0644) + util.write_file("/etc/resolv.conf", "\n".join(contents), 0644) def _write_network(self, settings): # TODO(harlowja) fix this... since this is the ubuntu format @@ -88,81 +94,82 @@ class Distro(distros.Distro): # Make the intermediate format as the rhel format... 
nameservers = [] searchservers = [] + dev_names = entries.keys() for (dev, info) in entries.iteritems(): net_fn = NETWORK_FN_TPL % (dev) - net_ro_fn = self._paths.join(True, net_fn) - (prev_exist, net_cfg) = self._read_conf(net_ro_fn) - net_cfg['DEVICE'] = dev - boot_proto = info.get('bootproto') - if boot_proto: - net_cfg['BOOTPROTO'] = boot_proto - net_mask = info.get('netmask') - if net_mask: - net_cfg["NETMASK"] = net_mask - addr = info.get('address') - if addr: - net_cfg["IPADDR"] = addr - if info.get('auto'): - net_cfg['ONBOOT'] = 'yes' - else: - net_cfg['ONBOOT'] = 'no' - gtway = info.get('gateway') - if gtway: - net_cfg["GATEWAY"] = gtway - bcast = info.get('broadcast') - if bcast: - net_cfg["BROADCAST"] = bcast - mac_addr = info.get('hwaddress') - if mac_addr: - net_cfg["MACADDR"] = mac_addr - lines = net_cfg.write() + net_cfg = { + 'DEVICE': dev, + 'NETMASK': info.get('netmask'), + 'IPADDR': info.get('address'), + 'BOOTPROTO': info.get('bootproto'), + 'GATEWAY': info.get('gateway'), + 'BROADCAST': info.get('broadcast'), + 'MACADDR': info.get('hwaddress'), + 'ONBOOT': _make_sysconfig_bool(info.get('auto')), + } + self._update_sysconfig_file(net_fn, net_cfg) if 'dns-nameservers' in info: nameservers.extend(info['dns-nameservers']) if 'dns-search' in info: searchservers.extend(info['dns-search']) - if not prev_exist: - lines.insert(0, '# Created by cloud-init') - w_contents = "\n".join(lines) - net_rw_fn = self._paths.join(False, net_fn) - util.write_file(net_rw_fn, w_contents, 0644) if nameservers or searchservers: self._write_resolve(nameservers, searchservers) + if dev_names: + net_cfg = { + 'NETWORKING': _make_sysconfig_bool(True), + } + self._update_sysconfig_file("/etc/sysconfig/network", net_cfg) + return dev_names + + def _update_sysconfig_file(self, fn, adjustments, allow_empty=False): + if not adjustments: + return + (exists, contents) = self._read_conf(fn) + updated_am = 0 + for (k, v) in adjustments.items(): + if v is None: + continue + v = str(v) + if len(v) == 0 and not allow_empty: + continue + contents[k] = v + updated_am += 1 + if updated_am: + lines = contents.write() + if not exists: + ci_ver = version.version_string() + lines.insert(0, '# Created by cloud-init v. 
%s' % (ci_ver)) + util.write_file(fn, "\n".join(lines), 0644) def set_hostname(self, hostname): - out_fn = self._paths.join(False, '/etc/sysconfig/network') - self._write_hostname(hostname, out_fn) - if out_fn == '/etc/sysconfig/network': - # Only do this if we are running in non-adjusted root mode - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + self._write_hostname(hostname, '/etc/sysconfig/network') + LOG.debug("Setting hostname to %s", hostname) + util.subp(['hostname', hostname]) def apply_locale(self, locale, out_fn=None): if not out_fn: - out_fn = self._paths.join(False, '/etc/sysconfig/i18n') - ro_fn = self._paths.join(True, '/etc/sysconfig/i18n') - (_exists, contents) = self._read_conf(ro_fn) - contents['LANG'] = locale - w_contents = "\n".join(contents.write()) - util.write_file(out_fn, w_contents, 0644) + out_fn = '/etc/sysconfig/i18n' + locale_cfg = { + 'LANG': locale, + } + self._update_sysconfig_file(out_fn, locale_cfg) def _write_hostname(self, hostname, out_fn): - (_exists, contents) = self._read_conf(out_fn) - contents['HOSTNAME'] = hostname - w_contents = "\n".join(contents.write()) - util.write_file(out_fn, w_contents, 0644) + host_cfg = { + 'HOSTNAME': hostname, + } + self._update_sysconfig_file(out_fn, host_cfg) def update_hostname(self, hostname, prev_file): hostname_prev = self._read_hostname(prev_file) - read_fn = self._paths.join(True, "/etc/sysconfig/network") - hostname_in_sys = self._read_hostname(read_fn) + hostname_in_sys = self._read_hostname("/etc/sysconfig/network") update_files = [] if not hostname_prev or hostname_prev != hostname: update_files.append(prev_file) if (not hostname_in_sys or (hostname_in_sys == hostname_prev and hostname_in_sys != hostname)): - write_fn = self._paths.join(False, "/etc/sysconfig/network") - update_files.append(write_fn) + update_files.append("/etc/sysconfig/network") for fn in update_files: try: self._write_hostname(hostname, fn) @@ -200,12 +207,10 @@ class Distro(distros.Distro): raise RuntimeError(("Invalid timezone %s," " no file found at %s") % (tz, tz_file)) # Adjust the sysconfig clock zone setting - read_fn = self._paths.join(True, "/etc/sysconfig/clock") - (_exists, contents) = self._read_conf(read_fn) - contents['ZONE'] = tz - tz_contents = "\n".join(contents.write()) - write_fn = self._paths.join(False, "/etc/sysconfig/clock") - util.write_file(write_fn, tz_contents) + clock_cfg = { + 'ZONE': tz, + } + self._update_sysconfig_file("/etc/sysconfig/clock", clock_cfg) # This ensures that the correct tz will be used for the system util.copy(tz_file, self._paths.join(False, "/etc/localtime")) -- cgit v1.2.3 From f59369d1bb0eac874dcd2fa91abfad9186b22810 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 19 Sep 2012 21:13:48 -0700 Subject: Use a common header creation function to avoid duplicating the same code in this file. --- cloudinit/distros/rhel.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index c00627c6..1ea7952b 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -63,6 +63,11 @@ def _make_sysconfig_bool(val): return 'no' +def _make_header(): + ci_ver = version.version_string() + return '# Created by cloud-init v. 
%s' % (ci_ver) + + class Distro(distros.Distro): def __init__(self, name, cfg, paths): @@ -83,7 +88,7 @@ class Distro(distros.Distro): if search_servers: contents.append("search %s" % (" ".join(search_servers))) if contents: - contents.insert(0, '# Created by cloud-init') + contents.insert(0, _make_header()) util.write_file("/etc/resolv.conf", "\n".join(contents), 0644) def _write_network(self, settings): @@ -137,8 +142,7 @@ class Distro(distros.Distro): if updated_am: lines = contents.write() if not exists: - ci_ver = version.version_string() - lines.insert(0, '# Created by cloud-init v. %s' % (ci_ver)) + lines.insert(0, _make_header()) util.write_file(fn, "\n".join(lines), 0644) def set_hostname(self, hostname): @@ -212,7 +216,7 @@ class Distro(distros.Distro): } self._update_sysconfig_file("/etc/sysconfig/clock", clock_cfg) # This ensures that the correct tz will be used for the system - util.copy(tz_file, self._paths.join(False, "/etc/localtime")) + util.copy(tz_file, "/etc/localtime") def package_command(self, command, args=None): cmd = ['yum'] -- cgit v1.2.3 From 4b910d7bca47cb4fd26394fb4d14ebc72b1a73f8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 20 Sep 2012 14:02:34 -0700 Subject: Add a resolv.conf parser that can be easily used for adjusting a resolv.conf formatted file and use this to adjust the resolv.conf in the redhat distro instead of replacing the previous resolv.conf completely. --- cloudinit/distros/helpers.py | 179 ++++++++++++++++++++++++++++ cloudinit/distros/rhel.py | 32 +++-- cloudinit/util.py | 10 ++ tests/unittests/test_distros/test_resolv.py | 61 ++++++++++ 4 files changed, 273 insertions(+), 9 deletions(-) create mode 100644 cloudinit/distros/helpers.py create mode 100644 tests/unittests/test_distros/test_resolv.py diff --git a/cloudinit/distros/helpers.py b/cloudinit/distros/helpers.py new file mode 100644 index 00000000..e1db74dc --- /dev/null +++ b/cloudinit/distros/helpers.py @@ -0,0 +1,179 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Scott Moser +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from StringIO import StringIO + +from cloudinit import util + + +# See: man resolv.conf +class ResolvConf(object): + def __init__(self, text): + self._text = text + self._contents = None + + def parse(self): + if self._contents is None: + self._contents = self._parse(self._text) + + @property + def nameservers(self): + self.parse() + return self._retr_option('nameserver') + + @property + def local_domain(self): + self.parse() + dm = self._retr_option('domain') + if dm: + return dm[0] + return None + + @property + def search_domains(self): + self.parse() + current_sds = self._retr_option('search') + flat_sds = [] + for sdlist in current_sds: + for sd in sdlist.split(None): + if sd: + flat_sds.append(sd) + return flat_sds + + def __str__(self): + self.parse() + contents = StringIO() + for (line_type, components) in self._contents: + if line_type == 'blank': + contents.write("\n") + elif line_type == 'all_comment': + contents.write("%s\n" % (components[0])) + elif line_type == 'option': + (cfg_opt, cfg_value, comment_tail) = components + line = "%s %s" % (cfg_opt, cfg_value) + if len(comment_tail): + line += comment_tail + contents.write("%s\n" % (line)) + return contents.getvalue() + + def _retr_option(self, opt_name): + found = [] + for (line_type, components) in self._contents: + if line_type == 'option': + (cfg_opt, cfg_value, comment_tail) = components + if cfg_opt == opt_name: + found.append(cfg_value) + return found + + def add_nameserver(self, ns): + self.parse() + current_ns = self._retr_option('nameserver') + new_ns = list(current_ns) + new_ns.append(str(ns)) + new_ns = util.uniq_list(new_ns) + if len(new_ns) == len(current_ns): + return current_ns + if len(current_ns) >= 3: + # Hard restriction on only 3 name servers + raise ValueError(("Adding %r would go beyond the " + "'3' maximum name servers") % (ns)) + self._remove_option('nameserver') + for n in new_ns: + self._contents.append(('option', ['nameserver', n, ''])) + return new_ns + + def _remove_option(self, opt_name): + + def remove_opt(item): + line_type, components = item + if line_type != 'option': + return True + (cfg_opt, cfg_value, comment_tail) = components + if cfg_opt != opt_name: + return True + return False + + new_contents = filter(remove_opt, self._contents) + self._contents = new_contents + + def add_search_domain(self, search_domain): + flat_sds = self.search_domains + new_sds = list(flat_sds) + new_sds.append(str(search_domain)) + new_sds = util.uniq_list(new_sds) + if len(flat_sds) == len(new_sds): + return new_sds + if len(flat_sds) >= 6: + # Hard restriction on only 6 search domains + raise ValueError(("Adding %r would go beyond the " + "'6' maximum search domains") % (search_domain)) + s_list = " ".join(new_sds) + if len(s_list) > 256: + # Some hard limit on 256 chars total + raise ValueError(("Adding %r would go beyond the " + "256 maximum search list character limit") + % (search_domain)) + self._remove_option('search') + self._contents.append(('option', ['search', s_list, ''])) + return flat_sds + + @local_domain.setter + def local_domain(self, domain): + self.parse() + self._remove_option('domain') + self._contents.append(('option', ['domain', str(domain), ''])) + return domain + + def _parse(self, contents): + entries = [] + for (i, line) in enumerate(contents.splitlines()): + sline = line.strip() + if not sline: + entries.append(('blank', [line])) + continue + comment_s_loc = sline.find(";") + comment_h_loc = sline.find("#") + comment_loc = -1 + if comment_s_loc != -1 and comment_h_loc != -1: + 
comment_loc = min(comment_h_loc, comment_s_loc) + elif comment_s_loc != -1: + comment_loc = comment_s_loc + elif comment_h_loc != -1: + comment_loc = comment_h_loc + head = line + tail = None + if comment_loc != -1: + head = line[:comment_loc] + tail = line[comment_loc:] + if not len(head.strip()): + entries.append(('all_comment', [line])) + continue + if not tail: + tail = '' + try: + (cfg_opt, cfg_values) = head.split(None, 1) + except (IndexError, ValueError): + raise IOError("Incorrectly formatted resolv.conf line %s" % (i + 1)) + if cfg_opt not in ('nameserver', 'domain', 'search', 'sortlist', 'options'): + raise IOError("Unexpected resolv.conf option %s" % (cfg_opt)) + entries.append(("option", [cfg_opt, cfg_values, tail])) + return entries + + diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index ec4dc2cc..1c9d493d 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -23,6 +23,8 @@ import os from cloudinit import distros +from cloudinit.distros import helpers as d_helpers + from cloudinit import helpers from cloudinit import log as logging from cloudinit import util @@ -68,17 +70,29 @@ class Distro(distros.Distro): def install_packages(self, pkglist): self.package_command('install', pkglist) - def _write_resolve(self, dns_servers, search_servers): - contents = [] + def _adjust_resolve(self, dns_servers, search_servers): + r_conf = d_helpers.ResolvConf(util.load_file("/etc/resolv.conf")) + try: + r_conf.parse() + except IOError: + util.logexc(LOG, + "Failed at parsing %s reverting to an empty instance", + "/etc/resolv.conf") + r_conf = d_helpers.ResolvConf('') + r_conf.parse() if dns_servers: for s in dns_servers: - contents.append("nameserver %s" % (s)) + try: + r_conf.add_nameserver(s) + except ValueError: + util.logexc(LOG, "Failed at adding nameserver %s", s) if search_servers: - contents.append("search %s" % (" ".join(search_servers))) - if contents: - resolve_rw_fn = self._paths.join(False, "/etc/resolv.conf") - contents.insert(0, '# Created by cloud-init') - util.write_file(resolve_rw_fn, "\n".join(contents), 0644) + for s in search_servers: + try: + r_conf.add_search_domain(s) + except ValueError: + util.logexc(LOG, "Failed at adding search domain %s", s) + util.write_file("/etc/resolv.conf", str(r_conf), 0644) def _write_network(self, settings): # TODO(harlowja) fix this... 
since this is the ubuntu format @@ -126,7 +140,7 @@ class Distro(distros.Distro): net_rw_fn = self._paths.join(False, net_fn) util.write_file(net_rw_fn, w_contents, 0644) if nameservers or searchservers: - self._write_resolve(nameservers, searchservers) + self._adjust_resolve(nameservers, searchservers) def set_hostname(self, hostname): out_fn = self._paths.join(False, '/etc/sysconfig/network') diff --git a/cloudinit/util.py b/cloudinit/util.py index 33da73eb..e5fa42c6 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -952,6 +952,16 @@ def find_devs_with(criteria=None, oformat='device', return entries +def uniq_list(in_list): + out_list = [] + for i in in_list: + if i in out_list: + continue + else: + out_list.append(i) + return out_list + + def load_file(fname, read_cb=None, quiet=False): LOG.debug("Reading from %s (quiet=%s)", fname, quiet) ofh = StringIO() diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py new file mode 100644 index 00000000..5f122833 --- /dev/null +++ b/tests/unittests/test_distros/test_resolv.py @@ -0,0 +1,61 @@ +from mocker import MockerTestCase + +from cloudinit.distros import helpers + + +BASE_RESOLVE = ''' +; generated by /sbin/dhclient-script +search blah.yahoo.com yahoo.com +nameserver 10.15.44.14 +nameserver 10.15.30.92 +''' +BASE_RESOLVE = BASE_RESOLVE.strip() + + +class TestResolvHelper(MockerTestCase): + def test_parse_same(self): + rp = helpers.ResolvConf(BASE_RESOLVE) + rp_r = str(rp).strip() + self.assertEquals(BASE_RESOLVE, rp_r) + + def test_local_domain(self): + rp = helpers.ResolvConf(BASE_RESOLVE) + self.assertEquals(None, rp.local_domain) + + rp.local_domain = "bob" + self.assertEquals('bob', rp.local_domain) + self.assertIn('domain bob', str(rp)) + + def test_nameservers(self): + rp = helpers.ResolvConf(BASE_RESOLVE) + self.assertIn('10.15.44.14', rp.nameservers) + self.assertIn('10.15.30.92', rp.nameservers) + rp.add_nameserver('10.2') + self.assertIn('10.2', rp.nameservers) + self.assertIn('nameserver 10.2', str(rp)) + self.assertNotIn('10.3', rp.nameservers) + self.assertEquals(len(rp.nameservers), 3) + rp.add_nameserver('10.2') + with self.assertRaises(ValueError): + rp.add_nameserver('10.3') + self.assertNotIn('10.3', rp.nameservers) + + def test_search_domains(self): + rp = helpers.ResolvConf(BASE_RESOLVE) + self.assertIn('yahoo.com', rp.search_domains) + self.assertIn('blah.yahoo.com', rp.search_domains) + rp.add_search_domain('bbb.y.com') + self.assertIn('bbb.y.com', rp.search_domains) + self.assertRegexpMatches(str(rp), r'search(.*)bbb.y.com(.*)') + self.assertIn('bbb.y.com', rp.search_domains) + rp.add_search_domain('bbb.y.com') + self.assertEquals(len(rp.search_domains), 3) + rp.add_search_domain('bbb2.y.com') + self.assertEquals(len(rp.search_domains), 4) + rp.add_search_domain('bbb3.y.com') + self.assertEquals(len(rp.search_domains), 5) + rp.add_search_domain('bbb4.y.com') + self.assertEquals(len(rp.search_domains), 6) + with self.assertRaises(ValueError): + rp.add_search_domain('bbb5.y.com') + self.assertEquals(len(rp.search_domains), 6) -- cgit v1.2.3 From d27690ebe160bf3eae8dc92f7364daef5cf56208 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 20 Sep 2012 14:09:57 -0700 Subject: Pylint cleanups. 
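
The ResolvConf helper that these cleanups touch can be exercised on its own. The sketch below (addresses and domains are made up) mirrors what rhel.py's _adjust_resolve() now does: parse an existing file, add entries, and re-serialize while keeping comments intact:

    from cloudinit.distros import helpers

    existing = "\n".join([
        '; generated by /sbin/dhclient-script',
        'search example.internal',
        'nameserver 10.0.0.2',
    ])

    r_conf = helpers.ResolvConf(existing)
    r_conf.parse()

    for ns in ('10.0.0.3', '10.0.0.4'):
        try:
            r_conf.add_nameserver(ns)
        except ValueError:
            # Raised when the 3 nameserver limit would be exceeded
            pass
    r_conf.add_search_domain('cloud.example.internal')

    # Comments and blank lines from the original text are preserved
    print(str(r_conf))

Duplicate nameservers and search domains are silently ignored (via uniq_list), which keeps repeated runs from piling up duplicate entries.
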
--- cloudinit/distros/helpers.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/cloudinit/distros/helpers.py b/cloudinit/distros/helpers.py index e1db74dc..251d5051 100644 --- a/cloudinit/distros/helpers.py +++ b/cloudinit/distros/helpers.py @@ -77,7 +77,7 @@ class ResolvConf(object): found = [] for (line_type, components) in self._contents: if line_type == 'option': - (cfg_opt, cfg_value, comment_tail) = components + (cfg_opt, cfg_value, _comment_tail) = components if cfg_opt == opt_name: found.append(cfg_value) return found @@ -104,13 +104,16 @@ class ResolvConf(object): def remove_opt(item): line_type, components = item if line_type != 'option': - return True - (cfg_opt, cfg_value, comment_tail) = components + return False + (cfg_opt, _cfg_value, _comment_tail) = components if cfg_opt != opt_name: - return True - return False + return False + return True - new_contents = filter(remove_opt, self._contents) + new_contents = [] + for c in self._contents: + if not remove_opt(c): + new_contents.append(c) self._contents = new_contents def add_search_domain(self, search_domain): @@ -170,8 +173,10 @@ class ResolvConf(object): try: (cfg_opt, cfg_values) = head.split(None, 1) except (IndexError, ValueError): - raise IOError("Incorrectly formatted resolv.conf line %s" % (i + 1)) - if cfg_opt not in ('nameserver', 'domain', 'search', 'sortlist', 'options'): + raise IOError("Incorrectly formatted resolv.conf line %s" + % (i + 1)) + if cfg_opt not in ['nameserver', 'domain', + 'search', 'sortlist', 'options']: raise IOError("Unexpected resolv.conf option %s" % (cfg_opt)) entries.append(("option", [cfg_opt, cfg_values, tail])) return entries -- cgit v1.2.3 From e055c795581c2846aa6ae128de8cfd0b2c5d3f32 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 20 Sep 2012 15:55:52 -0700 Subject: Instead of special casing the empty list we are going to check for the 'all' entry and if that exists then only fire off one call (since debian supports this). --- cloudinit/distros/debian.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 777c8530..88f4e978 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -56,12 +56,17 @@ class Distro(distros.Distro): def _write_network(self, settings): net_fn = self._paths.join(False, "/etc/network/interfaces") util.write_file(net_fn, settings) - return [] + return ['all'] def _bring_up_interfaces(self, device_names): - if not device_names: - device_names = ['--all'] - return distros.Distro._bring_up_interfaces(self, device_names) + use_all = False + for d in device_names: + if d == 'all': + use_all = True + if use_all: + return distros.Distro._bring_up_interface(self, '--all') + else: + return distros.Distro._bring_up_interfaces(self, device_names) def set_hostname(self, hostname): out_fn = self._paths.join(False, "/etc/hostname") -- cgit v1.2.3 From 0049b6e7bc7f52fd442f1b8902294ca8c379df5c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 20 Sep 2012 17:26:42 -0700 Subject: Remove the need to have 'default_user' and 'default_user_groups' groups be hard coded into the distro class, instead let that set of configuration be located in the config file where it should be specified instead. 
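
The practical effect is that the default user becomes just another distro option read from the merged system_info configuration. A rough sketch of the lookup path follows; FakeDistro and its get_option() are stand-ins written for illustration only (the real Distro.get_option() is assumed to behave like a plain dictionary lookup on the distro's config):

    # Trimmed-down stand-in for the system_info section of cloud.cfg
    system_info = {
        'distro': 'ubuntu',
        'default_user': 'ubuntu',
        'default_user_groups': ['adm', 'sudo'],
    }


    class FakeDistro(object):
        def __init__(self, cfg):
            self._cfg = cfg

        def get_option(self, opt_name, default=None):
            # Assumed behaviour: straight dictionary lookup
            return self._cfg.get(opt_name, default)

        def get_default_user(self):
            return self.get_option('default_user')

        def get_default_user_groups(self):
            return self.get_option('default_user_groups')


    d = FakeDistro(system_info)
    print(d.get_default_user())         # 'ubuntu'
    print(d.get_default_user_groups())  # ['adm', 'sudo']

With the hard-coded class attributes gone, a distro that does not set these keys simply ends up with no default user.
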
--- cloudinit/distros/__init__.py | 12 ++++-------- cloudinit/distros/fedora.py | 3 +-- cloudinit/distros/ubuntu.py | 5 +---- config/cloud.cfg | 17 ++++++++++++++++- 4 files changed, 22 insertions(+), 15 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3e9d934d..cdb72b5a 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -44,10 +44,7 @@ LOG = logging.getLogger(__name__) class Distro(object): - __metaclass__ = abc.ABCMeta - default_user = None - default_user_groups = None def __init__(self, name, cfg, paths): self._paths = paths @@ -61,7 +58,6 @@ class Distro(object): user = self.get_default_user() groups = self.get_default_user_groups() - if not user: raise NotImplementedError("No Default user") @@ -71,12 +67,12 @@ class Distro(object): 'home': "/home/%s" % user, 'shell': "/bin/bash", 'lock_passwd': True, - 'gecos': "%s%s" % (user[0:1].upper(), user[1:]), + 'gecos': user.title(), 'sudo': "ALL=(ALL) NOPASSWD:ALL", } if groups: - user_dict['groups'] = groups + user_dict['groups'] = ",".join(groups) self.create_user(**user_dict) @@ -212,10 +208,10 @@ class Distro(object): return False def get_default_user(self): - return self.default_user + return self.get_option('default_user') def get_default_user_groups(self): - return self.default_user_groups + return self.get_option('default_user_groups') def create_user(self, name, **kwargs): """ diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py index 9f76a116..c777845d 100644 --- a/cloudinit/distros/fedora.py +++ b/cloudinit/distros/fedora.py @@ -28,5 +28,4 @@ LOG = logging.getLogger(__name__) class Distro(rhel.Distro): - distro_name = 'fedora' - default_user = 'ec2-user' + pass diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index 22f8c2c5..428db6ad 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -28,8 +28,5 @@ LOG = logging.getLogger(__name__) class Distro(debian.Distro): + pass - distro_name = 'ubuntu' - default_user = 'ubuntu' - default_user_groups = ("adm,audio,cdrom,dialout,floppy,video," - "plugdev,dip,netdev,sudo") diff --git a/config/cloud.cfg b/config/cloud.cfg index b3411d11..fbf53bd5 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -1,7 +1,9 @@ # The top level settings are used as module # and system configuration. -# Implement for Ubuntu only: create the default 'ubuntu' user +# A set of users which may be applied and/or used by various modules +# when a 'default' entry is found it will reference the 'default_user' +# from the distro configuration specified below users: - default @@ -71,6 +73,19 @@ cloud_final_modules: system_info: # This will affect which distro class gets used distro: ubuntu + # Default user name + that default users groups (if added/used) + default_user: ubuntu + default_user_groups: + - adm + - audio + - cdrom + - dialout + - floppy + - video + - plugdev + - dip + - netdev + - sudo # Other config here will be given to the distro class and/or path classes paths: cloud_dir: /var/lib/cloud/ -- cgit v1.2.3 From e8a10a41d22876d555084def823817337d9c2a80 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 20 Sep 2012 17:56:22 -0700 Subject: Use only util methods for reading/loading/appending/peeking at files since it is likely soon that we will add a new way of adjusting the root of files read, also it is useful for debugging to track what is being read/written in a central fashion. 
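
The conversion pattern is the same everywhere: raw open()/read() calls become util helpers so every read and write is logged in one place. A minimal sketch under that assumption (the paths are illustrative only), using load_file(), peek_file() and append_file() as added or already defined in cloudinit/util.py:

    from cloudinit import util

    fname = '/var/lib/cloud/data/instance-id'   # illustrative path

    # Before: with open(fname) as fp: data = fp.read()
    try:
        data = util.load_file(fname).strip()
        head = util.peek_file(fname, 512)       # only look at the first 512 bytes
    except IOError:
        data = None
        head = None

    # append_file() is a thin wrapper over write_file(..., omode="ab")
    util.append_file('/tmp/cloud-init-demo.log', "read %s\n" % (fname))
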
--- cloudinit/distros/__init__.py | 3 +-- cloudinit/sources/DataSourceAltCloud.py | 19 ++++++++----------- cloudinit/sources/DataSourceConfigDrive.py | 20 +++++++++----------- cloudinit/sources/DataSourceMAAS.py | 6 ++---- cloudinit/sources/DataSourceOVF.py | 5 ++--- cloudinit/util.py | 10 ++++++++++ 6 files changed, 32 insertions(+), 31 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3e9d934d..f6aa8d99 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -343,8 +343,7 @@ class Distro(object): else: try: - with open(sudo_file, 'a') as f: - f.write(content) + util.append_file(sudo_file, content) except IOError as e: util.logexc(LOG, "Failed to write %s" % sudo_file, e) raise e diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 69c376a5..d7e1204f 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -73,13 +73,11 @@ def read_user_data_callback(mount_dir): # First try deltacloud_user_data_file. On failure try user_data_file. try: - with open(deltacloud_user_data_file, 'r') as user_data_f: - user_data = user_data_f.read().strip() - except: + user_data = util.load_file(deltacloud_user_data_file).strip() + except IOError: try: - with open(user_data_file, 'r') as user_data_f: - user_data = user_data_f.read().strip() - except: + user_data = util.load_file(user_data_file).strip() + except IOError: util.logexc(LOG, ('Failed accessing user data file.')) return None @@ -157,11 +155,10 @@ class DataSourceAltCloud(sources.DataSource): if os.path.exists(CLOUD_INFO_FILE): try: - cloud_info = open(CLOUD_INFO_FILE) - cloud_type = cloud_info.read().strip().upper() - cloud_info.close() - except: - util.logexc(LOG, 'Unable to access cloud info file.') + cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper() + except IOError: + util.logexc(LOG, 'Unable to access cloud info file at %s.', + CLOUD_INFO_FILE) return False else: cloud_type = self.get_cloud_type() diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index b8154367..b477560c 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -227,19 +227,19 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): found = False if os.path.isfile(fpath): try: - with open(fpath) as fp: - data = fp.read() - except Exception as exc: - raise BrokenConfigDriveDir("failed to read: %s" % fpath) + data = util.load_file(fpath) + except IOError: + raise BrokenConfigDriveDir("Failed to read: %s" % fpath) found = True elif required: - raise NonConfigDriveDir("missing mandatory %s" % fpath) + raise NonConfigDriveDir("Missing mandatory path: %s" % fpath) if found and process: try: data = process(data) except Exception as exc: - raise BrokenConfigDriveDir("failed to process: %s" % fpath) + raise BrokenConfigDriveDir(("Failed to process " + "path: %s") % fpath) if found: results[name] = data @@ -255,8 +255,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): # do not use os.path.join here, as content_path starts with / cpath = os.path.sep.join((source_dir, "openstack", "./%s" % item['content_path'])) - with open(cpath) as fp: - return(fp.read()) + return util.load_file(cpath) files = {} try: @@ -270,7 +269,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): if item: results['network_config'] = read_content_path(item) except Exception as exc: - raise 
BrokenConfigDriveDir("failed to read file %s: %s" % (item, exc)) + raise BrokenConfigDriveDir("Failed to read file %s: %s" % (item, exc)) # to openstack, user can specify meta ('nova boot --meta=key=value') and # those will appear under metadata['meta']. @@ -385,8 +384,7 @@ def get_previous_iid(paths): # hasn't declared itself found. fname = os.path.join(paths.get_cpath('data'), 'instance-id') try: - with open(fname) as fp: - return fp.read() + return util.load_file(fname) except IOError: return None diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index c568d365..d166e9e3 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -301,9 +301,7 @@ if __name__ == "__main__": 'token_secret': args.tsec, 'consumer_secret': args.csec} if args.config: - import yaml - with open(args.config) as fp: - cfg = yaml.safe_load(fp) + cfg = util.read_conf(args.config) if 'datasource' in cfg: cfg = cfg['datasource']['MAAS'] for key in creds.keys(): @@ -312,7 +310,7 @@ if __name__ == "__main__": def geturl(url, headers_cb): req = urllib2.Request(url, data=None, headers=headers_cb(url)) - return(urllib2.urlopen(req).read()) + return (urllib2.urlopen(req).read()) def printurl(url, headers_cb): print "== %s ==\n%s\n" % (url, geturl(url, headers_cb)) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 771e64eb..e90150c6 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -204,9 +204,8 @@ def transport_iso9660(require_iso=True): try: # See if we can read anything at all...?? - with open(fullp, 'rb') as fp: - fp.read(512) - except: + util.peek_file(fullp, 512) + except IOError: continue try: diff --git a/cloudinit/util.py b/cloudinit/util.py index 33da73eb..18000301 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -952,6 +952,12 @@ def find_devs_with(criteria=None, oformat='device', return entries +def peek_file(fname, max_bytes): + LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes) + with open(fname, 'rb') as ifh: + return ifh.read(max_bytes) + + def load_file(fname, read_cb=None, quiet=False): LOG.debug("Reading from %s (quiet=%s)", fname, quiet) ofh = StringIO() @@ -1281,6 +1287,10 @@ def uptime(): return uptime_str +def append_file(path, content): + write_file(path, content, omode="ab", mode=None) + + def ensure_file(path, mode=0644): write_file(path, content='', omode="ab", mode=mode) -- cgit v1.2.3 From 62631d30aae55a42b77d326af75d5e476d4baf36 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 21 Sep 2012 14:15:09 -0700 Subject: 1. Cleanup the user creation so that the distro class is responsible only for creating users and groups and normalizing a input configuration into a normalized format that splits up the user list, the group list and the default user listsand let the add user/group config module handle calling those methods to add its own users/groups and the default user (if any). 2. Also add in tests for this normalization process to ensure that it is pretty bug free and works with the different types of formats that users/groups/defaults + options can take. 
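
To make the new interface concrete, the sketch below builds a distro the same way the unit tests added later in this series do, then runs a small made-up users/groups configuration through normalize_users_groups(); the expected shapes are shown as comments (the default-user details depend on what the distro defines):

    from cloudinit import distros
    from cloudinit import helpers
    from cloudinit import settings

    cfg = dict(settings.CFG_BUILTIN)
    cfg['system_info']['distro'] = 'ubuntu'
    paths = helpers.Paths(cfg['system_info']['paths'])
    distro = distros.fetch('ubuntu')('ubuntu', cfg['system_info'], paths)

    ug_cfg = {
        'groups': ['admins', 'ops'],
        'users': [
            'default',
            {'name': 'build', 'shell': '/bin/bash', 'lock-passwd': True},
        ],
    }
    ((users, default_user), groups) = distro.normalize_users_groups(ug_cfg)
    # groups       -> {'admins': [], 'ops': []}
    # users        -> {'build': {'shell': '/bin/bash', 'lock_passwd': True}}
    # default_user -> {'name': ..., 'config': {...}} when the distro defines one,
    #                 otherwise {}

The cc_users_groups module then only has to iterate these dictionaries and call create_group()/create_user().
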
--- cloudinit/config/cc_users_groups.py | 77 ++++--------- cloudinit/distros/__init__.py | 184 ++++++++++++++++++++++-------- cloudinit/util.py | 16 +++ doc/examples/cloud-config-user-groups.txt | 8 +- 4 files changed, 181 insertions(+), 104 deletions(-) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 418f3330..273c5068 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -16,63 +16,34 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from cloudinit import util + from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE def handle(name, cfg, cloud, log, _args): - user_zero = None - - if 'groups' in cfg: - for group in cfg['groups']: - if isinstance(group, dict): - for name, values in group.iteritems(): - if isinstance(values, list): - cloud.distro.create_group(name, values) - elif isinstance(values, str): - cloud.distro.create_group(name, values.split(',')) - else: - cloud.distro.create_group(group, []) - - if 'users' in cfg: - user_zero = None - - for user_config in cfg['users']: - - # Handle the default user creation - if 'default' in user_config: - log.info("Creating default user") - - # Create the default user if so defined - try: - cloud.distro.add_default_user() - - if not user_zero: - user_zero = cloud.distro.get_default_user() - - except NotImplementedError: - - if user_zero == name: - user_zero = None - - log.warn("Distro has not implemented default user " - "creation. No default user will be created") - - elif isinstance(user_config, dict) and 'name' in user_config: - - name = user_config['name'] - if not user_zero: - user_zero = name - - # Make options friendly for distro.create_user - new_opts = {} - if isinstance(user_config, dict): - for opt in user_config: - new_opts[opt.replace('-', '_')] = user_config[opt] - - cloud.distro.create_user(**new_opts) - else: - # create user with no configuration - cloud.distro.create_user(user_config) + distro = cloud.distro + ((users, default_user), groups) = distro.normalize_users_groups(cfg) + for (name, members) in groups.items(): + distro.create_group(name, members) + + if default_user: + user = default_user['name'] + config = default_user['config'] + def_base_config = { + 'name': user, + 'plain_text_passwd': user, + 'home': "/home/%s" % user, + 'shell': "/bin/bash", + 'lock_passwd': True, + 'gecos': "%s%s" % (user.title()), + 'sudo': "ALL=(ALL) NOPASSWD:ALL", + } + u_config = util.mergemanydict([def_base_config, config]) + distro.create_user(**u_config) + + for (user, config) in users.items(): + distro.create_user(user, **config) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3e9d934d..4fb1d8c2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -24,9 +24,7 @@ from StringIO import StringIO import abc -import grp import os -import pwd import re from cloudinit import importer @@ -54,34 +52,6 @@ class Distro(object): self._cfg = cfg self.name = name - def add_default_user(self): - # Adds the distro user using the rules: - # - Password is same as username but is locked - # - nopasswd sudo access - - user = self.get_default_user() - groups = self.get_default_user_groups() - - if not user: - raise NotImplementedError("No Default user") - - user_dict = { - 'name': user, - 'plain_text_passwd': user, - 'home': "/home/%s" % user, - 'shell': "/bin/bash", - 'lock_passwd': True, - 'gecos': "%s%s" % (user[0:1].upper(), user[1:]), - 
'sudo': "ALL=(ALL) NOPASSWD:ALL", - } - - if groups: - user_dict['groups'] = groups - - self.create_user(**user_dict) - - LOG.info("Added default '%s' user with passwordless sudo", user) - @abc.abstractmethod def install_packages(self, pkglist): raise NotImplementedError() @@ -204,18 +174,19 @@ class Distro(object): util.logexc(LOG, "Running interface command %s failed", cmd) return False - def isuser(self, name): - try: - if pwd.getpwnam(name): - return True - except KeyError: - return False - def get_default_user(self): return self.default_user def get_default_user_groups(self): - return self.default_user_groups + if not self.default_user_groups: + return [] + def_groups = [] + if isinstance(self.default_user_groups, (str, basestring)): + def_groups = self.default_user_groups.split(",") + else: + def_groups = list(self.default_user_groups) + def_groups = list(sorted(set(def_groups))) + return def_groups def create_user(self, name, **kwargs): """ @@ -272,7 +243,7 @@ class Distro(object): adduser_cmd.append('-m') # Create the user - if self.isuser(name): + if util.is_user(name): LOG.warn("User %s already exists, skipping." % name) else: LOG.debug("Creating name %s" % name) @@ -323,6 +294,130 @@ class Distro(object): return True + def _normalize_groups(self, grp_cfg): + groups = {} + if isinstance(grp_cfg, (str, basestring)): + grp_cfg = grp_cfg.strip().split(",") + + if isinstance(grp_cfg, (list)): + for g in grp_cfg: + g = g.strip() + if g: + groups[g] = [] + elif isinstance(grp_cfg, (dict)): + for grp_name, grp_members in grp_cfg.items(): + if isinstance(grp_members, (str, basestring)): + r_grp_members = [] + for gc in grp_members.strip().split(','): + gc = gc.strip() + if gc and gc not in r_grp_members: + r_grp_members.append(gc) + grp_members = r_grp_members + elif not isinstance(grp_members, (list)): + raise TypeError(("Group member config must be list " + " or string types only and not %s") % + util.obj_name(grp_members)) + groups[grp_name] = grp_members + else: + raise TypeError(("Group config must be list, dict " + " or string types only and not %s") % + util.obj_name(grp_cfg)) + return groups + + def _normalize_users(self, u_cfg): + if isinstance(u_cfg, (dict)): + ad_ucfg = [] + for (k, v) in u_cfg.items(): + if isinstance(v, (bool, int, basestring, str)): + if util.is_true(v): + ad_ucfg.append(str(k)) + elif isinstance(v, (dict)): + v['name'] = k + ad_ucfg.append(v) + else: + raise TypeError(("Unmappable user value type %s" + " for key %s") % (util.obj_name(v), k)) + u_cfg = ad_ucfg + + users = {} + for user_config in u_cfg: + if isinstance(user_config, (str, basestring)): + for u in user_config.strip().split(","): + u = u.strip() + if u and u not in users: + users[u] = {} + elif isinstance(user_config, (dict)): + if 'name' in user_config: + n = user_config.pop('name') + prev_config = users.get(n) or {} + users[n] = util.mergemanydict([prev_config, + user_config]) + else: + # Assume the default user then + prev_config = users.get('default') or {} + users['default'] = util.mergemanydict([prev_config, + user_config]) + elif isinstance(user_config, (bool, int)): + pass + else: + raise TypeError(("User config must be dictionary " + " or string types only and not %s") % + util.obj_name(user_config)) + + # Ensure user options are in the right python friendly format + if users: + c_users = {} + for (uname, uconfig) in users.items(): + c_uconfig = {} + for (k, v) in uconfig.items(): + k = k.replace('-', '_').strip() + if k: + c_uconfig[k] = v + c_users[uname] = c_uconfig + users = 
c_users + + # Fixup the default user into the real + # default user name and extract it + default_user = {} + if users and 'default' in users: + try: + def_config = users.pop('default') + def_user = self.get_default_user() + def_groups = self.get_default_user_groups() + if def_user: + u_config = users.pop(def_user, None) or {} + u_groups = u_config.get('groups') or [] + if isinstance(u_groups, (str, basestring)): + u_groups = u_groups.strip().split(",") + u_groups.extend(def_groups) + u_groups = set([x.strip() for x in u_groups if x.strip()]) + u_config['groups'] = ",".join(sorted(u_groups)) + default_user = { + 'name': def_user, + 'config': util.mergemanydict([def_config, u_config]), + } + else: + LOG.warn(("Distro has not provided a default user " + "creation. No default user will be normalized.")) + users.pop('default', None) + except NotImplementedError: + LOG.warn(("Distro has not implemented default user " + "creation. No default user will be normalized.")) + users.pop('default', None) + + return (default_user, users) + + def normalize_users_groups(self, ug_cfg): + users = {} + groups = {} + default_user = {} + if 'groups' in ug_cfg: + groups = self._normalize_groups(ug_cfg['groups']) + + if 'users' in ug_cfg: + default_user, users = self._normalize_users(ug_cfg['users']) + return ((users, default_user), groups) + def write_sudo_rules(self, user, rules, @@ -349,18 +444,11 @@ class Distro(object): util.logexc(LOG, "Failed to write %s" % sudo_file, e) raise e - def isgroup(self, name): - try: - if grp.getgrnam(name): - return True - except: - return False - def create_group(self, name, members): group_add_cmd = ['groupadd', name] # Check if group exists, and then add it doesn't - if self.isgroup(name): + if util.is_group(name): LOG.warn("Skipping creation of existing group '%s'" % name) else: try: @@ -372,7 +460,7 @@ class Distro(object): # Add members to the group, if so defined if len(members) > 0: for member in members: - if not self.isuser(member): + if not util.is_user(member): LOG.warn("Unable to add group member '%s' to group '%s'" "; user does not exist." % (member, name)) continue diff --git a/cloudinit/util.py b/cloudinit/util.py index 33da73eb..94b17dfa 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1104,6 +1104,22 @@ def hash_blob(blob, routine, mlen=None): return digest +def is_user(name): + try: + if pwd.getpwnam(name): + return True + except KeyError: + return False + + +def is_group(name): + try: + if grp.getgrnam(name): + return True + except KeyError: + return False + + def rename(src, dest): LOG.debug("Renaming %s to %s", src, dest) # TODO(harlowja) use a se guard here?? diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 1da0d717..073fbd8f 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -81,14 +81,16 @@ users: # directive. # system: Create the user as a system user. This means no home directory. # -# Default user creation: Ubuntu Only -# Unless you define users, you will get a Ubuntu user on Ubuntu systems with the +# Default user creation: +# +# Unless you define users, you will get a 'ubuntu' user on buntu systems with the # legacy permission (no password sudo, locked user, etc). If however, you want # to have the ubuntu user in addition to other users, you need to instruct # cloud-init that you also want the default user. To do this use the following # syntax: # users: -# default: True +# - default +# - bob # foobar: ... 
# # users[0] (the first user in users) overrides the user directive. -- cgit v1.2.3 From 712d019cba5afb535033e8228ad365071661ceb2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 21 Sep 2012 14:17:42 -0700 Subject: Actually commit the test for user/group data normalization instead of forgetting about it. --- .../test_distros/test_user_data_normalize.py | 187 +++++++++++++++++++++ 1 file changed, 187 insertions(+) create mode 100644 tests/unittests/test_distros/test_user_data_normalize.py diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py new file mode 100644 index 00000000..6ff43a76 --- /dev/null +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -0,0 +1,187 @@ +from mocker import MockerTestCase + +from cloudinit import distros +from cloudinit import helpers +from cloudinit import settings + + +class TestUGNormalize(MockerTestCase): + + def _make_distro(self, dtype, def_user=None, def_groups=None): + cfg = dict(settings.CFG_BUILTIN) + cfg['system_info']['distro'] = dtype + paths = helpers.Paths(cfg['system_info']['paths']) + distro_cls = distros.fetch(dtype) + distro = distro_cls(dtype, cfg['system_info'], paths) + if def_user: + distro.default_user = def_user + if def_groups: + distro.default_user_groups = def_groups + return distro + + def test_basic_groups(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'groups': ['bob'], + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('bob', groups) + self.assertEquals({}, users) + self.assertEquals({}, def_user) + + def test_csv_groups(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'groups': 'bob,joe,steve', + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('bob', groups) + self.assertIn('joe', groups) + self.assertIn('steve', groups) + self.assertEquals({}, users) + self.assertEquals({}, def_user) + + def test_more_groups(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'groups': ['bob', 'joe', 'steve',] + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('bob', groups) + self.assertIn('joe', groups) + self.assertIn('steve', groups) + self.assertEquals({}, users) + self.assertEquals({}, def_user) + + def test_member_groups(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'groups': { + 'bob': ['s'], + 'joe': [], + 'steve': [], + } + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('bob', groups) + self.assertEquals(['s'], groups['bob']) + self.assertEquals([], groups['joe']) + self.assertIn('joe', groups) + self.assertIn('steve', groups) + self.assertEquals({}, users) + self.assertEquals({}, def_user) + + def test_users_simple_dict(self): + distro = self._make_distro('ubuntu', 'bob') + ug_cfg = { + 'users': { + 'default': True, + } + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertEquals('bob', def_user['name']) + ug_cfg = { + 'users': { + 'default': 'yes', + } + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertEquals('bob', def_user['name']) + ug_cfg = { + 'users': { + 'default': '1', + } + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertEquals('bob', def_user['name']) + + def test_users_simple_dict_no(self): + distro = self._make_distro('ubuntu', 'bob') + ug_cfg = { + 'users': { + 'default': False, + } + } + ((users, def_user), groups) = 
distro.normalize_users_groups(ug_cfg) + self.assertEquals({}, def_user) + ug_cfg = { + 'users': { + 'default': 'no', + } + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertEquals({}, def_user) + + def test_users_simple(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + 'joe', + 'bob' + ], + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('joe', users) + self.assertIn('bob', users) + self.assertEquals({}, users['joe']) + self.assertEquals({}, users['bob']) + + def test_users_dict_default_additional(self): + distro = self._make_distro('ubuntu', 'bob') + ug_cfg = { + 'users': [ + {'name': 'default', 'blah': True} + ], + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('bob', def_user['name']) + self.assertEquals(",".join(distro.get_default_user_groups()), + def_user['config']['groups']) + self.assertEquals(True, + def_user['config']['blah']) + self.assertNotIn('bob', users) + + def test_users_dict_default(self): + distro = self._make_distro('ubuntu', 'bob') + ug_cfg = { + 'users': [ + 'default', + ], + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('bob', def_user['name']) + self.assertEquals(",".join(distro.get_default_user_groups()), + def_user['config']['groups']) + self.assertNotIn('bob', users) + + def test_users_dict_trans(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe', + 'tr-me': True}, + {'name': 'bob'}, + ], + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('joe', users) + self.assertIn('bob', users) + self.assertEquals({'tr_me': True}, users['joe']) + self.assertEquals({}, users['bob']) + + def test_users_dict(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe'}, + {'name': 'bob'}, + ], + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('joe', users) + self.assertIn('bob', users) + self.assertEquals({}, users['joe']) + self.assertEquals({}, users['bob']) + + + -- cgit v1.2.3 From 47d9df78264625207342e668a9120fa84b6ad355 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 21 Sep 2012 14:38:40 -0700 Subject: Also allow the user list to come in as a comma separated list so that its types match more of what the group list can be. 
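
With this change the following spellings are meant to normalize identically (sketch; the user names are made up and distro is an instance built the same way as in the tests below):

    ((users, _), _) = distro.normalize_users_groups({'users': 'joe,bob'})
    # users == {'joe': {}, 'bob': {}}, the same result as for:
    #   {'users': ['joe', 'bob']}
    #   {'users': [{'name': 'joe'}, {'name': 'bob'}]}
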
--- cloudinit/distros/__init__.py | 2 ++ tests/unittests/test_distros/test_user_data_normalize.py | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 4fb1d8c2..361d2c05 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -338,6 +338,8 @@ class Distro(object): raise TypeError(("Unmappable user value type %s" " for key %s") % (util.obj_name(v), k)) u_cfg = ad_ucfg + elif isinstance(u_cfg, (str, basestring)): + u_cfg = u_cfg.strip().split(",") users = {} for user_config in u_cfg: diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index 6ff43a76..caf479cd 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -112,6 +112,17 @@ class TestUGNormalize(MockerTestCase): ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) self.assertEquals({}, def_user) + def test_users_simple_csv(self): + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': 'joe,bob', + } + ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + self.assertIn('joe', users) + self.assertIn('bob', users) + self.assertEquals({}, users['joe']) + self.assertEquals({}, users['bob']) + def test_users_simple(self): distro = self._make_distro('ubuntu') ug_cfg = { -- cgit v1.2.3 From 009faa0546ffbcadbbcaa9692d6842890e6f2e10 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 21 Sep 2012 15:12:11 -0700 Subject: Fix some docs + pylint warnings + log on default created in the module. --- cloudinit/config/cc_users_groups.py | 5 ++--- cloudinit/distros/__init__.py | 3 ++- doc/examples/cloud-config-user-groups.txt | 10 ++++++---- .../test_distros/test_user_data_normalize.py | 22 +++++++++++----------- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 273c5068..a6ce49ac 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -24,7 +24,6 @@ frequency = PER_INSTANCE def handle(name, cfg, cloud, log, _args): - distro = cloud.distro ((users, default_user), groups) = distro.normalize_users_groups(cfg) for (name, members) in groups.items(): @@ -34,7 +33,6 @@ def handle(name, cfg, cloud, log, _args): user = default_user['name'] config = default_user['config'] def_base_config = { - 'name': user, 'plain_text_passwd': user, 'home': "/home/%s" % user, 'shell': "/bin/bash", @@ -43,7 +41,8 @@ def handle(name, cfg, cloud, log, _args): 'sudo': "ALL=(ALL) NOPASSWD:ALL", } u_config = util.mergemanydict([def_base_config, config]) - distro.create_user(**u_config) + distro.create_user(user, **u_config) + log.info("Added default '%s' user with passwordless sudo", user) for (user, config) in users.items(): distro.create_user(user, **config) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 361d2c05..3de5be36 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -400,7 +400,8 @@ class Distro(object): } else: LOG.warn(("Distro has not provided a default user " - "creation. No default user will be normalized.")) + "for creation. 
No default user will be " + "normalized.")) users.pop('default', None) except NotImplementedError: LOG.warn(("Distro has not implemented default user " diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 073fbd8f..1a46c540 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -1,11 +1,11 @@ -# add groups to the system +# Add groups to the system # The following example adds the ubuntu group with members foo and bar and # the group cloud-users. groups: - ubuntu: [foo,bar] - cloud-users -# add users to the system. Users are added after groups are added. +# Add users to the system. Users are added after groups are added. users: - default - name: foobar @@ -81,16 +81,18 @@ users: # directive. # system: Create the user as a system user. This means no home directory. # + # Default user creation: # -# Unless you define users, you will get a 'ubuntu' user on buntu systems with the +# Unless you define users, you will get a 'ubuntu' user on ubuntu systems with the # legacy permission (no password sudo, locked user, etc). If however, you want -# to have the ubuntu user in addition to other users, you need to instruct +# to have the 'ubuntu' user in addition to other users, you need to instruct # cloud-init that you also want the default user. To do this use the following # syntax: # users: # - default # - bob +# - .... # foobar: ... # # users[0] (the first user in users) overrides the user directive. diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index caf479cd..d636bb84 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -78,21 +78,21 @@ class TestUGNormalize(MockerTestCase): 'default': True, } } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertEquals('bob', def_user['name']) ug_cfg = { 'users': { 'default': 'yes', } } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertEquals('bob', def_user['name']) ug_cfg = { 'users': { 'default': '1', } } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertEquals('bob', def_user['name']) def test_users_simple_dict_no(self): @@ -102,14 +102,14 @@ class TestUGNormalize(MockerTestCase): 'default': False, } } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertEquals({}, def_user) ug_cfg = { 'users': { 'default': 'no', } } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertEquals({}, def_user) def test_users_simple_csv(self): @@ -117,7 +117,7 @@ class TestUGNormalize(MockerTestCase): ug_cfg = { 'users': 'joe,bob', } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, _def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) @@ -131,7 +131,7 @@ class TestUGNormalize(MockerTestCase): 'bob' ], } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, 
_def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) @@ -144,7 +144,7 @@ class TestUGNormalize(MockerTestCase): {'name': 'default', 'blah': True} ], } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertIn('bob', def_user['name']) self.assertEquals(",".join(distro.get_default_user_groups()), def_user['config']['groups']) @@ -159,7 +159,7 @@ class TestUGNormalize(MockerTestCase): 'default', ], } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertIn('bob', def_user['name']) self.assertEquals(",".join(distro.get_default_user_groups()), def_user['config']['groups']) @@ -174,7 +174,7 @@ class TestUGNormalize(MockerTestCase): {'name': 'bob'}, ], } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, _def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({'tr_me': True}, users['joe']) @@ -188,7 +188,7 @@ class TestUGNormalize(MockerTestCase): {'name': 'bob'}, ], } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, _def_user), _groups) = distro.normalize_users_groups(ug_cfg) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) -- cgit v1.2.3 From 4b159cdb12d1ceb3ed7a7a2b044182890618889f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 21 Sep 2012 17:59:14 -0700 Subject: Add in another helper that can understand the 'etc/hosts' format and add in a unit test to make sure that format can be correctly handled and added onto in a nice manner + update the distro code to use this new code instead of the previous function that did the same thing. --- cloudinit/distros/__init__.py | 69 ++++++++++++++++--------------- cloudinit/distros/helpers.py | 96 ++++++++++++++++++++++++++++++++++++------- 2 files changed, 118 insertions(+), 47 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3e9d934d..bf4317f1 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -34,6 +34,8 @@ from cloudinit import log as logging from cloudinit import ssh_util from cloudinit import util +from cloudinit.distros import helpers + # TODO(harlowja): Make this via config?? 
IFACE_ACTIONS = { 'up': ['ifup', '--all'], @@ -152,42 +154,43 @@ class Distro(object): return "127.0.0.1" def update_etc_hosts(self, hostname, fqdn): - # Format defined at - # http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts - header = "# Added by cloud-init" - real_header = "%s on %s" % (header, util.time_rfc2822()) + header = '' + if os.path.exists('/etc/hosts'): + eh = helpers.HostsConf(util.load_file("/etc/hosts")) + else: + eh = helpers.HostsConf('') + header = "# Added by cloud-init" + header = "%s on %s" % (header, util.time_rfc2822()) local_ip = self._get_localhost_ip() - hosts_line = "%s\t%s %s" % (local_ip, fqdn, hostname) - new_etchosts = StringIO() - need_write = False - need_change = True - hosts_ro_fn = self._paths.join(True, "/etc/hosts") - for line in util.load_file(hosts_ro_fn).splitlines(): - if line.strip().startswith(header): - continue - if not line.strip() or line.strip().startswith("#"): - new_etchosts.write("%s\n" % (line)) - continue - split_line = [s.strip() for s in line.split()] - if len(split_line) < 2: - new_etchosts.write("%s\n" % (line)) - continue - (ip, hosts) = split_line[0], split_line[1:] - if ip == local_ip: - if sorted([hostname, fqdn]) == sorted(hosts): - need_change = False - if need_change: - line = "%s\n%s" % (real_header, hosts_line) + prev_info = eh.get_entry(local_ip) + need_change = False + if not prev_info: + eh.add_entry(local_ip, fqdn, hostname) + need_change = True + else: + need_change = True + for entry in prev_info: + if sorted(entry) == sorted([fqdn, hostname]): + # Exists already, leave it be need_change = False - need_write = True - new_etchosts.write("%s\n" % (line)) + break + if need_change: + # Doesn't exist, change the first + # entry to be this entry + new_entries = list(prev_info) + new_entries[0] = [fqdn, hostname] + eh.del_entries(local_ip) + for entry in new_entries: + if len(entry) == 1: + eh.add_entry(local_ip, entry[0]) + elif len(entry) >= 2: + eh.add_entry(local_ip, *entry) if need_change: - new_etchosts.write("%s\n%s\n" % (real_header, hosts_line)) - need_write = True - if need_write: - contents = new_etchosts.getvalue() - util.write_file(self._paths.join(False, "/etc/hosts"), - contents, mode=0644) + contents = StringIO() + if header: + contents.write("%s\n" % (header)) + contents.write("%s\n" % (eh)) + util.write_file("/etc/hosts", contents.getvalue(), mode=0644) def _interface_action(self, action): if action not in IFACE_ACTIONS: diff --git a/cloudinit/distros/helpers.py b/cloudinit/distros/helpers.py index 251d5051..916935eb 100644 --- a/cloudinit/distros/helpers.py +++ b/cloudinit/distros/helpers.py @@ -23,6 +23,87 @@ from StringIO import StringIO from cloudinit import util +def _chop_comment(text, comment_chars): + comment_locations = [text.find(c) for c in comment_chars] + comment_locations = [c for c in comment_locations if c != -1] + if not comment_locations: + return (text, '') + min_comment = min(comment_locations) + before_comment = text[0:min_comment] + comment = text[min_comment:] + return (before_comment, comment) + + +# See: man hosts +# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts +class HostsConf(object): + def __init__(self, text): + self._text = text + self._contents = None + + def parse(self): + if self._contents is None: + self._contents = self._parse(self._text) + + def get_entry(self, ip): + self.parse() + options = [] + for (line_type, components) in self._contents: + if line_type == 'option': + (pieces, _tail) = components + if len(pieces) and pieces[0] == ip: + options.append(pieces[1:]) + return 
options + + def del_entries(self, ip): + self.parse() + n_entries = [] + for (line_type, components) in self._contents: + if line_type != 'option': + n_entries.append((line_type, components)) + continue + else: + (pieces, _tail) = components + if len(pieces) and pieces[0] == ip: + pass + elif len(pieces): + n_entries.append((line_type, list(components))) + self._contents = n_entries + + def add_entry(self, ip, canonical_hostname, *aliases): + self.parse() + self._contents.append(('option', + ([ip, canonical_hostname] + list(aliases), ''))) + + def _parse(self, contents): + entries = [] + for line in contents.splitlines(): + if not len(line.strip()): + entries.append(('blank', [line])) + continue + (head, tail) = _chop_comment(line.strip(), '#') + if not len(head): + entries.append(('all_comment', [line])) + continue + entries.append(('option', [head.split(None), tail])) + return entries + + def __str__(self): + self.parse() + contents = StringIO() + for (line_type, components) in self._contents: + if line_type == 'blank': + contents.write("%s\n") + elif line_type == 'all_comment': + contents.write("%s\n" % (components[0])) + elif line_type == 'option': + (pieces, tail) = components + pieces = [str(p) for p in pieces] + pieces = "\t".join(pieces) + contents.write("%s%s\n" % (pieces, tail)) + return contents.getvalue() + + # See: man resolv.conf class ResolvConf(object): def __init__(self, text): @@ -151,20 +232,7 @@ class ResolvConf(object): if not sline: entries.append(('blank', [line])) continue - comment_s_loc = sline.find(";") - comment_h_loc = sline.find("#") - comment_loc = -1 - if comment_s_loc != -1 and comment_h_loc != -1: - comment_loc = min(comment_h_loc, comment_s_loc) - elif comment_s_loc != -1: - comment_loc = comment_s_loc - elif comment_h_loc != -1: - comment_loc = comment_h_loc - head = line - tail = None - if comment_loc != -1: - head = line[:comment_loc] - tail = line[comment_loc:] + (head, tail) = _chop_comment(line, ';#') if not len(head.strip()): entries.append(('all_comment', [line])) continue -- cgit v1.2.3 From 6e0f6f3db662751c2b0bf837f2affe51b1f328a3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 21 Sep 2012 18:05:17 -0700 Subject: Don't forget the etc/hosts test. 
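
The HostsConf helper this test covers follows the same pattern as ResolvConf; a minimal usage sketch (the address and host names are made up) that mirrors what update_etc_hosts() now does:

    from cloudinit.distros import helpers

    hosts = helpers.HostsConf("\n".join([
        '# typical starting point',
        '127.0.0.1 localhost',
    ]))

    if not hosts.get_entry('127.0.1.1'):
        hosts.add_entry('127.0.1.1', 'node1.example.internal', 'node1')

    # get_entry() returns a list of [canonical, alias...] entries per address
    print(hosts.get_entry('127.0.1.1'))  # [['node1.example.internal', 'node1']]

    hosts.del_entries('127.0.0.1')       # drop every line for that address
    print(str(hosts))                    # re-serialized file contents
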
--- tests/unittests/test_distros/test_hosts.py | 41 ++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 tests/unittests/test_distros/test_hosts.py diff --git a/tests/unittests/test_distros/test_hosts.py b/tests/unittests/test_distros/test_hosts.py new file mode 100644 index 00000000..621837ab --- /dev/null +++ b/tests/unittests/test_distros/test_hosts.py @@ -0,0 +1,41 @@ +from mocker import MockerTestCase + +from cloudinit.distros import helpers + + +BASE_ETC = ''' +# Example +127.0.0.1 localhost +192.168.1.10 foo.mydomain.org foo +192.168.1.10 bar.mydomain.org bar +146.82.138.7 master.debian.org master +209.237.226.90 www.opensource.org +''' +BASE_ETC = BASE_ETC.strip() + + +class TestHostsHelper(MockerTestCase): + def test_parse(self): + eh = helpers.HostsConf(BASE_ETC) + self.assertEquals(eh.get_entry('127.0.0.1'), [['localhost']]) + self.assertEquals(eh.get_entry('192.168.1.10'), + [['foo.mydomain.org', 'foo'], + ['bar.mydomain.org', 'bar']]) + eh = str(eh) + self.assertTrue(eh.startswith('# Example')) + + def test_add(self): + eh = helpers.HostsConf(BASE_ETC) + eh.add_entry('127.0.0.0', 'blah') + self.assertEquals(eh.get_entry('127.0.0.0'), [['blah']]) + eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3') + self.assertEquals(eh.get_entry('127.0.0.3'), + [['blah', 'blah2', 'blah3']]) + + def test_del(self): + eh = helpers.HostsConf(BASE_ETC) + eh.add_entry('127.0.0.0', 'blah') + self.assertEquals(eh.get_entry('127.0.0.0'), [['blah']]) + + eh.del_entries('127.0.0.0') + self.assertEquals(eh.get_entry('127.0.0.0'), []) -- cgit v1.2.3 From d852495da6ae8eefee0ff7d2de14c398c0a87a51 Mon Sep 17 00:00:00 2001 From: Garrett Holmstrom Date: Sat, 22 Sep 2012 19:35:07 -0700 Subject: Use 0440 permissions on sudoers, not 0644 --- cloudinit/distros/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3e9d934d..612d44af 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -339,7 +339,7 @@ class Distro(object): content += "\n" if not os.path.exists(sudo_file): - util.write_file(sudo_file, content, 0644) + util.write_file(sudo_file, content, 0440) else: try: -- cgit v1.2.3 From 1278285241d017affa2d03f8023afaf2d35a9543 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 23 Sep 2012 12:39:22 -0700 Subject: Make the normalization a module level function since it has little dependence on the distros class itself. Readjust the using code to use this new module level function instead. --- cloudinit/config/cc_users_groups.py | 21 +- cloudinit/distros/__init__.py | 258 ++++++++++----------- .../test_distros/test_user_data_normalize.py | 35 +-- 3 files changed, 160 insertions(+), 154 deletions(-) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index a6ce49ac..13eb1102 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -16,6 +16,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+from cloudinit import distros from cloudinit import util from cloudinit.settings import PER_INSTANCE @@ -24,10 +25,20 @@ frequency = PER_INSTANCE def handle(name, cfg, cloud, log, _args): - distro = cloud.distro - ((users, default_user), groups) = distro.normalize_users_groups(cfg) + def_u = None + def_u_gs = None + try: + def_u = cloud.distro.get_default_user() + def_u_gs = cloud.distro.get_default_user_groups() + except NotImplementedError: + log.warn(("Distro has not implemented default user " + "creation. No default user will be added.")) + + ((users, default_user), groups) = distros.normalize_users_groups(cfg, + def_u, + def_u_gs) for (name, members) in groups.items(): - distro.create_group(name, members) + cloud.distro.create_group(name, members) if default_user: user = default_user['name'] @@ -41,8 +52,8 @@ def handle(name, cfg, cloud, log, _args): 'sudo': "ALL=(ALL) NOPASSWD:ALL", } u_config = util.mergemanydict([def_base_config, config]) - distro.create_user(user, **u_config) + cloud.distro.create_user(user, **u_config) log.info("Added default '%s' user with passwordless sudo", user) for (user, config) in users.items(): - distro.create_user(user, **config) + cloud.distro.create_user(user, **config) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3de5be36..b43d662e 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -180,13 +180,7 @@ class Distro(object): def get_default_user_groups(self): if not self.default_user_groups: return [] - def_groups = [] - if isinstance(self.default_user_groups, (str, basestring)): - def_groups = self.default_user_groups.split(",") - else: - def_groups = list(self.default_user_groups) - def_groups = list(sorted(set(def_groups))) - return def_groups + return _uniq_merge_sorted(self.default_user_groups) def create_user(self, name, **kwargs): """ @@ -294,133 +288,6 @@ class Distro(object): return True - def _normalize_groups(self, grp_cfg): - groups = {} - if isinstance(grp_cfg, (str, basestring)): - grp_cfg = grp_cfg.strip().split(",") - - if isinstance(grp_cfg, (list)): - for g in grp_cfg: - g = g.strip() - if g: - groups[g] = [] - elif isinstance(grp_cfg, (dict)): - for grp_name, grp_members in grp_cfg.items(): - if isinstance(grp_members, (str, basestring)): - r_grp_members = [] - for gc in grp_members.strip().split(','): - gc = gc.strip() - if gc and gc not in r_grp_members: - r_grp_members.append(gc) - grp_members = r_grp_members - elif not isinstance(grp_members, (list)): - raise TypeError(("Group member config must be list " - " or string types only and not %s") % - util.obj_name(grp_members)) - groups[grp_name] = grp_members - else: - raise TypeError(("Group config must be list, dict " - " or string types only and not %s") % - util.obj_name(grp_cfg)) - return groups - - def _normalize_users(self, u_cfg): - if isinstance(u_cfg, (dict)): - ad_ucfg = [] - for (k, v) in u_cfg.items(): - if isinstance(v, (bool, int, basestring, str)): - if util.is_true(v): - ad_ucfg.append(str(k)) - elif isinstance(v, (dict)): - v['name'] = k - ad_ucfg.append(v) - else: - raise TypeError(("Unmappable user value type %s" - " for key %s") % (util.obj_name(v), k)) - u_cfg = ad_ucfg - elif isinstance(u_cfg, (str, basestring)): - u_cfg = u_cfg.strip().split(",") - - users = {} - for user_config in u_cfg: - if isinstance(user_config, (str, basestring)): - for u in user_config.strip().split(","): - u = u.strip() - if u and u not in users: - users[u] = {} - elif isinstance(user_config, (dict)): - if 'name' in 
user_config: - n = user_config.pop('name') - prev_config = users.get(n) or {} - users[n] = util.mergemanydict([prev_config, - user_config]) - else: - # Assume the default user then - prev_config = users.get('default') or {} - users['default'] = util.mergemanydict([prev_config, - user_config]) - elif isinstance(user_config, (bool, int)): - pass - else: - raise TypeError(("User config must be dictionary " - " or string types only and not %s") % - util.obj_name(user_config)) - - # Ensure user options are in the right python friendly format - if users: - c_users = {} - for (uname, uconfig) in users.items(): - c_uconfig = {} - for (k, v) in uconfig.items(): - k = k.replace('-', '_').strip() - if k: - c_uconfig[k] = v - c_users[uname] = c_uconfig - users = c_users - - # Fixup the default user into the real - # default user name and extract it - default_user = {} - if users and 'default' in users: - try: - def_config = users.pop('default') - def_user = self.get_default_user() - def_groups = self.get_default_user_groups() - if def_user: - u_config = users.pop(def_user, None) or {} - u_groups = u_config.get('groups') or [] - if isinstance(u_groups, (str, basestring)): - u_groups = u_groups.strip().split(",") - u_groups.extend(def_groups) - u_groups = set([x.strip() for x in u_groups if x.strip()]) - u_config['groups'] = ",".join(sorted(u_groups)) - default_user = { - 'name': def_user, - 'config': util.mergemanydict([def_config, u_config]), - } - else: - LOG.warn(("Distro has not provided a default user " - "for creation. No default user will be " - "normalized.")) - users.pop('default', None) - except NotImplementedError: - LOG.warn(("Distro has not implemented default user " - "creation. No default user will be normalized.")) - users.pop('default', None) - - return (default_user, users) - - def normalize_users_groups(self, ug_cfg): - users = {} - groups = {} - default_user = {} - if 'groups' in ug_cfg: - groups = self._normalize_groups(ug_cfg['groups']) - - if 'users' in ug_cfg: - default_user, users = self._normalize_users(ug_cfg['users']) - return ((users, default_user), groups) - def write_sudo_rules(self, user, rules, @@ -521,6 +388,129 @@ def _get_arch_package_mirror_info(package_mirrors, arch): return default +def _uniq_merge_sorted(*lists): + return sorted(_uniq_merge(*lists)) + + +def _uniq_merge(*lists): + combined_list = [] + for a_list in lists: + if isinstance(a_list, (str, basestring)): + a_list = a_list.strip().split(",") + else: + a_list = [str(a) for a in a_list] + a_list = [a.strip() for a in a_list if a.strip()] + combined_list.extend(a_list) + uniq_list = [] + for a in combined_list: + if a in uniq_list: + continue + else: + uniq_list.append(a) + return uniq_list + + +def _normalize_groups(grp_cfg): + if isinstance(grp_cfg, (str, basestring, list)): + c_grp_cfg = {} + for i in _uniq_merge(grp_cfg): + c_grp_cfg[i] = [] + grp_cfg = c_grp_cfg + + groups = {} + if isinstance(grp_cfg, (dict)): + for (grp_name, grp_members) in grp_cfg.items(): + groups[grp_name] = _uniq_merge_sorted(grp_members) + else: + raise TypeError(("Group config must be list, dict " + " or string types only and not %s") % + util.obj_name(grp_cfg)) + return groups + + +def _normalize_users(u_cfg, def_user=None, def_user_groups=None): + if isinstance(u_cfg, (dict)): + ad_ucfg = [] + for (k, v) in u_cfg.items(): + if isinstance(v, (bool, int, basestring, str, float)): + if util.is_true(v): + ad_ucfg.append(str(k)) + elif isinstance(v, (dict)): + v['name'] = k + ad_ucfg.append(v) + else: + raise 
TypeError(("Unmappable user value type %s" + " for key %s") % (util.obj_name(v), k)) + u_cfg = ad_ucfg + elif isinstance(u_cfg, (str, basestring)): + u_cfg = _uniq_merge_sorted(u_cfg) + + users = {} + for user_config in u_cfg: + if isinstance(user_config, (str, basestring, list)): + for u in _uniq_merge(user_config): + if u and u not in users: + users[u] = {} + elif isinstance(user_config, (dict)): + if 'name' in user_config: + n = user_config.pop('name') + prev_config = users.get(n) or {} + users[n] = util.mergemanydict([prev_config, + user_config]) + else: + # Assume the default user then + prev_config = users.get('default') or {} + users['default'] = util.mergemanydict([prev_config, + user_config]) + else: + raise TypeError(("User config must be dictionary/list " + " or string types only and not %s") % + util.obj_name(user_config)) + + # Ensure user options are in the right python friendly format + if users: + c_users = {} + for (uname, uconfig) in users.items(): + c_uconfig = {} + for (k, v) in uconfig.items(): + k = k.replace('-', '_').strip() + if k: + c_uconfig[k] = v + c_users[uname] = c_uconfig + users = c_users + + # Fixup the default user into the real + # default user name and extract it + default_user = {} + if users and 'default' in users: + def_config = users.pop('default') + def_groups = def_user_groups or [] + if def_user: + u_config = users.pop(def_user, None) or {} + u_groups = u_config.get('groups') or [] + u_groups = _uniq_merge_sorted(u_groups, def_groups) + u_config['groups'] = ",".join(u_groups) + default_user = { + 'name': def_user, + 'config': util.mergemanydict([def_config, u_config]), + } + + return (default_user, users) + + +def normalize_users_groups(cfg, def_user=None, def_user_groups=None): + users = {} + groups = {} + default_user = {} + if 'groups' in cfg: + groups = _normalize_groups(cfg['groups']) + if 'users' in cfg: + (default_user, users) = _normalize_users(cfg['users'], + def_user, + def_user_groups) + return ((users, default_user), groups) + + def fetch(name): locs = importer.find_module(name, ['', __name__], diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index d636bb84..46733452 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -19,12 +19,17 @@ class TestUGNormalize(MockerTestCase): distro.default_user_groups = def_groups return distro + def _norm(self, cfg, distro): + def_u = distro.get_default_user() + def_u_gs = distro.get_default_user_groups() + return distros.normalize_users_groups(cfg, def_u, def_u_gs) + def test_basic_groups(self): distro = self._make_distro('ubuntu') ug_cfg = { 'groups': ['bob'], } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, def_user), groups) = self._norm(ug_cfg, distro) self.assertIn('bob', groups) self.assertEquals({}, users) self.assertEquals({}, def_user) @@ -34,7 +39,7 @@ class TestUGNormalize(MockerTestCase): ug_cfg = { 'groups': 'bob,joe,steve', } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, def_user), groups) = self._norm(ug_cfg, distro) self.assertIn('bob', groups) self.assertIn('joe', groups) self.assertIn('steve', groups) @@ -46,7 +51,7 @@ class TestUGNormalize(MockerTestCase): ug_cfg = { 'groups': ['bob', 'joe', 'steve',] } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, def_user), groups) = self._norm(ug_cfg, distro) self.assertIn('bob', 
groups) self.assertIn('joe', groups) self.assertIn('steve', groups) @@ -62,7 +67,7 @@ class TestUGNormalize(MockerTestCase): 'steve': [], } } - ((users, def_user), groups) = distro.normalize_users_groups(ug_cfg) + ((users, def_user), groups) = self._norm(ug_cfg, distro) self.assertIn('bob', groups) self.assertEquals(['s'], groups['bob']) self.assertEquals([], groups['joe']) @@ -78,21 +83,21 @@ class TestUGNormalize(MockerTestCase): 'default': True, } } - ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = self._norm(ug_cfg, distro) self.assertEquals('bob', def_user['name']) ug_cfg = { 'users': { 'default': 'yes', } } - ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = self._norm(ug_cfg, distro) self.assertEquals('bob', def_user['name']) ug_cfg = { 'users': { 'default': '1', } } - ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = self._norm(ug_cfg, distro) self.assertEquals('bob', def_user['name']) def test_users_simple_dict_no(self): @@ -102,14 +107,14 @@ class TestUGNormalize(MockerTestCase): 'default': False, } } - ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = self._norm(ug_cfg, distro) self.assertEquals({}, def_user) ug_cfg = { 'users': { 'default': 'no', } } - ((_users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((_users, def_user), _groups) = self._norm(ug_cfg, distro) self.assertEquals({}, def_user) def test_users_simple_csv(self): @@ -117,7 +122,7 @@ class TestUGNormalize(MockerTestCase): ug_cfg = { 'users': 'joe,bob', } - ((users, _def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((users, _def_user), _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) @@ -131,7 +136,7 @@ class TestUGNormalize(MockerTestCase): 'bob' ], } - ((users, _def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((users, _def_user), _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) @@ -144,7 +149,7 @@ class TestUGNormalize(MockerTestCase): {'name': 'default', 'blah': True} ], } - ((users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((users, def_user), _groups) = self._norm(ug_cfg, distro) self.assertIn('bob', def_user['name']) self.assertEquals(",".join(distro.get_default_user_groups()), def_user['config']['groups']) @@ -159,7 +164,7 @@ class TestUGNormalize(MockerTestCase): 'default', ], } - ((users, def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((users, def_user), _groups) = self._norm(ug_cfg, distro) self.assertIn('bob', def_user['name']) self.assertEquals(",".join(distro.get_default_user_groups()), def_user['config']['groups']) @@ -174,7 +179,7 @@ class TestUGNormalize(MockerTestCase): {'name': 'bob'}, ], } - ((users, _def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((users, _def_user), _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({'tr_me': True}, users['joe']) @@ -188,7 +193,7 @@ class TestUGNormalize(MockerTestCase): {'name': 'bob'}, ], } - ((users, _def_user), _groups) = distro.normalize_users_groups(ug_cfg) + ((users, _def_user), _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) -- cgit v1.2.3 From 
0a4f91c84c096e0b0df2e5a1d42c38609ae7fa93 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Sep 2012 14:40:41 -0400 Subject: send stderr from write-ssh-key-fingerprints to stdout This changes all output write-ssh-key-fingerprints to go to its stdout by redirecting stderr to stdout. The reason for this is that cc_keys_to_console.py was swallowing stderr and not replaying it to /dev/console. Ideally, we'd have a way in 'util.subp' to do effectively the same thing as we're doing here in the shell script. LP: #1055688 --- tools/write-ssh-key-fingerprints | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints index 5723c989..130fc0c1 100755 --- a/tools/write-ssh-key-fingerprints +++ b/tools/write-ssh-key-fingerprints @@ -1,4 +1,5 @@ #!/bin/sh +exec 2>&1 fp_blist=",${1}," key_blist=",${2}," { @@ -15,8 +16,6 @@ done echo "-----END SSH HOST KEY FINGERPRINTS-----" echo "#############################################################" -} | logger -p user.info -s -t "ec2" - echo -----BEGIN SSH HOST KEY KEYS----- for f in /etc/ssh/ssh_host_*key.pub; do [ -f "$f" ] || continue @@ -26,3 +25,5 @@ for f in /etc/ssh/ssh_host_*key.pub; do cat $f done echo -----END SSH HOST KEY KEYS----- + +} | logger -p user.info --stderr -t "ec2" -- cgit v1.2.3 From 54e11417c93b646746a704a5ea85b47c3831fed4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Sep 2012 14:45:31 -0400 Subject: do not create 'sems' directory. 'sem' is proper instance/ path --- cloudinit/stages.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index af902925..4ed1a750 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -240,7 +240,7 @@ class Init(object): return ds def _get_instance_subdirs(self): - return ['handlers', 'scripts', 'sems'] + return ['handlers', 'scripts', 'sem'] def _get_ipath(self, subname=None): # Force a check to see if anything -- cgit v1.2.3 From ad22d407085009dcd1c860185e29f21858cdd968 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Sep 2012 14:48:44 -0400 Subject: write-ssh-key-fingerprints: do not send HOST KEYS through logger In the previous commit to htis file I had wrapped the writing of 'BEGIN SSH HOST KEY KEYS' to go through logger. This would cause the keys to be prefixed with 'ec2:' which, previously they were not. That would break existing users *and* make it more difficult to consume that data, which was explicitly added to be easy to consume. 
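As an aside to the fingerprint changes above: the first commit message wishes for a util.subp equivalent of the shell's 'exec 2>&1'. A rough standard-library sketch of that behaviour follows; it is not part of these patches, and run_merged is an illustrative name rather than an existing cloud-init helper.

    import subprocess

    def run_merged(cmd):
        # Fold stderr into stdout, roughly what 'exec 2>&1' does in the
        # shell script; callers then see one combined stream.
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)

    # e.g. run_merged(['sh', '-c', 'echo out; echo err 1>&2'])
    # returns both lines in a single capture.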
--- tools/write-ssh-key-fingerprints | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints index 130fc0c1..aa1f3c38 100755 --- a/tools/write-ssh-key-fingerprints +++ b/tools/write-ssh-key-fingerprints @@ -16,6 +16,8 @@ done echo "-----END SSH HOST KEY FINGERPRINTS-----" echo "#############################################################" +} | logger -p user.info --stderr -t "ec2" + echo -----BEGIN SSH HOST KEY KEYS----- for f in /etc/ssh/ssh_host_*key.pub; do [ -f "$f" ] || continue @@ -25,5 +27,3 @@ for f in /etc/ssh/ssh_host_*key.pub; do cat $f done echo -----END SSH HOST KEY KEYS----- - -} | logger -p user.info --stderr -t "ec2" -- cgit v1.2.3 From cde52cc8449d82d5bdce2fbb73516bee144e293c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Sep 2012 16:54:51 -0400 Subject: fix make pep8 --- cloudinit/distros/fedora.py | 2 +- cloudinit/patcher.py | 1 + cloudinit/sources/__init__.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cloudinit/distros/fedora.py b/cloudinit/distros/fedora.py index 9f76a116..e7092dd8 100644 --- a/cloudinit/distros/fedora.py +++ b/cloudinit/distros/fedora.py @@ -28,5 +28,5 @@ LOG = logging.getLogger(__name__) class Distro(rhel.Distro): - distro_name = 'fedora' + distro_name = 'fedora' default_user = 'ec2-user' diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py index 8921a79a..fa140f04 100644 --- a/cloudinit/patcher.py +++ b/cloudinit/patcher.py @@ -39,6 +39,7 @@ def _patch_logging(): # sys.stderr using a fallback logger fallback_handler = QuietStreamHandler(sys.stderr) fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT)) + def handleError(self, record): try: fallback_handler.handle(record) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 6f126091..04083d0c 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -173,7 +173,7 @@ class DataSource(object): # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx lhost = self.metadata['local-hostname'] if util.is_ipv4(lhost): - toks = [ "ip-%s" % lhost.replace(".", "-") ] + toks = ["ip-%s" % lhost.replace(".", "-")] else: toks = lhost.split(".") -- cgit v1.2.3 From e3b29659672acd757122bc5f90a13670b96b6952 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Sep 2012 16:59:23 -0400 Subject: fix pylint --- cloudinit/patcher.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloudinit/patcher.py b/cloudinit/patcher.py index fa140f04..0f3c034e 100644 --- a/cloudinit/patcher.py +++ b/cloudinit/patcher.py @@ -23,7 +23,8 @@ import logging import sys # Default fallback format -FALL_FORMAT = 'FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: %(message)s' +FALL_FORMAT = ('FALLBACK: %(asctime)s - %(filename)s[%(levelname)s]: ' + + '%(message)s') class QuietStreamHandler(logging.StreamHandler): @@ -40,7 +41,7 @@ def _patch_logging(): fallback_handler = QuietStreamHandler(sys.stderr) fallback_handler.setFormatter(logging.Formatter(FALL_FORMAT)) - def handleError(self, record): + def handleError(self, record): # pylint: disable=W0613 try: fallback_handler.handle(record) fallback_handler.flush() -- cgit v1.2.3 From 70cc7536f45a8d7052617ad88e2816291db0a309 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Sep 2012 17:13:38 -0400 Subject: DataSourceMAAS: if a oauth request fails due to 403 try updating local time In the event of a 403 (Unauthorized) in oauth, try set a 'oauth_clockskew' variable. 
In future headers, use a time created by 'time.time() + self.oauth_clockskew'. The idea here is that if the local time is bad (or even if the server time is bad) we will essentially use something that should be similar to the remote clock. This fixes LP: #978127. LP: #978127 --- cloudinit/sources/DataSourceMAAS.py | 43 +++++++++++++++++++++++++++++++++---- cloudinit/url_helper.py | 11 ++++++++-- 2 files changed, 48 insertions(+), 6 deletions(-) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index c568d365..581e9a4b 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from email.utils import parsedate import errno import oauth.oauth as oauth import os @@ -46,6 +47,7 @@ class DataSourceMAAS(sources.DataSource): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.base_url = None self.seed_dir = os.path.join(paths.seed_dir, 'maas') + self.oauth_clockskew = None def __str__(self): return "%s [%s]" % (util.obj_name(self), self.base_url) @@ -95,11 +97,17 @@ class DataSourceMAAS(sources.DataSource): return {} consumer_secret = mcfg.get('consumer_secret', "") + + timestamp = None + if self.oauth_clockskew: + timestamp = int(time.time()) + self.oauth_clockskew + return oauth_headers(url=url, consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'], token_secret=mcfg['token_secret'], - consumer_secret=consumer_secret) + consumer_secret=consumer_secret, + timestamp=timestamp) def wait_for_metadata_service(self, url): mcfg = self.ds_cfg @@ -124,7 +132,7 @@ class DataSourceMAAS(sources.DataSource): check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, status_cb=LOG.warn, + timeout=timeout, exception_cb=self._except_cb, headers_cb=self.md_headers) if url: @@ -135,6 +143,26 @@ class DataSourceMAAS(sources.DataSource): return bool(url) + def _except_cb(self, msg, exception): + if not (isinstance(exception, urllib2.HTTPError) and + exception.code == 403): + return + if 'date' not in exception.headers: + LOG.warn("date field not in 403 headers") + return + + date = exception.headers['date'] + + try: + ret_time = time.mktime(parsedate(date)) + except: + LOG.warn("failed to convert datetime '%s'") + return + + self.oauth_clockskew = int(ret_time - time.time()) + LOG.warn("set oauth clockskew to %d" % self.oauth_clockskew) + return + def read_maas_seed_dir(seed_d): """ @@ -229,13 +257,20 @@ def check_seed_contents(content, seed): return (userdata, md) -def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret): +def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, + timestamp=None): consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) token = oauth.OAuthToken(token_key, token_secret) + + if timestamp is None: + ts = int(time.time()) + else: + ts = timestamp + params = { 'oauth_version': "1.0", 'oauth_nonce': oauth.generate_nonce(), - 'oauth_timestamp': int(time.time()), + 'oauth_timestamp': ts, 'oauth_token': token.key, 'oauth_consumer_key': consumer.key, } diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 732d6aec..f3e3fd7e 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -136,7 +136,8 @@ def readurl(url, data=None, timeout=None, def wait_for_url(urls, max_wait=None, timeout=None, - status_cb=None, 
headers_cb=None, sleep_time=1): + status_cb=None, headers_cb=None, sleep_time=1, + exception_cb=None): """ urls: a list of urls to try max_wait: roughly the maximum time to wait before giving up @@ -146,6 +147,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, status_cb: call method with string message when a url is not available headers_cb: call method with single argument of url to get headers for request. + exception_cb: call method with 2 arguments 'msg' (per status_cb) and + 'exception', the exception that occurred. the idea of this routine is to wait for the EC2 metdata service to come up. On both Eucalyptus and EC2 we have seen the case where @@ -164,7 +167,7 @@ def wait_for_url(urls, max_wait=None, timeout=None, """ start_time = time.time() - def log_status_cb(msg): + def log_status_cb(msg, exc=None): LOG.debug(msg) if status_cb is None: @@ -196,8 +199,10 @@ def wait_for_url(urls, max_wait=None, timeout=None, resp = readurl(url, headers=headers, timeout=timeout) if not resp.contents: reason = "empty response [%s]" % (resp.code) + e = ValueError(reason) elif not resp.ok(): reason = "bad status code [%s]" % (resp.code) + e = ValueError(reason) else: return url except urllib2.HTTPError as e: @@ -214,6 +219,8 @@ def wait_for_url(urls, max_wait=None, timeout=None, time_taken, max_wait, reason) status_cb(status_msg) + if exception_cb: + exception_cb(msg=status_msg, exception=e) if timeup(max_wait, start_time): break -- cgit v1.2.3 From c780ad5898f54741037ad66c28c3423aa0495fba Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 24 Sep 2012 17:56:42 -0700 Subject: Oopies, missed u. --- cloudinit/distros/rhel.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 67d80bf4..1ea7952b 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -144,8 +144,6 @@ class Distro(distros.Distro): if not exists: lines.insert(0, _make_header()) util.write_file(fn, "\n".join(lines), 0644) -======= ->>>>>>> MERGE-SOURCE def set_hostname(self, hostname): self._write_hostname(hostname, '/etc/sysconfig/network') -- cgit v1.2.3 From 58be2ec4a3a3213700905f6bd3e6752775369471 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 24 Sep 2012 20:58:05 -0400 Subject: fix pep8 --- cloudinit/distros/rhel.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 1ea7952b..1a47fc33 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -1,4 +1,4 @@ -# vi: ts=4 expandtab + # vi: ts=4 expandtab # # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012 Hewlett-Packard Development Company, L.P. @@ -56,6 +56,7 @@ D_QUOTE_CHARS = { '`': '\`', } + def _make_sysconfig_bool(val): if val: return 'yes' @@ -160,7 +161,7 @@ class Distro(distros.Distro): def _write_hostname(self, hostname, out_fn): host_cfg = { - 'HOSTNAME': hostname, + 'HOSTNAME': hostname, } self._update_sysconfig_file(out_fn, host_cfg) -- cgit v1.2.3 From 0be941f74f54ecafcb628451f531b90f30723fbc Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 24 Sep 2012 18:30:35 -0700 Subject: Refactor the ug normalization to take in a distro and produce a user and group list. Clean this up to be simpler as well as handle the old 'user' case when it exists in configuration. 
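In short, normalization now takes the distro object itself and yields only (users, groups); the distro's default-user details are folded into the user map rather than returned separately. A minimal sketch of the resulting calling convention, as the cc_users_groups diff below adopts it (apply_users_groups and the cfg values are illustrative only):

    from cloudinit import distros

    def apply_users_groups(cfg, distro):
        # cfg is the merged cloud-config dict; the distro supplies its
        # own default-user settings via get_default_user().
        (users, groups) = distros.normalize_users_groups(cfg, distro)
        for (name, members) in groups.items():
            distro.create_group(name, members)
        for (user, user_cfg) in users.items():
            distro.create_user(user, **user_cfg)

    # e.g. cfg = {'users': ['default', {'name': 'joe'}], 'groups': 'admin'}
    #      apply_users_groups(cfg, cloud.distro)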
--- cloudinit/config/cc_users_groups.py | 29 +----- cloudinit/distros/__init__.py | 80 ++++++++++------ .../test_distros/test_user_data_normalize.py | 103 ++++++++++++++------- 3 files changed, 122 insertions(+), 90 deletions(-) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 13eb1102..464f55c3 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -25,35 +25,8 @@ frequency = PER_INSTANCE def handle(name, cfg, cloud, log, _args): - def_u = None - def_u_gs = None - try: - def_u = cloud.distro.get_default_user() - def_u_gs = cloud.distro.get_default_user_groups() - except NotImplementedError: - log.warn(("Distro has not implemented default user " - "creation. No default user will be added.")) - - ((users, default_user), groups) = distros.normalize_users_groups(cfg, - def_u, - def_u_gs) + (users, groups) = distros.normalize_users_groups(cfg, cloud.distro) for (name, members) in groups.items(): cloud.distro.create_group(name, members) - - if default_user: - user = default_user['name'] - config = default_user['config'] - def_base_config = { - 'plain_text_passwd': user, - 'home': "/home/%s" % user, - 'shell': "/bin/bash", - 'lock_passwd': True, - 'gecos': "%s%s" % (user.title()), - 'sudo': "ALL=(ALL) NOPASSWD:ALL", - } - u_config = util.mergemanydict([def_base_config, config]) - cloud.distro.create_user(user, **u_config) - log.info("Added default '%s' user with passwordless sudo", user) - for (user, config) in users.items(): cloud.distro.create_user(user, **config) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index b43d662e..7340fa84 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -175,12 +175,20 @@ class Distro(object): return False def get_default_user(self): - return self.default_user - - def get_default_user_groups(self): - if not self.default_user_groups: - return [] - return _uniq_merge_sorted(self.default_user_groups) + if not self.default_user: + return None + user_cfg = { + 'name': self.default_user, + 'plain_text_passwd': self.default_user, + 'home': "/home/%s" % (self.default_user), + 'shell': "/bin/bash", + 'lock_passwd': True, + 'gecos': "%s" % (self.default_user.title()), + 'sudo': "ALL=(ALL) NOPASSWD:ALL", + } + if self.default_user_groups: + user_cfg['groups'] = _uniq_merge_sorted(self.default_user_groups) + return user_cfg def create_user(self, name, **kwargs): """ @@ -428,7 +436,7 @@ def _normalize_groups(grp_cfg): return groups -def _normalize_users(u_cfg, def_user=None, def_user_groups=None): +def _normalize_users(u_cfg, def_user_cfg=None): if isinstance(u_cfg, (dict)): ad_ucfg = [] for (k, v) in u_cfg.items(): @@ -480,35 +488,55 @@ def _normalize_users(u_cfg, def_user=None, def_user_groups=None): users = c_users # Fixup the default user into the real - # default user name and extract it - default_user = {} + # default user name and replace it... 
if users and 'default' in users: def_config = users.pop('default') - def_groups = def_user_groups or [] - if def_user: - u_config = users.pop(def_user, None) or {} - u_groups = u_config.get('groups') or [] - u_groups = _uniq_merge_sorted(u_groups, def_groups) - u_config['groups'] = ",".join(u_groups) - default_user = { - 'name': def_user, - 'config': util.mergemanydict([def_config, u_config]), - } + if def_user_cfg: + def_user = def_user_cfg.pop('name') + def_groups = def_user_cfg.pop('groups', []) + parsed_config = users.pop(def_user, {}) + users_groups = _uniq_merge_sorted(parsed_config.get('groups', []), + def_groups) + parsed_config['groups'] = ",".join(users_groups) + users[def_user] = util.mergemanydict([def_user_cfg, + def_config, + parsed_config]) - return (default_user, users) + return users -def normalize_users_groups(cfg, def_user=None, def_user_groups=None): +def normalize_users_groups(cfg, distro): users = {} groups = {} - default_user = {} if 'groups' in cfg: groups = _normalize_groups(cfg['groups']) + old_user = None + if 'user' in cfg: + old_user = str(cfg['user']) if 'users' in cfg: - (default_user, users) = _normalize_users(cfg['users'], - def_user, - def_user_groups) - return ((users, default_user), groups) + default_user_config = None + try: + default_user_config = distro.get_default_user() + except NotImplementedError: + LOG.warn(("Distro has not implemented default user " + "access. No default user will be normalized.")) + base_users = cfg['users'] + if isinstance(base_users, (list)): + if len(base_users) and old_user: + # The old user replaces user[0] + base_users[0] = {'name': old_user} + elif not base_users and old_user: + base.append({'name': old_user}) + elif isinstance(base_users, (dict)): + # Sorry order not possible + if old_user and old_user not in base_users: + base_users[old_user] = True + elif isinstance(base_users, (str, basestring)): + # Just append it on to be re-parsed later + if old_user: + base_users += ",%s" % (old_user) + users = _normalize_users(base_users, default_user_config) + return (users, groups) def fetch(name): diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index 46733452..4a4e1a29 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -20,43 +20,38 @@ class TestUGNormalize(MockerTestCase): return distro def _norm(self, cfg, distro): - def_u = distro.get_default_user() - def_u_gs = distro.get_default_user_groups() - return distros.normalize_users_groups(cfg, def_u, def_u_gs) + return distros.normalize_users_groups(cfg, distro) def test_basic_groups(self): distro = self._make_distro('ubuntu') ug_cfg = { 'groups': ['bob'], } - ((users, def_user), groups) = self._norm(ug_cfg, distro) + (users, groups) = self._norm(ug_cfg, distro) self.assertIn('bob', groups) self.assertEquals({}, users) - self.assertEquals({}, def_user) def test_csv_groups(self): distro = self._make_distro('ubuntu') ug_cfg = { 'groups': 'bob,joe,steve', } - ((users, def_user), groups) = self._norm(ug_cfg, distro) + (users, groups) = self._norm(ug_cfg, distro) self.assertIn('bob', groups) self.assertIn('joe', groups) self.assertIn('steve', groups) self.assertEquals({}, users) - self.assertEquals({}, def_user) def test_more_groups(self): distro = self._make_distro('ubuntu') ug_cfg = { 'groups': ['bob', 'joe', 'steve',] } - ((users, def_user), groups) = self._norm(ug_cfg, distro) + (users, groups) = self._norm(ug_cfg, 
distro) self.assertIn('bob', groups) self.assertIn('joe', groups) self.assertIn('steve', groups) self.assertEquals({}, users) - self.assertEquals({}, def_user) def test_member_groups(self): distro = self._make_distro('ubuntu') @@ -67,14 +62,13 @@ class TestUGNormalize(MockerTestCase): 'steve': [], } } - ((users, def_user), groups) = self._norm(ug_cfg, distro) + (users, groups) = self._norm(ug_cfg, distro) self.assertIn('bob', groups) self.assertEquals(['s'], groups['bob']) self.assertEquals([], groups['joe']) self.assertIn('joe', groups) self.assertIn('steve', groups) self.assertEquals({}, users) - self.assertEquals({}, def_user) def test_users_simple_dict(self): distro = self._make_distro('ubuntu', 'bob') @@ -83,22 +77,22 @@ class TestUGNormalize(MockerTestCase): 'default': True, } } - ((_users, def_user), _groups) = self._norm(ug_cfg, distro) - self.assertEquals('bob', def_user['name']) + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) ug_cfg = { 'users': { 'default': 'yes', } } - ((_users, def_user), _groups) = self._norm(ug_cfg, distro) - self.assertEquals('bob', def_user['name']) + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) ug_cfg = { 'users': { 'default': '1', } } - ((_users, def_user), _groups) = self._norm(ug_cfg, distro) - self.assertEquals('bob', def_user['name']) + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) def test_users_simple_dict_no(self): distro = self._make_distro('ubuntu', 'bob') @@ -107,22 +101,22 @@ class TestUGNormalize(MockerTestCase): 'default': False, } } - ((_users, def_user), _groups) = self._norm(ug_cfg, distro) - self.assertEquals({}, def_user) + (users, _groups) = self._norm(ug_cfg, distro) + self.assertEquals({}, users) ug_cfg = { 'users': { 'default': 'no', } } - ((_users, def_user), _groups) = self._norm(ug_cfg, distro) - self.assertEquals({}, def_user) + (users, _groups) = self._norm(ug_cfg, distro) + self.assertEquals({}, users) def test_users_simple_csv(self): distro = self._make_distro('ubuntu') ug_cfg = { 'users': 'joe,bob', } - ((users, _def_user), _groups) = self._norm(ug_cfg, distro) + (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) @@ -136,12 +130,51 @@ class TestUGNormalize(MockerTestCase): 'bob' ], } - ((users, _def_user), _groups) = self._norm(ug_cfg, distro) + (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) self.assertEquals({}, users['bob']) + def test_users_old_user(self): + distro = self._make_distro('ubuntu', 'bob') + ug_cfg = { + 'user': 'zetta', + 'users': 'default' + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertIn('zetta', users) + self.assertNotIn('default', users) + ug_cfg = { + 'user': 'zetta', + 'users': 'default, joe' + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertIn('joe', users) + self.assertIn('zetta', users) + self.assertNotIn('default', users) + ug_cfg = { + 'user': 'zetta', + 'users': ['bob', 'joe'] + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertNotIn('bob', users) + self.assertIn('joe', users) + self.assertIn('zetta', users) + ug_cfg = { + 'user': 'zetta', + 'users': { + 'bob': True, + 'joe': True, + } + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertIn('joe', users) + self.assertIn('zetta', users) + 
def test_users_dict_default_additional(self): distro = self._make_distro('ubuntu', 'bob') ug_cfg = { @@ -149,13 +182,12 @@ class TestUGNormalize(MockerTestCase): {'name': 'default', 'blah': True} ], } - ((users, def_user), _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', def_user['name']) - self.assertEquals(",".join(distro.get_default_user_groups()), - def_user['config']['groups']) + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertEquals(",".join(distro.get_default_user()['groups']), + users['bob']['groups']) self.assertEquals(True, - def_user['config']['blah']) - self.assertNotIn('bob', users) + users['bob']['blah']) def test_users_dict_default(self): distro = self._make_distro('ubuntu', 'bob') @@ -164,11 +196,10 @@ class TestUGNormalize(MockerTestCase): 'default', ], } - ((users, def_user), _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', def_user['name']) - self.assertEquals(",".join(distro.get_default_user_groups()), - def_user['config']['groups']) - self.assertNotIn('bob', users) + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + self.assertEquals(",".join(distro.get_default_user()['groups']), + users['bob']['groups']) def test_users_dict_trans(self): distro = self._make_distro('ubuntu') @@ -179,7 +210,7 @@ class TestUGNormalize(MockerTestCase): {'name': 'bob'}, ], } - ((users, _def_user), _groups) = self._norm(ug_cfg, distro) + (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({'tr_me': True}, users['joe']) @@ -193,7 +224,7 @@ class TestUGNormalize(MockerTestCase): {'name': 'bob'}, ], } - ((users, _def_user), _groups) = self._norm(ug_cfg, distro) + (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) self.assertEquals({}, users['joe']) -- cgit v1.2.3 From df80168450faa013ce50893e3c62979829b1fea8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 24 Sep 2012 19:24:25 -0700 Subject: Add a netconfig format test + if rhel is passed the 'all' device name, throw an error since rhel can not currently handle this case. 
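From the caller's side, the guard added below behaves roughly as sketched here: the pseudo-device name 'all' is rejected on rhel, while concrete names still fall through to the parent class. In the sketch, d stands in for a constructed cloudinit.distros.rhel.Distro instance.

    # 'd' is assumed to be a cloudinit.distros.rhel.Distro instance.
    try:
        d._bring_up_interfaces(['all'])
    except RuntimeError as e:
        # e.g. Distro rhel can not translate the device name "all"
        print(e)

    # Concrete device names are still handled by the base class:
    # d._bring_up_interfaces(['eth0', 'eth1'])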
--- cloudinit/distros/rhel.py | 6 + tests/unittests/test_distros/test_netconfig.py | 146 +++++++++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 tests/unittests/test_distros/test_netconfig.py diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 0e451b02..752bb294 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -196,6 +196,12 @@ class Distro(distros.Distro): contents = [] return (exists, QuotingConfigObj(contents)) + def _bring_up_interfaces(self, device_names): + if device_names and 'all' in device_names: + raise RuntimeError(('Distro %s can not translate ' + 'the device name "all"') % (self.name)) + return distros.Distro._bring_up_interfaces(self, device_names) + def set_timezone(self, tz): tz_file = os.path.join("/usr/share/zoneinfo", tz) if not os.path.isfile(tz_file): diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py new file mode 100644 index 00000000..f2cac2fe --- /dev/null +++ b/tests/unittests/test_distros/test_netconfig.py @@ -0,0 +1,146 @@ +from mocker import MockerTestCase + +import mocker + +import os + +from cloudinit import distros +from cloudinit import helpers +from cloudinit import settings +from cloudinit import util + +from StringIO import StringIO + + +BASE_NET_CFG =''' +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet static + address 192.168.1.5 + netmask 255.255.255.0 + network 192.168.0.0 + broadcast 192.168.1.0 + gateway 192.168.1.254 + +auto eth1 +iface eth1 inet dhcp +''' + + +class WriteBuffer(object): + def __init__(self): + self.buffer = StringIO() + self.mode = None + self.omode = None + + def write(self, text): + self.buffer.write(text) + + def __str__(self): + return self.buffer.getvalue() + + +class TestNetCfgDistro(MockerTestCase): + + def _get_distro(self, dname): + cls = distros.fetch(dname) + cfg = settings.CFG_BUILTIN + cfg['system_info']['distro'] = dname + paths = helpers.Paths({}) + return cls(dname, cfg, paths) + + def test_simple_write_ub(self): + ub_distro = self._get_distro('ubuntu') + util_mock = self.mocker.replace(util.write_file, spec=False, passthrough=False) + exists_mock = self.mocker.replace(os.path.isfile, spec=False, passthrough=False) + + exists_mock(mocker.ARGS) + self.mocker.count(0, None) + self.mocker.result(False) + + write_bufs = {} + def replace_write(filename, content, mode=0644, omode="wb"): + buf = WriteBuffer() + buf.mode = mode + buf.omode = omode + buf.write(content) + write_bufs[filename] = buf + + util_mock(mocker.ARGS) + self.mocker.call(replace_write) + self.mocker.replay() + ub_distro.apply_network(BASE_NET_CFG, False) + + self.assertEquals(len(write_bufs), 1) + self.assertIn('/etc/network/interfaces', write_bufs) + write_buf = write_bufs['/etc/network/interfaces'] + self.assertEquals(str(write_buf).strip(), BASE_NET_CFG.strip()) + self.assertEquals(write_buf.mode, 0644) + + def test_simple_write_rh(self): + rh_distro = self._get_distro('rhel') + write_mock = self.mocker.replace(util.write_file, spec=False, passthrough=False) + load_mock = self.mocker.replace(util.load_file, spec=False, passthrough=False) + exists_mock = self.mocker.replace(os.path.isfile, spec=False, passthrough=False) + + write_bufs = {} + def replace_write(filename, content, mode=0644, omode="wb"): + buf = WriteBuffer() + buf.mode = mode + buf.omode = omode + buf.write(content) + write_bufs[filename] = buf + + exists_mock(mocker.ARGS) + self.mocker.count(0, None) + self.mocker.result(False) + + 
load_mock(mocker.ARGS) + self.mocker.count(0, None) + self.mocker.result('') + + for i in range(0, 3): + write_mock(mocker.ARGS) + self.mocker.call(replace_write) + + self.mocker.replay() + rh_distro.apply_network(BASE_NET_CFG, False) + + self.assertEquals(len(write_bufs), 3) + self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', write_bufs) + write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo'] + expected_buf = ''' +# Created by cloud-init +DEVICE="lo" +ONBOOT=yes +''' + self.assertEquals(str(write_buf).strip(), expected_buf.strip()) + self.assertEquals(write_buf.mode, 0644) + + self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', write_bufs) + write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] + expected_buf = ''' +# Created by cloud-init +DEVICE="eth0" +BOOTPROTO="static" +NETMASK="255.255.255.0" +IPADDR="192.168.1.5" +ONBOOT=yes +GATEWAY="192.168.1.254" +BROADCAST="192.168.1.0" +''' + self.assertEquals(str(write_buf).strip(), expected_buf.strip()) + self.assertEquals(write_buf.mode, 0644) + + self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', write_bufs) + write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] + expected_buf = ''' +# Created by cloud-init +DEVICE="eth1" +BOOTPROTO="dhcp" +ONBOOT=yes +''' + self.assertEquals(str(write_buf).strip(), expected_buf.strip()) + self.assertEquals(write_buf.mode, 0644) -- cgit v1.2.3 From a092e6e2eaa5dc848d71f16671c61fa6014ce0b2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 24 Sep 2012 19:27:40 -0700 Subject: Fix pylint issues created. --- tests/unittests/test_distros/test_netconfig.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index f2cac2fe..083ae34a 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -12,7 +12,7 @@ from cloudinit import util from StringIO import StringIO -BASE_NET_CFG =''' +BASE_NET_CFG = ''' auto lo iface lo inet loopback @@ -53,8 +53,10 @@ class TestNetCfgDistro(MockerTestCase): def test_simple_write_ub(self): ub_distro = self._get_distro('ubuntu') - util_mock = self.mocker.replace(util.write_file, spec=False, passthrough=False) - exists_mock = self.mocker.replace(os.path.isfile, spec=False, passthrough=False) + util_mock = self.mocker.replace(util.write_file, + spec=False, passthrough=False) + exists_mock = self.mocker.replace(os.path.isfile, + spec=False, passthrough=False) exists_mock(mocker.ARGS) self.mocker.count(0, None) @@ -81,9 +83,12 @@ class TestNetCfgDistro(MockerTestCase): def test_simple_write_rh(self): rh_distro = self._get_distro('rhel') - write_mock = self.mocker.replace(util.write_file, spec=False, passthrough=False) - load_mock = self.mocker.replace(util.load_file, spec=False, passthrough=False) - exists_mock = self.mocker.replace(os.path.isfile, spec=False, passthrough=False) + write_mock = self.mocker.replace(util.write_file, + spec=False, passthrough=False) + load_mock = self.mocker.replace(util.load_file, + spec=False, passthrough=False) + exists_mock = self.mocker.replace(os.path.isfile, + spec=False, passthrough=False) write_bufs = {} def replace_write(filename, content, mode=0644, omode="wb"): @@ -101,7 +106,7 @@ class TestNetCfgDistro(MockerTestCase): self.mocker.count(0, None) self.mocker.result('') - for i in range(0, 3): + for _i in range(0, 3): write_mock(mocker.ARGS) self.mocker.call(replace_write) -- cgit v1.2.3 From 
a2c6279d303a3b85625404653c7ab8081281ee18 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 24 Sep 2012 20:33:13 -0700 Subject: Handle the case where 'user' is defined but 'users' isn't. --- cloudinit/distros/__init__.py | 11 +++++++++-- tests/unittests/test_distros/test_user_data_normalize.py | 10 ++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 13e4fd44..4cf8f745 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -507,13 +507,20 @@ def _normalize_users(u_cfg, def_user_cfg=None): def normalize_users_groups(cfg, distro): + if not cfg: + cfg = {} users = {} groups = {} if 'groups' in cfg: groups = _normalize_groups(cfg['groups']) + + # Handle the previous style of doing this... old_user = None - if 'user' in cfg: + if 'user' in cfg and cfg['user']: old_user = str(cfg['user']) + if not 'users' in cfg: + cfg['users'] = old_user + old_user = None if 'users' in cfg: default_user_config = None try: @@ -527,7 +534,7 @@ def normalize_users_groups(cfg, distro): # The old user replaces user[0] base_users[0] = {'name': old_user} elif not base_users and old_user: - base.append({'name': old_user}) + base_users.append({'name': old_user}) elif isinstance(base_users, (dict)): # Sorry order not possible if old_user and old_user not in base_users: diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index 4a4e1a29..890d8f05 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -174,6 +174,16 @@ class TestUGNormalize(MockerTestCase): self.assertIn('bob', users) self.assertIn('joe', users) self.assertIn('zetta', users) + ug_cfg = { + 'user': 'zetta', + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('zetta', users) + ug_cfg = { + } + (users, groups) = self._norm(ug_cfg, distro) + self.assertEquals({}, users) + self.assertEquals({}, groups) def test_users_dict_default_additional(self): distro = self._make_distro('ubuntu', 'bob') -- cgit v1.2.3 From 12495e3e597541ad411a656c980b9e405c2a902d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 24 Sep 2012 20:45:56 -0700 Subject: Avoid rechecking if old users is availabile. --- cloudinit/distros/__init__.py | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 4cf8f745..f07ba3fa 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -529,19 +529,19 @@ def normalize_users_groups(cfg, distro): LOG.warn(("Distro has not implemented default user " "access. No default user will be normalized.")) base_users = cfg['users'] - if isinstance(base_users, (list)): - if len(base_users) and old_user: - # The old user replaces user[0] - base_users[0] = {'name': old_user} - elif not base_users and old_user: - base_users.append({'name': old_user}) - elif isinstance(base_users, (dict)): - # Sorry order not possible - if old_user and old_user not in base_users: - base_users[old_user] = True - elif isinstance(base_users, (str, basestring)): - # Just append it on to be re-parsed later - if old_user: + if old_user: + if isinstance(base_users, (list)): + if len(base_users): + # The old user replaces user[0] + base_users[0] = {'name': old_user} + else: + # Just add it on at the end... 
+ base_users.append({'name': old_user}) + elif isinstance(base_users, (dict)): + if old_user not in base_users: + base_users[old_user] = True + elif isinstance(base_users, (str, basestring)): + # Just append it on to be re-parsed later base_users += ",%s" % (old_user) users = _normalize_users(base_users, default_user_config) return (users, groups) -- cgit v1.2.3 From 56d0585fd7d9804b82a1eb22faff8a6554b100b8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 24 Sep 2012 20:49:29 -0700 Subject: Adjust the fingerprints to use this new user normalization function instead of the previous 'user' extraction. --- cloudinit/config/cc_ssh_authkey_fingerprints.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 23f5755a..36d439d2 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -23,6 +23,7 @@ from prettytable import PrettyTable from cloudinit import ssh_util from cloudinit import util +from cloudinit import distros def _split_hash(bin_hash): @@ -89,8 +90,9 @@ def handle(name, cfg, cloud, log, _args): log.debug(("Skipping module named %s, " "logging of ssh fingerprints disabled"), name) - user_name = util.get_cfg_option_str(cfg, "user", "ubuntu") hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") - extract = ssh_util.extract_authorized_keys - (auth_key_fn, auth_key_entries) = extract(user_name, cloud.paths) - _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth) + extract_func = ssh_util.extract_authorized_keys + (users, _groups) = distros.normalize_users_groups(cfg, cloud.distro) + for (user_name, _cfg) in users.items(): + (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) + _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth) -- cgit v1.2.3 From 4e36bf2225d2b6e7a7ac1821e1d00ac72abb5856 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 25 Sep 2012 13:19:36 -0700 Subject: Add the ability to pass patches to the rpm builder utility. --- packages/brpm | 7 +++++++ packages/redhat/cloud-init.spec.in | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/packages/brpm b/packages/brpm index 77de0cf2..616ae021 100755 --- a/packages/brpm +++ b/packages/brpm @@ -149,6 +149,7 @@ def generate_spec_contents(args, tmpl_fn, arc_fn): subs['systemd'] = False subs['init_sys'] = args.boot + subs['patches'] = [os.path.basename(p) for p in args.patches] return templater.render_from_file(tmpl_fn, params=subs) @@ -164,6 +165,10 @@ def main(): " (default: %(default)s)"), default=False, action='store_true') + parser.add_argument("-p", "--patch", dest="patches", + help=("include the following patch when building"), + default=[], + action='append') args = parser.parse_args() capture = True if args.verbose: @@ -197,6 +202,8 @@ def main(): spec_fn = util.abs_join(root_dir, 'cloud-init.spec') util.write_file(spec_fn, contents) print("Created spec file at %r" % (spec_fn)) + for p in args.patches: + util.copy(p, util.abs_join(arc_dir, os.path.basename(p)) # Now build it! 
print("Running 'rpmbuild' in %r" % (root_dir)) diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 35b27beb..6604b1ac 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -36,6 +36,13 @@ Requires: shadow-utils Requires: ${r} #end for +# Custom patches +#set $size 0 +#for $p in $patches +Patch${size}: $p +#set $size += 1 +#end for + #if $sysvinit Requires(post): chkconfig Requires(postun): initscripts -- cgit v1.2.3 From f74c738fa76db65e45d2bdb19257fe0a78dcd0e0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 25 Sep 2012 13:20:33 -0700 Subject: Fix syntax error. --- packages/brpm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/brpm b/packages/brpm index 616ae021..3870e188 100755 --- a/packages/brpm +++ b/packages/brpm @@ -203,7 +203,7 @@ def main(): util.write_file(spec_fn, contents) print("Created spec file at %r" % (spec_fn)) for p in args.patches: - util.copy(p, util.abs_join(arc_dir, os.path.basename(p)) + util.copy(p, util.abs_join(arc_dir, os.path.basename(p))) # Now build it! print("Running 'rpmbuild' in %r" % (root_dir)) -- cgit v1.2.3 From cdba22dd0fc249124dfb4e578f44898ba044925a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 25 Sep 2012 13:29:07 -0700 Subject: Allow package_mirrors to be non-existent. --- cloudinit/distros/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 549c1612..6df843ad 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -112,7 +112,7 @@ class Distro(object): return arch def _get_arch_package_mirror_info(self, arch=None): - mirror_info = self.get_option("package_mirrors", None) + mirror_info = self.get_option("package_mirrors") or [] if arch == None: arch = self.get_primary_arch() return _get_arch_package_mirror_info(mirror_info, arch) @@ -122,7 +122,6 @@ class Distro(object): # this resolves the package_mirrors config option # down to a single dict of {mirror_name: mirror_url} arch_info = self._get_arch_package_mirror_info(arch) - return _get_package_mirror_info(availability_zone=availability_zone, mirror_info=arch_info) -- cgit v1.2.3 From ca9251d74a18b47abacff2b63328061b9e484246 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 25 Sep 2012 13:37:09 -0700 Subject: Fix cheetah syntax error. --- packages/redhat/cloud-init.spec.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 6604b1ac..9cfe56f5 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -37,7 +37,7 @@ Requires: ${r} #end for # Custom patches -#set $size 0 +#set $size = 0 #for $p in $patches Patch${size}: $p #set $size += 1 -- cgit v1.2.3 From 94ff94e299257d22c0f45f980ce1a24b569b0b61 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 25 Sep 2012 13:56:31 -0700 Subject: Fix tests after resync with head. 
--- tests/unittests/test_distros/test_netconfig.py | 40 ++++++++++++++++++++------ 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 083ae34a..9954841b 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -37,7 +37,7 @@ class WriteBuffer(object): def write(self, text): self.buffer.write(text) - + def __str__(self): return self.buffer.getvalue() @@ -63,6 +63,7 @@ class TestNetCfgDistro(MockerTestCase): self.mocker.result(False) write_bufs = {} + def replace_write(filename, content, mode=0644, omode="wb"): buf = WriteBuffer() buf.mode = mode @@ -81,6 +82,18 @@ class TestNetCfgDistro(MockerTestCase): self.assertEquals(str(write_buf).strip(), BASE_NET_CFG.strip()) self.assertEquals(write_buf.mode, 0644) + def assertCfgEquals(self, blob1, blob2): + cfg_tester = distros.rhel.QuotingConfigObj + b1 = dict(cfg_tester(blob1.strip().splitlines())) + b2 = dict(cfg_tester(blob2.strip().splitlines())) + self.assertEquals(b1, b2) + for (k, v) in b1.items(): + self.assertIn(k, b2) + for (k, v) in b2.items(): + self.assertIn(k, b1) + for (k, v) in b1.items(): + self.assertEquals(v, b2[k]) + def test_simple_write_rh(self): rh_distro = self._get_distro('rhel') write_mock = self.mocker.replace(util.write_file, @@ -91,6 +104,7 @@ class TestNetCfgDistro(MockerTestCase): spec=False, passthrough=False) write_bufs = {} + def replace_write(filename, content, mode=0644, omode="wb"): buf = WriteBuffer() buf.mode = mode @@ -109,25 +123,27 @@ class TestNetCfgDistro(MockerTestCase): for _i in range(0, 3): write_mock(mocker.ARGS) self.mocker.call(replace_write) + + write_mock(mocker.ARGS) + self.mocker.call(replace_write) self.mocker.replay() rh_distro.apply_network(BASE_NET_CFG, False) - self.assertEquals(len(write_bufs), 3) + + self.assertEquals(len(write_bufs), 4) self.assertIn('/etc/sysconfig/network-scripts/ifcfg-lo', write_bufs) write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-lo'] expected_buf = ''' -# Created by cloud-init DEVICE="lo" ONBOOT=yes ''' - self.assertEquals(str(write_buf).strip(), expected_buf.strip()) + self.assertCfgEquals(expected_buf, str(write_buf)) self.assertEquals(write_buf.mode, 0644) self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', write_bufs) write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0'] expected_buf = ''' -# Created by cloud-init DEVICE="eth0" BOOTPROTO="static" NETMASK="255.255.255.0" @@ -136,16 +152,24 @@ ONBOOT=yes GATEWAY="192.168.1.254" BROADCAST="192.168.1.0" ''' - self.assertEquals(str(write_buf).strip(), expected_buf.strip()) + self.assertCfgEquals(expected_buf, str(write_buf)) self.assertEquals(write_buf.mode, 0644) self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', write_bufs) write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1'] expected_buf = ''' -# Created by cloud-init DEVICE="eth1" BOOTPROTO="dhcp" ONBOOT=yes ''' - self.assertEquals(str(write_buf).strip(), expected_buf.strip()) + self.assertCfgEquals(expected_buf, str(write_buf)) + self.assertEquals(write_buf.mode, 0644) + + self.assertIn('/etc/sysconfig/network', write_bufs) + write_buf = write_bufs['/etc/sysconfig/network'] + expected_buf = ''' +# Created by cloud-init v. 
0.7 +NETWORKING=yes +''' + self.assertCfgEquals(expected_buf, str(write_buf)) self.assertEquals(write_buf.mode, 0644) -- cgit v1.2.3 From 018ec6faa0a9d0c8aa584bae2e0798ed7e3e021a Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 25 Sep 2012 18:12:26 -0700 Subject: Add a top_dir define. --- packages/brpm | 5 +++-- packages/redhat/cloud-init.spec.in | 4 ++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/packages/brpm b/packages/brpm index 3870e188..e344926f 100755 --- a/packages/brpm +++ b/packages/brpm @@ -91,7 +91,7 @@ def format_change_line(ds, who, comment=None): return "* %s" % (d) -def generate_spec_contents(args, tmpl_fn, arc_fn): +def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): # Figure out the version and revno cmd = [util.abs_join(find_root(), 'tools', 'read-version')] @@ -148,6 +148,7 @@ def generate_spec_contents(args, tmpl_fn, arc_fn): else: subs['systemd'] = False + subs['defines'] = ["_topdir %s" % (top_dir)] subs['init_sys'] = args.boot subs['patches'] = [os.path.basename(p) for p in args.patches] return templater.render_from_file(tmpl_fn, params=subs) @@ -197,7 +198,7 @@ def main(): # Form the spec file to be used tmpl_fn = util.abs_join(find_root(), 'packages', 'redhat', 'cloud-init.spec.in') - contents = generate_spec_contents(args, tmpl_fn, + contents = generate_spec_contents(args, tmpl_fn, root_dir, os.path.basename(archive_fn)) spec_fn = util.abs_join(root_dir, 'cloud-init.spec') util.write_file(spec_fn, contents) diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 9cfe56f5..f5339640 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -5,6 +5,10 @@ # Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets # Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html +#for $d in $defines +%define ${d} +#end for + Name: cloud-init Version: ${version} Release: ${release}%{?dist} -- cgit v1.2.3 From c62675900a96dd645a1ff2ee0c316ef9232f443d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 25 Sep 2012 18:28:12 -0700 Subject: Clean doesn't seem needed. --- packages/brpm | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/brpm b/packages/brpm index e344926f..1735f5bb 100755 --- a/packages/brpm +++ b/packages/brpm @@ -208,8 +208,7 @@ def main(): # Now build it! print("Running 'rpmbuild' in %r" % (root_dir)) - cmd = ['rpmbuild', '--clean', - '-ba', spec_fn] + cmd = ['rpmbuild', '-ba', spec_fn] util.subp(cmd, capture=capture) # Copy the items built to our local dir -- cgit v1.2.3 From f31a4e80aaaafca79eb6c808c81a6067f0c493b8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 26 Sep 2012 16:29:50 -0700 Subject: Add a new example test that will patch utils and os functions so that they can be 'retargeted' to a temporary directory, which allows us the ability to run a full set of cloud-init stages. Neat things: 1. All cloud-init code is unchanged (as long as it goes through the utils functions for most functionality) 2. Allows for a natural way to setup a temporary directory then patch the new directory as the new 'root' and then run cloud-init stages and then check the contents of what was placed as desired. 
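The heart of the helper below is re-rooting the path argument(s) of patched util functions under a temporary directory. A condensed sketch of that idea follows; new_root and retarget are illustrative names, and the real helper additionally records the originals so tearDown can restore them.

    import os

    def retarget(new_root, func, n_path_args=1):
        # Wrap func so its first n_path_args arguments are re-rooted
        # under new_root before the real function runs.
        def wrapper(*args, **kwargs):
            args = list(args)
            for i in range(n_path_args):
                args[i] = os.path.abspath(
                    os.path.join(new_root, str(args[i]).lstrip("/")))
            return func(*args, **kwargs)
        return wrapper

    # e.g. util.write_file = retarget('/tmp/ci-root', util.write_file)
    #      util.write_file('/etc/hostname', 'blah\n')  # lands under /tmp/ci-root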
--- .../roots/simple_ubuntu/etc/networks/interfaces | 3 + tests/unittests/helpers.py | 106 +++++++++++++++++++++ tests/unittests/test_runs/test_simple_run.py | 89 +++++++++++++++++ 3 files changed, 198 insertions(+) create mode 100644 tests/data/roots/simple_ubuntu/etc/networks/interfaces create mode 100644 tests/unittests/test_runs/test_simple_run.py diff --git a/tests/data/roots/simple_ubuntu/etc/networks/interfaces b/tests/data/roots/simple_ubuntu/etc/networks/interfaces new file mode 100644 index 00000000..77efa67d --- /dev/null +++ b/tests/data/roots/simple_ubuntu/etc/networks/interfaces @@ -0,0 +1,3 @@ +auto lo +iface lo inet loopback + diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index d0f09e70..d7aebdee 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -1,8 +1,42 @@ import os +import mocker + from mocker import MockerTestCase from cloudinit import helpers as ch +from cloudinit import util + +import shutil + +# Makes the old path start +# with new base instead of whatever +# it previously had +def rebase_path(old_path, new_base): + if old_path.startswith(new_base): + # Already handled... + return old_path + # Retarget the base of that path + # to the new base instead of the + # old one... + path = os.path.join(new_base, old_path.lstrip("/")) + path = os.path.abspath(path) + return path + + +# Can work on anything that takes a path +# as first argument +def retarget_many_wrapper(new_base, am, old_func): + def wrapper(*args, **kwds): + n_args = list(args) + nam = am + if am == -1: + nam = len(n_args) + for i in range(0, nam): + path = args[i] + n_args[i] = rebase_path(path, new_base) + return old_func(*n_args, **kwds) + return wrapper class ResourceUsingTestCase(MockerTestCase): @@ -40,3 +74,75 @@ class ResourceUsingTestCase(MockerTestCase): 'templates_dir': self.resourceLocation(), }) return cp + + +class FilesystemMockingTestCase(ResourceUsingTestCase): + def __init__(self, methodName="runTest"): + ResourceUsingTestCase.__init__(self, methodName) + self.patched_funcs = [] + + def replicateTestRoot(self, example_root, target_root): + real_root = self.resourceLocation() + real_root = os.path.join(real_root, 'roots', example_root) + for (dir_path, dirnames, filenames) in os.walk(real_root): + real_path = dir_path + make_path = rebase_path(real_path[len(real_root):], target_root) + util.ensure_dir(make_path) + for f in filenames: + real_path = util.abs_join(real_path, f) + make_path = util.abs_join(make_path, f) + shutil.copy(real_path, make_path) + + def tearDown(self): + self.restore() + ResourceUsingTestCase.tearDown(self) + + def restore(self): + for (mod, f, func) in self.patched_funcs: + setattr(mod, f, func) + self.patched_funcs = [] + + def patchUtils(self, new_root): + patch_funcs = { + util: [('write_file', 1), + ('load_file', 1), + ('ensure_dir', 1), + ('chmod', 1), + ('delete_dir_contents', 1), + ('del_file', 1), + ('sym_link', -1)], + } + for (mod, funcs) in patch_funcs.items(): + for (f, am) in funcs: + func = getattr(mod, f) + trap_func = retarget_many_wrapper(new_root, am, func) + setattr(mod, f, trap_func) + self.patched_funcs.append((mod, f, func)) + + # Handle subprocess calls + func = getattr(util, 'subp') + + def nsubp(*args, **kwargs): + return ('', '') + + setattr(util, 'subp', nsubp) + self.patched_funcs.append((util, 'subp', func)) + + def null_func(*args, **kwargs): + return None + + for f in ['chownbyid', 'chownbyname']: + func = getattr(util, f) + setattr(util, f, null_func) + self.patched_funcs.append((util, 
f, func)) + + def patchOS(self, new_root): + patch_funcs = { + os.path: ['isfile', 'exists', 'islink', 'isdir'], + } + for (mod, funcs) in patch_funcs.items(): + for f in funcs: + func = getattr(mod, f) + trap_func = retarget_many_wrapper(new_root, 1, func) + setattr(mod, f, trap_func) + self.patched_funcs.append((mod, f, func)) diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py new file mode 100644 index 00000000..33dd3ca2 --- /dev/null +++ b/tests/unittests/test_runs/test_simple_run.py @@ -0,0 +1,89 @@ +from mocker import MockerTestCase + +import mocker +import sys +import os + +# Allow running this test individually +top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") +top_dir = os.path.abspath(top_dir) +if os.path.exists(top_dir): + sys.path.insert(0, os.path.dirname(top_dir)) + + +import helpers + +from cloudinit import util +from cloudinit import stages + +from cloudinit.settings import (PER_INSTANCE) + + +class TestSimpleRunDistro(helpers.FilesystemMockingTestCase): + def _patchIn(self, root): + self.restore() + self.patchOS(root) + self.patchUtils(root) + + def _pp_root(self, root, repatch=True): + self.restore() + for (dirpath, dirnames, filenames) in os.walk(root): + print(dirpath) + for f in filenames: + joined = os.path.join(dirpath, f) + if os.path.islink(joined): + print("f %s - (symlink)" % (f)) + else: + print("f %s" % (f)) + for d in dirnames: + joined = os.path.join(dirpath, d) + if os.path.islink(joined): + print("d %s - (symlink)" % (d)) + else: + print("d %s" % (d)) + if repatch: + self._patchIn(root) + + def test_none_ds(self): + new_root = self.makeDir() + self.replicateTestRoot('simple_ubuntu', new_root) + cfg = { + 'datasource_list': ['None'], + 'write_files': [{ + 'path': '/etc/blah.ini', + 'content': 'blah', + 'permissions': 0755, + }], + 'cloud_init_modules': ['write-files'], + } + cloud_cfg = util.yaml_dumps(cfg) + util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) + util.write_file(os.path.join(new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) + self._patchIn(new_root) + + # Now start verifying whats created + initer = stages.Init() + initer.read_cfg() + initer.initialize() + self.assertTrue(os.path.exists("/var/lib/cloud")) + for d in ['scripts', 'seed', 'instances', 'handlers', 'sem', 'data']: + self.assertTrue(os.path.isdir(os.path.join("/var/lib/cloud", d))) + + initer.fetch() + iid = initer.instancify() + self.assertEquals(iid, 'iid-datasource-none') + initer.update() + self.assertTrue(os.path.islink("var/lib/cloud/instance")) + + (ran, results) = initer.cloudify().run('consume_userdata', + initer.consume_userdata, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(os.path.exists('/etc/blah.ini')) + self.assertIn('write-files', which_ran) + contents = util.load_file('/etc/blah.ini') + self.assertEquals(contents, 'blah') -- cgit v1.2.3 From 4a5d5a044db9e77b931fb4df93f7c01b02021f44 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 26 Sep 2012 16:40:07 -0700 Subject: Fixup some pylint warnings. 
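Most of the cleanups in this commit simply rename intentionally-unused names with a leading underscore, the conventional way to tell pylint (and readers) that dropping a value is deliberate. A tiny standalone example of the convention, not taken from the patch:

import os


def count_files(root):
    total = 0
    # The '_' prefix marks the unused tuple members as intentionally
    # ignored, silencing pylint's unused-variable warning for them.
    for (_dirpath, _dirnames, filenames) in os.walk(root):
        total += len(filenames)
    return total


print(count_files("."))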
--- tests/unittests/helpers.py | 8 +++----- tests/unittests/test_filters/test_launch_index.py | 12 ++++++++++-- tests/unittests/test_runs/test_simple_run.py | 14 ++++++-------- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index d7aebdee..7b829f24 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -1,7 +1,5 @@ import os -import mocker - from mocker import MockerTestCase from cloudinit import helpers as ch @@ -84,7 +82,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def replicateTestRoot(self, example_root, target_root): real_root = self.resourceLocation() real_root = os.path.join(real_root, 'roots', example_root) - for (dir_path, dirnames, filenames) in os.walk(real_root): + for (dir_path, _dirnames, filenames) in os.walk(real_root): real_path = dir_path make_path = rebase_path(real_path[len(real_root):], target_root) util.ensure_dir(make_path) @@ -122,13 +120,13 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): # Handle subprocess calls func = getattr(util, 'subp') - def nsubp(*args, **kwargs): + def nsubp(*_args, **_kwargs): return ('', '') setattr(util, 'subp', nsubp) self.patched_funcs.append((util, 'subp', func)) - def null_func(*args, **kwargs): + def null_func(*_args, **_kwargs): return None for f in ['chownbyid', 'chownbyname']: diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index 7ca7cbb6..1e9b9053 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -1,6 +1,14 @@ import copy +import os +import sys -import helpers as th +top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") +top_dir = os.path.abspath(top_dir) +if os.path.exists(top_dir): + sys.path.insert(0, os.path.dirname(top_dir)) + + +import helpers import itertools @@ -18,7 +26,7 @@ def count_messages(root): return am -class TestLaunchFilter(th.ResourceUsingTestCase): +class TestLaunchFilter(helpers.ResourceUsingTestCase): def assertCounts(self, message, expected_counts): orig_message = copy.deepcopy(message) diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 33dd3ca2..7f646b54 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -1,6 +1,3 @@ -from mocker import MockerTestCase - -import mocker import sys import os @@ -19,7 +16,7 @@ from cloudinit import stages from cloudinit.settings import (PER_INSTANCE) -class TestSimpleRunDistro(helpers.FilesystemMockingTestCase): +class TestSimpleRun(helpers.FilesystemMockingTestCase): def _patchIn(self, root): self.restore() self.patchOS(root) @@ -76,13 +73,14 @@ class TestSimpleRunDistro(helpers.FilesystemMockingTestCase): initer.update() self.assertTrue(os.path.islink("var/lib/cloud/instance")) - (ran, results) = initer.cloudify().run('consume_userdata', - initer.consume_userdata, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + initer.cloudify().run('consume_userdata', + initer.consume_userdata, + args=[PER_INSTANCE], + freq=PER_INSTANCE) mods = stages.Modules(initer) (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) self.assertTrue(os.path.exists('/etc/blah.ini')) self.assertIn('write-files', which_ran) contents = util.load_file('/etc/blah.ini') -- cgit v1.2.3 From b541f738616349a028c5e54754ea83e439d82734 Mon Sep 17 00:00:00 2001 From: Joshua 
Harlow Date: Wed, 26 Sep 2012 16:42:15 -0700 Subject: Adjust comment. --- tests/unittests/helpers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 7b829f24..d5df580b 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -22,8 +22,7 @@ def rebase_path(old_path, new_base): return path -# Can work on anything that takes a path -# as first argument +# Can work on anything that takes a path as arguments def retarget_many_wrapper(new_base, am, old_func): def wrapper(*args, **kwds): n_args = list(args) -- cgit v1.2.3 From 179f48250952501c8ad36ac0ed6389159a25bf82 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 26 Sep 2012 16:59:28 -0700 Subject: Ensure the patches get activated. --- packages/redhat/cloud-init.spec.in | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index f5339640..90e85a05 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -69,6 +69,13 @@ ssh keys and to let the user run various scripts. %prep %setup -q -n %{name}-%{version}~${release} +# Custom patches activation +#set $size = 0 +#for $p in $patches +%patch${size} +#set $size += 1 +#end for + %build %{__python} setup.py build -- cgit v1.2.3 From 68114a57d776585865056ad5fb71d41bb741cd06 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 26 Sep 2012 17:03:07 -0700 Subject: Add a nice '-p1' --- packages/redhat/cloud-init.spec.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 90e85a05..12f0b144 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -72,7 +72,7 @@ ssh keys and to let the user run various scripts. # Custom patches activation #set $size = 0 #for $p in $patches -%patch${size} +%patch${size} -p1 #set $size += 1 #end for -- cgit v1.2.3 From dfa62e70bd9942fd3c82d77217d48615a78bbcfc Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 28 Sep 2012 09:03:54 -0400 Subject: restart salt-minion instead of start. Packages on debian/ubuntu should start on installation. As a result, if we want to get config changes we've inserted to be read, we need to restart. Note one interesting thing here. upstart considers 'restart' as "restart only if currently running", while 'service' considers restart to be "stop if running, then start". So the use of 'service' here is important, rather than just 'restart' --- cloudinit/config/cc_salt_minion.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index 79ed8807..8a1440d9 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -56,5 +56,6 @@ def handle(name, cfg, cloud, log, _args): util.write_file(pub_name, salt_cfg['public_key']) util.write_file(pem_name, salt_cfg['private_key']) - # Start salt-minion - util.subp(['service', 'salt-minion', 'start'], capture=False) + # restart salt-minion. 'service' will start even if not started. if it + # was started, it needs to be restarted for config change. + util.subp(['service', 'salt-minion', 'restart'], capture=False) -- cgit v1.2.3 From 332ef83d77853714b88358de301bda30e82aaf62 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 28 Sep 2012 11:06:16 -0700 Subject: Stop the bad habit of using 'or'. 
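The one-line change that follows swaps a "value or default" expression for an explicit default argument. The two are not equivalent: 'or' also replaces values that are present but falsy. A small self-contained illustration (the toy get_option below is only a stand-in for the real method on the distro object):

settings = {"package_mirrors": []}   # explicitly configured as empty


def get_option(cfg, name, default=None):
    # Stand-in: return the configured value, or 'default' only if the
    # key is missing entirely.
    return cfg.get(name, default)


# With 'or', any falsy configured value (empty list, empty string, 0)
# is silently swapped for the fallback:
with_or = get_option(settings, "package_mirrors") or ["fallback"]      # ["fallback"]

# An explicit default only applies when the key is missing, so an
# intentionally empty setting survives:
with_default = get_option(settings, "package_mirrors", ["fallback"])   # []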
--- cloudinit/distros/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 6df843ad..55e4b8db 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -112,7 +112,7 @@ class Distro(object): return arch def _get_arch_package_mirror_info(self, arch=None): - mirror_info = self.get_option("package_mirrors") or [] + mirror_info = self.get_option("package_mirrors", []) if arch == None: arch = self.get_primary_arch() return _get_arch_package_mirror_info(mirror_info, arch) -- cgit v1.2.3 From 46be69003044a7d60d9566dbddd1b7fd93054c8f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 28 Sep 2012 11:08:46 -0700 Subject: Also ensure that if mirror info is not invalid before we start iterating over it (it could be sent is as none). --- cloudinit/distros/__init__.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 55e4b8db..55ab1f10 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -386,6 +386,8 @@ def _get_package_mirror_info(mirror_info, availability_zone=None, # given a arch specific 'mirror_info' entry (from package_mirrors) # search through the 'search' entries, and fallback appropriately # return a dict with only {name: mirror} entries. + if not mirror_info: + mirror_info = {} ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % "north|northeast|east|southeast|south|southwest|west|northwest") -- cgit v1.2.3 From a7a9de1a079a70f5c8290ab5158661d3a33e5552 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 28 Sep 2012 16:31:50 -0400 Subject: add 'safeyaml' to cloudinit In 0.7.0 we started using yaml.safe_load to load data rather than yaml.load. Some producers (namely, ubuntu MAAS created) have produced cloud-config data in the past that included python unicode types. This creates a specialized safe_loader that is basically safe_load + support for python unicode. --- cloudinit/safeyaml.py | 31 +++++++++++++++++++++++++++++++ cloudinit/util.py | 3 ++- tests/unittests/test_util.py | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 1 deletion(-) create mode 100644 cloudinit/safeyaml.py diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py new file mode 100644 index 00000000..8b4da1fa --- /dev/null +++ b/cloudinit/safeyaml.py @@ -0,0 +1,31 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Canonical Ltd. +# +# Author: Scott Moser +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import yaml + + +class _CustomSafeLoader(yaml.SafeLoader): + def construct_python_unicode(self, node): + return self.construct_scalar(node) + +_CustomSafeLoader.add_constructor( + u'tag:yaml.org,2002:python/unicode', + _CustomSafeLoader.construct_python_unicode) + +def load(blob): + return(yaml.load(blob, Loader=_CustomSafeLoader)) diff --git a/cloudinit/util.py b/cloudinit/util.py index 94b17dfa..46d490f7 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -50,6 +50,7 @@ import yaml from cloudinit import importer from cloudinit import log as logging +from cloudinit import safeyaml from cloudinit import url_helper as uhelp from cloudinit.settings import (CFG_BUILTIN) @@ -612,7 +613,7 @@ def load_yaml(blob, default=None, allowed=(dict,)): LOG.debug(("Attempting to load yaml from string " "of length %s with allowed root types %s"), len(blob), allowed) - converted = yaml.safe_load(blob) + converted = safeyaml.load(blob) if not isinstance(converted, allowed): # Yes this will just be caught, but thats ok for now... raise TypeError(("Yaml load allows %s root types," diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 15fcbd26..96962b91 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -1,5 +1,6 @@ import os import stat +import yaml from mocker import MockerTestCase from unittest import TestCase @@ -268,4 +269,42 @@ class TestGetCmdline(TestCase): os.environ['DEBUG_PROC_CMDLINE'] = 'abcd 123' self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], util.get_cmdline()) + +class TestLoadYaml(TestCase): + mydefault = "7b03a8ebace993d806255121073fed52" + + def test_simple(self): + mydata = {'1': "one", '2': "two"} + self.assertEqual(util.load_yaml(yaml.dump(mydata)), mydata) + + def test_nonallowed_returns_default(self): + # for now, anything not in the allowed list just returns the default. 
+ myyaml = yaml.dump({'1': "one"}) + self.assertEqual(util.load_yaml(blob=myyaml, + default=self.mydefault, + allowed=(str,)), + self.mydefault) + + def test_bogus_returns_default(self): + badyaml = "1\n 2:" + self.assertEqual(util.load_yaml(blob=badyaml, + default=self.mydefault), + self.mydefault) + + def test_unsafe_types(self): + # should not load complex types + unsafe_yaml = yaml.dump((1, 2, 3,)) + self.assertEqual(util.load_yaml(blob=unsafe_yaml, + default=self.mydefault), + self.mydefault) + + def test_python_unicode(self): + # complex type of python/unicde is explicitly allowed + myobj = {'1': unicode("FOOBAR")} + safe_yaml = yaml.dump(myobj) + self.assertEqual(util.load_yaml(blob=safe_yaml, + default=self.mydefault), + myobj) + + # vi: ts=4 expandtab -- cgit v1.2.3 From f1e3ae3c49b9424a7e3cdbf835651720cc60e143 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 28 Sep 2012 16:35:53 -0400 Subject: fix pep8 and pylint --- cloudinit/config/cc_ssh_authkey_fingerprints.py | 3 ++- cloudinit/config/cc_users_groups.py | 3 +-- cloudinit/safeyaml.py | 1 + 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 2b9a6e0e..f9bdf5fc 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -95,4 +95,5 @@ def handle(name, cfg, cloud, log, _args): (users, _groups) = distros.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) - _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth) + _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, + hash_meth) diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 464f55c3..aa6e0579 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -17,14 +17,13 @@ # along with this program. If not, see . from cloudinit import distros -from cloudinit import util from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE -def handle(name, cfg, cloud, log, _args): +def handle(name, cfg, cloud, _log, _args): (users, groups) = distros.normalize_users_groups(cfg, cloud.distro) for (name, members) in groups.items(): cloud.distro.create_group(name, members) diff --git a/cloudinit/safeyaml.py b/cloudinit/safeyaml.py index 8b4da1fa..eba5d056 100644 --- a/cloudinit/safeyaml.py +++ b/cloudinit/safeyaml.py @@ -27,5 +27,6 @@ _CustomSafeLoader.add_constructor( u'tag:yaml.org,2002:python/unicode', _CustomSafeLoader.construct_python_unicode) + def load(blob): return(yaml.load(blob, Loader=_CustomSafeLoader)) -- cgit v1.2.3 From cf3dd1ba86d4ddde149f451e026c697c07b4d732 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 28 Sep 2012 13:53:56 -0700 Subject: Rework the rest of the locations that used the previous 'user' and make those locations go through the new distros functions to select the default user or the user list (depending on usage). Adjust the tests to check the new 'default' field that signifies the default user + test the new method to extract just the default user from a normalized user dictionary. 
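After this rework, every consumer goes through the same two steps: normalize the user configuration into a dictionary of user name -> config in which at most one entry carries 'default': True, then pull that entry out with the new extraction helper. A rough sketch of the shape and the lookup (simplified; the real extract_default added below also accepts fallback name/config arguments and strips the 'default' marker, which this sketch mirrors loosely):

# Shape produced by normalization: at most one user flagged as default.
users = {
    "bob": {"default": True, "groups": "adm,sudo"},
    "joe": {"default": False},
}


def extract_default(users, default_name=None, default_config=None):
    # Return (name, config) for the entry flagged as the default user,
    # falling back to the supplied defaults when nothing is flagged.
    for (name, config) in users.items():
        if config.get("default"):
            config = dict(config)
            config.pop("default", None)
            return (name, config)
    return (default_name, default_config)


print(extract_default(users))   # ('bob', {'groups': 'adm,sudo'})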
--- cloudinit/config/cc_byobu.py | 6 +- cloudinit/config/cc_set_passwords.py | 13 +-- cloudinit/config/cc_ssh.py | 13 +-- cloudinit/config/cc_ssh_authkey_fingerprints.py | 7 +- cloudinit/config/cc_ssh_import_id.py | 33 +++--- cloudinit/config/cc_users_groups.py | 7 +- cloudinit/distros/__init__.py | 130 ++++++++++++++++----- cloudinit/util.py | 30 +++++ .../test_distros/test_user_data_normalize.py | 50 ++++++-- 9 files changed, 202 insertions(+), 87 deletions(-) diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index 4e2e06bb..e1ec5af5 100644 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -18,12 +18,13 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from cloudinit import distros as ds from cloudinit import util distros = ['ubuntu', 'debian'] -def handle(name, cfg, _cloud, log, args): +def handle(name, cfg, cloud, log, args): if len(args) != 0: value = args[0] else: @@ -56,7 +57,8 @@ def handle(name, cfg, _cloud, log, args): shcmd = "" if mod_user: - user = util.get_cfg_option_str(cfg, "user", "ubuntu") + (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) + (user, _user_config) = ds.extract_default(users, 'ubuntu') shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) shcmd += " || X=$(($X+1)); " if mod_sys: diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index a017e6b6..bb95f948 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -20,6 +20,7 @@ import sys +from cloudinit import distros as ds from cloudinit import ssh_util from cloudinit import util @@ -50,18 +51,10 @@ def handle(_name, cfg, cloud, log, args): expire = util.get_cfg_option_bool(chfg, 'expire', expire) if not plist and password: - user = cloud.distro.get_default_user() - - if 'users' in cfg: - - user_zero = cfg['users'][0] - - if isinstance(user_zero, dict) and 'name' in user_zero: - user = user_zero['name'] - + (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) + (user, _user_config) = ds.extract_default(users) if user: plist = "%s:%s" % (user, password) - else: log.warn("No default or defined user to change password for.") diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 0ded62ba..c2ee4635 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -21,6 +21,7 @@ import glob import os +from cloudinit import distros as ds from cloudinit import ssh_util from cloudinit import util @@ -102,16 +103,8 @@ def handle(_name, cfg, cloud, log, _args): " %s to file %s"), keytype, keyfile) try: - # TODO(utlemming): consolidate this stanza that occurs in: - # cc_ssh_import_id, cc_set_passwords, maybe cc_users_groups.py - user = cloud.distro.get_default_user() - - if 'users' in cfg: - user_zero = cfg['users'][0] - - if user_zero != "default": - user = user_zero - + (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) + (user, _user_config) = ds.extract_default(users) disable_root = util.get_cfg_option_bool(cfg, "disable_root", True) disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts", DISABLE_ROOT_OPTS) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 2b9a6e0e..32214fba 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -41,8 +41,10 @@ def _gen_fingerprint(b64_text, hash_meth='md5'): hasher = 
hashlib.new(hash_meth) hasher.update(base64.b64decode(b64_text)) return ":".join(_split_hash(hasher.hexdigest())) - except TypeError: + except (TypeError, ValueError): # Raised when b64 not really b64... + # or when the hash type is not really + # a known/supported hash type... return '?' @@ -95,4 +97,5 @@ def handle(name, cfg, cloud, log, _args): (users, _groups) = distros.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) - _pprint_key_entries(user_name, auth_key_fn, auth_key_entries, hash_meth) + _pprint_key_entries(user_name, auth_key_fn, + auth_key_entries, hash_meth) diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index 08fb63c6..a781cd7c 100644 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from cloudinit import distros as ds from cloudinit import util import pwd @@ -39,33 +40,27 @@ def handle(_name, cfg, cloud, log, args): return # import for cloudinit created users + (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) elist = [] - for user_cfg in cfg['users']: - user = None + for (user, user_cfg) in users.items(): import_ids = [] - - if isinstance(user_cfg, str) and user_cfg == "default": - user = cloud.distro.get_default_user() - if not user: - continue - + if user_cfg['default']: import_ids = util.get_cfg_option_list(cfg, "ssh_import_id", []) - - elif isinstance(user_cfg, dict): - user = None - import_ids = [] - + else: try: - user = user_cfg['name'] import_ids = user_cfg['ssh_import_id'] - - if import_ids and isinstance(import_ids, str): - import_ids = str(import_ids).split(',') - except: - log.debug("user %s is not configured for ssh_import" % user) + log.debug("User %s is not configured for ssh_import_id", user) continue + try: + import_ids = util.uniq_merge(import_ids) + import_ids = [str(i) for i in import_ids] + except: + log.debug("User %s is not correctly configured for ssh_import_id", + user) + continue + if not len(import_ids): continue diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index 464f55c3..da587fb3 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -16,16 +16,15 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-from cloudinit import distros -from cloudinit import util +from cloudinit import distros as ds from cloudinit.settings import PER_INSTANCE frequency = PER_INSTANCE -def handle(name, cfg, cloud, log, _args): - (users, groups) = distros.normalize_users_groups(cfg, cloud.distro) +def handle(name, cfg, cloud, _log, _args): + (users, groups) = ds.normalize_users_groups(cfg, cloud.distro) for (name, members) in groups.items(): cloud.distro.create_group(name, members) for (user, config) in users.items(): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index f07ba3fa..6b458d06 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -24,6 +24,7 @@ from StringIO import StringIO import abc +import itertools import os import re @@ -187,8 +188,10 @@ class Distro(object): 'gecos': "%s" % (self.default_user.title()), 'sudo': "ALL=(ALL) NOPASSWD:ALL", } - if self.default_user_groups: - user_cfg['groups'] = _uniq_merge_sorted(self.default_user_groups) + def_groups = self.default_user_groups + if not def_groups: + def_groups = [] + user_cfg['groups'] = util.uniq_merge_sorted(def_groups) return user_cfg def create_user(self, name, **kwargs): @@ -397,39 +400,27 @@ def _get_arch_package_mirror_info(package_mirrors, arch): return default -def _uniq_merge_sorted(*lists): - return sorted(_uniq_merge(*lists)) - - -def _uniq_merge(*lists): - combined_list = [] - for a_list in lists: - if isinstance(a_list, (str, basestring)): - a_list = a_list.strip().split(",") - else: - a_list = [str(a) for a in a_list] - a_list = [a.strip() for a in a_list if a.strip()] - combined_list.extend(a_list) - uniq_list = [] - for a in combined_list: - if a in uniq_list: - continue - else: - uniq_list.append(a) - return uniq_list - - +# Normalizes a input group configuration +# which can be a comma seperated list of +# group names, or a list of group names +# or a python dictionary of group names +# to a list of members of that group. +# +# The output is a dictionary of group +# names => members of that group which +# is the standard form used in the rest +# of cloud-init def _normalize_groups(grp_cfg): if isinstance(grp_cfg, (str, basestring, list)): c_grp_cfg = {} - for i in _uniq_merge(grp_cfg): + for i in util.uniq_merge(grp_cfg): c_grp_cfg[i] = [] grp_cfg = c_grp_cfg groups = {} if isinstance(grp_cfg, (dict)): for (grp_name, grp_members) in grp_cfg.items(): - groups[grp_name] = _uniq_merge_sorted(grp_members) + groups[grp_name] = util.uniq_merge_sorted(grp_members) else: raise TypeError(("Group config must be list, dict " " or string types only and not %s") % @@ -437,6 +428,21 @@ def _normalize_groups(grp_cfg): return groups +# Normalizes a input group configuration +# which can be a comma seperated list of +# user names, or a list of string user names +# or a list of dictionaries with components +# that define the user config + 'name' (if +# a 'name' field does not exist then the +# default user is assumed to 'own' that +# configuration. +# +# The output is a dictionary of user +# names => user config which is the standard +# form used in the rest of cloud-init. Note +# the default user will have a special config +# entry 'default' which will be marked as true +# all other users will be marked as false. 
def _normalize_users(u_cfg, def_user_cfg=None): if isinstance(u_cfg, (dict)): ad_ucfg = [] @@ -452,12 +458,12 @@ def _normalize_users(u_cfg, def_user_cfg=None): " for key %s") % (util.obj_name(v), k)) u_cfg = ad_ucfg elif isinstance(u_cfg, (str, basestring)): - u_cfg = _uniq_merge_sorted(u_cfg) + u_cfg = util.uniq_merge_sorted(u_cfg) users = {} for user_config in u_cfg: if isinstance(user_config, (str, basestring, list)): - for u in _uniq_merge(user_config): + for u in util.uniq_merge(user_config): if u and u not in users: users[u] = {} elif isinstance(user_config, (dict)): @@ -490,22 +496,59 @@ def _normalize_users(u_cfg, def_user_cfg=None): # Fixup the default user into the real # default user name and replace it... + def_user = None if users and 'default' in users: def_config = users.pop('default') if def_user_cfg: + # Pickup what the default 'real name' is + # and any groups that are provided by the + # default config def_user = def_user_cfg.pop('name') def_groups = def_user_cfg.pop('groups', []) + # Pickup any config + groups for that user name + # that we may have previously extracted parsed_config = users.pop(def_user, {}) - users_groups = _uniq_merge_sorted(parsed_config.get('groups', []), - def_groups) + parsed_groups = parsed_config.get('groups', []) + # Now merge our extracted groups with + # anything the default config provided + users_groups = util.uniq_merge_sorted(parsed_groups, def_groups) parsed_config['groups'] = ",".join(users_groups) + # The real config for the default user is the + # combination of the default user config provided + # by the distro, the default user config provided + # by the above merging for the user 'default' and + # then the parsed config from the user's 'real name' + # which does not have to be 'default' (but could be) users[def_user] = util.mergemanydict([def_user_cfg, def_config, parsed_config]) + # Ensure that only the default user that we + # found (if any) is actually marked as being + # the default user + if users: + for (uname, uconfig) in users.items(): + if def_user and uname == def_user: + uconfig['default'] = True + else: + uconfig['default'] = False + return users +# Normalizes a set of user/users and group +# dictionary configuration into a useable +# format that the rest of cloud-init can +# understand using the default user +# provided by the input distrobution (if any) +# to allow for mapping of the 'default' user. 
+# +# Output is a dictionary of group names -> [member] (list) +# and a dictionary of user names -> user configuration (dict) +# +# If 'user' exists it will override +# the 'users'[0] entry (if a list) otherwise it will +# just become an entry in the returned dictionary (no override) def normalize_users_groups(cfg, distro): if not cfg: cfg = {} @@ -547,6 +590,33 @@ def normalize_users_groups(cfg, distro): return (users, groups) +# Given a user dictionary config it will +# extract the default user name and user config +# from that list and return that tuple or +# return (None, None) if no default user is +# found in the given input +def extract_default(users, default_name=None, default_config=None): + if not users: + users = {} + + def safe_find(entry): + config = entry[1] + if not config or 'default' not in config: + return False + else: + return config['default'] + + tmp_users = users.items() + tmp_users = dict(itertools.ifilter(safe_find, tmp_users)) + if not tmp_users: + return (default_name, default_config) + else: + name = tmp_users.keys()[0] + config = tmp_users[name] + config.pop('default', None) + return (name, config) + + def fetch(name): locs = importer.find_module(name, ['', __name__], diff --git a/cloudinit/util.py b/cloudinit/util.py index 94b17dfa..184b37a4 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -248,6 +248,36 @@ def read_conf(fname): raise +# Merges X lists, and then keeps the +# unique ones, but orders by sort order +# instead of by the original order +def uniq_merge_sorted(*lists): + return sorted(uniq_merge(*lists)) + + +# Merges X lists and then iterates over those +# and only keeps the unique items (order preserving) +# and returns that merged and uniqued list as the +# final result. +# +# Note: if any entry is a string it will be +# split on commas and empty entries will be +# evicted and merged in accordingly. 
+def uniq_merge(*lists): + combined_list = [] + for a_list in lists: + if isinstance(a_list, (str, basestring)): + a_list = a_list.strip().split(",") + # Kickout the empty ones + a_list = [a for a in a_list if len(a)] + combined_list.extend(a_list) + uniq_list = [] + for i in combined_list: + if i not in uniq_list: + uniq_list.append(i) + return uniq_list + + def clean_filename(fn): for (k, v) in FN_REPLACEMENTS.iteritems(): fn = fn.replace(k, v) diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index b319b673..9d6fb996 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -119,8 +119,8 @@ class TestUGNormalize(MockerTestCase): (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) - self.assertEquals({}, users['joe']) - self.assertEquals({}, users['bob']) + self.assertEquals({'default': False}, users['joe']) + self.assertEquals({'default': False}, users['bob']) def test_users_simple(self): distro = self._make_distro('ubuntu') @@ -133,8 +133,8 @@ class TestUGNormalize(MockerTestCase): (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) - self.assertEquals({}, users['joe']) - self.assertEquals({}, users['bob']) + self.assertEquals({'default': False}, users['joe']) + self.assertEquals({'default': False}, users['bob']) def test_users_old_user(self): distro = self._make_distro('ubuntu', 'bob') @@ -179,8 +179,7 @@ class TestUGNormalize(MockerTestCase): } (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('zetta', users) - ug_cfg = { - } + ug_cfg = {} (users, groups) = self._norm(ug_cfg, distro) self.assertEquals({}, users) self.assertEquals({}, groups) @@ -198,6 +197,35 @@ class TestUGNormalize(MockerTestCase): users['bob']['groups']) self.assertEquals(True, users['bob']['blah']) + self.assertEquals(True, + users['bob']['default']) + + def test_users_dict_extract(self): + distro = self._make_distro('ubuntu', 'bob') + ug_cfg = { + 'users': [ + 'default', + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + self.assertIn('bob', users) + (name, config) = distros.extract_default(users) + self.assertEquals(name, 'bob') + expected_config = {} + def_config = None + try: + def_config = distro.get_default_user() + except NotImplementedError: + pass + if not def_config: + def_config = {} + expected_config.update(def_config) + + # Ignore these for now + expected_config.pop('name', None) + expected_config.pop('groups', None) + config.pop('groups', None) + self.assertEquals(config, expected_config) def test_users_dict_default(self): distro = self._make_distro('ubuntu', 'bob') @@ -210,6 +238,8 @@ class TestUGNormalize(MockerTestCase): self.assertIn('bob', users) self.assertEquals(",".join(distro.get_default_user()['groups']), users['bob']['groups']) + self.assertEquals(True, + users['bob']['default']) def test_users_dict_trans(self): distro = self._make_distro('ubuntu') @@ -223,8 +253,8 @@ class TestUGNormalize(MockerTestCase): (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) - self.assertEquals({'tr_me': True}, users['joe']) - self.assertEquals({}, users['bob']) + self.assertEquals({'tr_me': True, 'default': False}, users['joe']) + self.assertEquals({'default': False}, users['bob']) def test_users_dict(self): distro = self._make_distro('ubuntu') @@ -237,5 +267,5 @@ class 
TestUGNormalize(MockerTestCase): (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('joe', users) self.assertIn('bob', users) - self.assertEquals({}, users['joe']) - self.assertEquals({}, users['bob']) + self.assertEquals({'default': False}, users['joe']) + self.assertEquals({'default': False}, users['bob']) -- cgit v1.2.3 From d285a0463b6d16487eb5859373ccfd27eaec8b90 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 28 Sep 2012 16:54:22 -0400 Subject: make DataSourceMAAS 'main()' use load_yaml --- cloudinit/sources/DataSourceMAAS.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 581e9a4b..c172150b 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -338,7 +338,7 @@ if __name__ == "__main__": if args.config: import yaml with open(args.config) as fp: - cfg = yaml.safe_load(fp) + cfg = util.load_yaml(fp.read()) if 'datasource' in cfg: cfg = cfg['datasource']['MAAS'] for key in creds.keys(): -- cgit v1.2.3 From 7ab25c779433a614a6e4101ddfe852fc25f39c01 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 28 Sep 2012 14:06:22 -0700 Subject: Add a comment as to why distros can't be imported without being renamed due to previous usage of the attribute 'distros' --- cloudinit/config/cc_byobu.py | 4 ++++ cloudinit/config/cc_set_passwords.py | 4 ++++ cloudinit/config/cc_ssh.py | 4 ++++ cloudinit/config/cc_ssh_authkey_fingerprints.py | 8 ++++++-- cloudinit/config/cc_ssh_import_id.py | 4 ++++ cloudinit/config/cc_users_groups.py | 3 +++ 6 files changed, 25 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index e1ec5af5..e38fccdd 100644 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -18,7 +18,11 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+# Ensure this is aliased to a name not 'distros' +# since the module attribute 'distros' +# is a list of distros that are supported, not a sub-module from cloudinit import distros as ds + from cloudinit import util distros = ['ubuntu', 'debian'] diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index bb95f948..26c558ad 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -20,7 +20,11 @@ import sys +# Ensure this is aliased to a name not 'distros' +# since the module attribute 'distros' +# is a list of distros that are supported, not a sub-module from cloudinit import distros as ds + from cloudinit import ssh_util from cloudinit import util diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index c2ee4635..32e48c30 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -21,7 +21,11 @@ import glob import os +# Ensure this is aliased to a name not 'distros' +# since the module attribute 'distros' +# is a list of distros that are supported, not a sub-module from cloudinit import distros as ds + from cloudinit import ssh_util from cloudinit import util diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 32214fba..8c9a8806 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -21,7 +21,11 @@ import hashlib from prettytable import PrettyTable -from cloudinit import distros +# Ensure this is aliased to a name not 'distros' +# since the module attribute 'distros' +# is a list of distros that are supported, not a sub-module +from cloudinit import distros as ds + from cloudinit import ssh_util from cloudinit import util @@ -94,7 +98,7 @@ def handle(name, cfg, cloud, log, _args): hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") extract_func = ssh_util.extract_authorized_keys - (users, _groups) = distros.normalize_users_groups(cfg, cloud.distro) + (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) _pprint_key_entries(user_name, auth_key_fn, diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py index a781cd7c..83af36e9 100644 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -18,7 +18,11 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +# Ensure this is aliased to a name not 'distros' +# since the module attribute 'distros' +# is a list of distros that are supported, not a sub-module from cloudinit import distros as ds + from cloudinit import util import pwd diff --git a/cloudinit/config/cc_users_groups.py b/cloudinit/config/cc_users_groups.py index da587fb3..bf5b4581 100644 --- a/cloudinit/config/cc_users_groups.py +++ b/cloudinit/config/cc_users_groups.py @@ -16,6 +16,9 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+# Ensure this is aliased to a name not 'distros' +# since the module attribute 'distros' +# is a list of distros that are supported, not a sub-module from cloudinit import distros as ds from cloudinit.settings import PER_INSTANCE -- cgit v1.2.3 From 5233b6edb70702476463b47c06cb02b3c7f74c51 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 28 Sep 2012 14:17:42 -0700 Subject: Make byobu more tolerant of the user not being located and warn when it is not found + only run the shell command when actual contents exist to run. --- cloudinit/config/cc_byobu.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index e38fccdd..92d428b7 100644 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -62,16 +62,19 @@ def handle(name, cfg, cloud, log, args): shcmd = "" if mod_user: (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) - (user, _user_config) = ds.extract_default(users, 'ubuntu') - shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) - shcmd += " || X=$(($X+1)); " + (user, _user_config) = ds.extract_default(users) + if not user: + log.warn(("No default byobu user provided, " + "can not launch %s for the default user"), bl_inst) + else: + shcmd += " sudo -Hu \"%s\" byobu-launcher-%s" % (user, bl_inst) + shcmd += " || X=$(($X+1)); " if mod_sys: shcmd += "echo \"%s\" | debconf-set-selections" % dc_val shcmd += " && dpkg-reconfigure byobu --frontend=noninteractive" shcmd += " || X=$(($X+1)); " - cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")] - - log.debug("Setting byobu to %s", value) - - util.subp(cmd, capture=False) + if len(shcmd): + cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")] + log.debug("Setting byobu to %s", value) + util.subp(cmd, capture=False) -- cgit v1.2.3 From f255d068c5d4251762b83467d1927ab72da57482 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 28 Sep 2012 18:39:46 -0700 Subject: Ensure that the directory where the sudoers file is being added actually exists before it is written into and ensure that the directory is included in the main sudoers file. 
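The heart of the change below is a scan of the main sudoers file for an '#includedir' line pointing at the drop-in directory, appending one when it is missing, before the directory itself is created. A simplified, string-only sketch of that check (the real ensure_sudo_dir also reads and writes /etc/sudoers and creates the directory with the right mode):

import os
import re


def ensure_includedir(sudoers_text, dirpath):
    # Return sudoers_text unchanged if some '#includedir' line already
    # names dirpath, otherwise append one.
    for line in sudoers_text.splitlines():
        match = re.search(r"^#includedir\s+(.*)$", line.strip())
        if match and os.path.abspath(match.group(1).strip()) == dirpath:
            return sudoers_text
    return sudoers_text + "\n#includedir %s\n" % dirpath


print(ensure_includedir("Defaults env_reset\n", "/etc/sudoers.d"))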
--- cloudinit/distros/__init__.py | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 86ab557c..11422644 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -296,6 +296,38 @@ class Distro(object): return True + def ensure_sudo_dir(self, path, sudo_base='/etc/sudoers'): + # Ensure the dir is included and that + # it actually exists as a directory + sudoers_contents = '' + if os.path.exists(sudo_base): + sudoers_contents = util.load_file(sudo_base) + found_include = False + for line in sudoers_contents.splitlines(): + line = line.strip() + mtch = re.search(r"#includedir\s+(.*)$", line) + if not mtch: + continue + included_dir = mtch.group(1).strip() + if not included_dir: + continue + included_dir = os.path.abspath(included_dir) + if included_dir == path: + found_include = True + break + if not found_include: + sudoers_contents += "\n#includedir %s\n" % (path) + try: + if not os.path.exists(sudo_base): + util.write_file(sudo_base, sudoers_contents, 0440) + else: + with open(sudo_base, 'a') as f: + f.write(sudoers_contents) + except IOError as e: + util.logexc(LOG, "Failed to write %s" % sudo_base, e) + raise e + util.ensure_dir(path, 0440) + def write_sudo_rules(self, user, rules, @@ -311,9 +343,10 @@ class Distro(object): content += "%s %s\n" % (user, rule) content += "\n" + self.ensure_sudo_dir(os.path.dirname(sudo_file)) + if not os.path.exists(sudo_file): util.write_file(sudo_file, content, 0440) - else: try: with open(sudo_file, 'a') as f: -- cgit v1.2.3 From 9e7320df7a6fb6f1e9a5401735c471467b1fe365 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 28 Sep 2012 20:32:00 -0700 Subject: Dir should be 0755, not 0440 --- cloudinit/distros/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 11422644..301eeed2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -326,7 +326,7 @@ class Distro(object): except IOError as e: util.logexc(LOG, "Failed to write %s" % sudo_base, e) raise e - util.ensure_dir(path, 0440) + util.ensure_dir(path, 0755) def write_sudo_rules(self, user, -- cgit v1.2.3 From 9a252b3a41ad757587cba729b31079c04b253faa Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 28 Sep 2012 21:04:00 -0700 Subject: Update the log statement used here to be a little more relevant. --- cloudinit/distros/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 301eeed2..500147c6 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -248,7 +248,7 @@ class Distro(object): if util.is_user(name): LOG.warn("User %s already exists, skipping." 
% name) else: - LOG.debug("Creating name %s" % name) + LOG.debug("Adding user named %s", name) try: util.subp(adduser_cmd, logstring=x_adduser_cmd) except Exception as e: -- cgit v1.2.3 From a28d7fe46cf8e3277a13c35c5dd0185f65ab1d0c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 30 Sep 2012 09:20:59 -0400 Subject: [pylint]: remove unused import --- cloudinit/sources/DataSourceMAAS.py | 1 - 1 file changed, 1 deletion(-) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index c172150b..ec52d775 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -336,7 +336,6 @@ if __name__ == "__main__": 'token_secret': args.tsec, 'consumer_secret': args.csec} if args.config: - import yaml with open(args.config) as fp: cfg = util.load_yaml(fp.read()) if 'datasource' in cfg: -- cgit v1.2.3 From e2b85e2087b796c7a130ee20f688dbd5d161739f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sun, 30 Sep 2012 13:47:00 -0700 Subject: Ensure that the include dir starts the line and is not a part of a comment or other part of the line. --- cloudinit/distros/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 500147c6..cdae1add 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -305,10 +305,10 @@ class Distro(object): found_include = False for line in sudoers_contents.splitlines(): line = line.strip() - mtch = re.search(r"#includedir\s+(.*)$", line) - if not mtch: + include_match = re.search(r"^#includedir\s+(.*)$", line) + if not include_match: continue - included_dir = mtch.group(1).strip() + included_dir = include_match.group(1).strip() if not included_dir: continue included_dir = os.path.abspath(included_dir) -- cgit v1.2.3 From f8b23b39bdf8753986df9ecf5948ffd8e8fdee74 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 1 Oct 2012 11:50:48 -0400 Subject: fix oauth time skew. actual implementation was returning 401 not 403. This fixes (tested) bug 978127. The server was actually returning a 401 not a 403. As such, the fix here was insufficient. This will now take either of those 2 error codes. I've also tested it by changing the clock in the cloud-init upstart job with a stanza like below, and verifying that we do see the problem and then it resolve itself: pre-start script offset="10 minutes ago" past=$(date -R --date "$offset") date --set "$past" && echo ===== "set date to $past [$offset]" ===== || echo ===== "failed to set date to $past [$offset]" ==== end script LP: #978127 --- ChangeLog | 4 ++++ cloudinit/sources/DataSourceMAAS.py | 4 ++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index c5dcd418..cbfba6d0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,8 @@ 0.7.0: + - add a 'exception_cb' argument to 'wait_for_url'. If provided, this + method will be called back with the exception received and the message. + - utilize the 'exception_cb' above to modify the oauth timestamp in + DataSourceMAAS requests if a 401 or 403 is received. 
(LP: #978127) - catch signals and exit rather than stack tracing - if logging fails, enable a fallback logger by patching the logging module - do not 'start networking' in cloud-init-nonet, but add diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index ec52d775..e187aec9 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -145,10 +145,10 @@ class DataSourceMAAS(sources.DataSource): def _except_cb(self, msg, exception): if not (isinstance(exception, urllib2.HTTPError) and - exception.code == 403): + (exception.code == 403 or exception.code == 401)): return if 'date' not in exception.headers: - LOG.warn("date field not in 403 headers") + LOG.warn("date field not in %d headers" % exception.code) return date = exception.headers['date'] -- cgit v1.2.3 From df00a64c662a916fab18602a294a0443545a1990 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 1 Oct 2012 13:50:56 -0400 Subject: fix tools/make-dist-tarball --- tools/make-dist-tarball | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/make-dist-tarball b/tools/make-dist-tarball index 622283bd..7742caea 100755 --- a/tools/make-dist-tarball +++ b/tools/make-dist-tarball @@ -9,7 +9,7 @@ Usage: ${0##*/} version EOF } -topdir="../$PWD" +topdir="$PWD" tag=${1} [ -n "$tag" ] || { Usage 1>&2 ; exit 1; } -- cgit v1.2.3 From d65ed7106072b9a882592de69ebfe36ba69d6fbc Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 4 Oct 2012 15:14:47 -0700 Subject: For the cloud-init rc.d ensure that the cloud-init-local mode is attempted as a hard requirement (instead of the previous soft requirement) which in the rhel5.6 case makes it not run in the right order. --- sysvinit/cloud-init | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sysvinit/cloud-init b/sysvinit/cloud-init index 4b44a615..f8ab5d5f 100755 --- a/sysvinit/cloud-init +++ b/sysvinit/cloud-init @@ -25,8 +25,8 @@ ### BEGIN INIT INFO # Provides: cloud-init -# Required-Start: $local_fs $network $named $remote_fs -# Should-Start: $time cloud-init-local +# Required-Start: $local_fs $network $named $remote_fs cloud-init-local +# Should-Start: $time # Required-Stop: # Should-Stop: # Default-Start: 3 5 -- cgit v1.2.3 From ab1b27294ce852a5d67b230971a1c28c99940e50 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 4 Oct 2012 17:32:26 -0700 Subject: Ensure that for config drive that we map 'hostname' to 'local-hostname' so that the modules work correctly with the cfgdrive style of data. --- cloudinit/sources/DataSourceConfigDrive.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index b8154367..b1cf942e 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -85,6 +85,16 @@ class DataSourceConfigDrive(sources.DataSource): md = results['metadata'] md = util.mergedict(md, DEFAULT_METADATA) + # Perform some metadata 'fixups' + # + # OpenStack uses the 'hostname' key + # while most of cloud-init uses the metadata + # 'local-hostname' key instead so if it doesn't + # exist we need to make sure its copied over. 
+ for (tgt, src) in [('local-hostname', 'hostname')]: + if tgt not in md and src in md: + md[tgt] = md[src] + user_dsmode = results.get('dsmode', None) if user_dsmode not in VALID_DSMODES + (None,): LOG.warn("user specified invalid mode: %s" % user_dsmode) -- cgit v1.2.3 From 4c7de624659d4b6b3d073ef5c8e730dbac638135 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 5 Oct 2012 10:42:40 -0400 Subject: update ChangeLog for previous 2 commits. Note, I've marked this as fixing LP: #1061964, but clearly it does not. that was fixed under revision 680. LP: #1061964 --- ChangeLog | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ChangeLog b/ChangeLog index cbfba6d0..672482ba 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,6 @@ +0.7.1: + - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 + - config-drive: map hostname to local-hostname (LP: #1061964) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. -- cgit v1.2.3 From f73abb10210e21da858b99238e2945e5bf1cfac2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 5 Oct 2012 13:07:25 -0400 Subject: bump version to 0.7.1 --- cloudinit/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/version.py b/cloudinit/version.py index 4599910c..12ff620a 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -20,7 +20,7 @@ from distutils import version as vr def version(): - return vr.StrictVersion("0.7.0") + return vr.StrictVersion("0.7.1") def version_string(): -- cgit v1.2.3 From 130d90cc675db6f33d294bf04e9c74227c07db50 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 5 Oct 2012 11:22:31 -0700 Subject: Leave off creating fake headers for unknown versions since this causes rpmbuild to croak with errors like 'changelog not in descending chronological order'. --- packages/brpm | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/brpm b/packages/brpm index 1735f5bb..5aecad65 100755 --- a/packages/brpm +++ b/packages/brpm @@ -58,8 +58,7 @@ def get_log_header(version): a_rev = rev break if not a_rev: - return format_change_line(datetime.now(), - '??', version) + return None # Extract who made that tag as the header cmd = ['bzr', 'log', '-r%s' % (a_rev), '--timezone=utc'] @@ -133,7 +132,8 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): if re.match(r"^\s*[\d][.][\d][.][\d]:\s*", line): line = line.strip(":") header = get_log_header(line) - changelog_lines.append(header) + if header: + changelog_lines.append(header) else: changelog_lines.append(line) subs['changelog'] = "\n".join(changelog_lines) -- cgit v1.2.3 From 6cfb1d869614e8ceee45c4443ec65603cce72b78 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 5 Oct 2012 11:30:36 -0700 Subject: Only allow the first missing version to use the datetime.now(), the others get a warning message. 
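Taken together, the two brpm tweaks mean: a ChangeLog version with no matching bzr tag is treated as the current in-development release only the first time one is seen (and gets a changelog header dated now), while any further untagged versions only emit a warning, so rpmbuild never sees entries that are out of chronological order. A condensed sketch of that control flow (illustrative only, not the script's actual code or date format):

import sys
from datetime import datetime


def changelog_headers(versions, tagged):
    # Synthesize a 'now' header only for the first untagged version;
    # warn and skip any later ones to keep the changelog in order.
    headers = []
    missing = 0
    for version in versions:
        if version in tagged:
            headers.append("* tagged release %s" % version)
            continue
        missing += 1
        if missing == 1:
            headers.append("* %s  ?? (in development)"
                           % datetime.now().strftime("%a, %d %b %Y"))
        else:
            sys.stderr.write("Changelog version %s has no tag!\n" % version)
    return headers


print(changelog_headers(["0.7.1", "0.7.0"], ["0.7.0"]))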
--- packages/brpm | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/packages/brpm b/packages/brpm index 5aecad65..28136d74 100755 --- a/packages/brpm +++ b/packages/brpm @@ -126,13 +126,22 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): # Format a nice changelog (as best as we can) changelog = util.load_file(util.abs_join(find_root(), 'ChangeLog')) changelog_lines = [] + missing_versions = 0 for line in changelog.splitlines(): if not line.strip(): continue if re.match(r"^\s*[\d][.][\d][.][\d]:\s*", line): line = line.strip(":") header = get_log_header(line) - if header: + if not header: + missing_versions += 1 + if missing_versions == 1: + # Must be using a new 'dev'/'trunk' release + changelog_lines.append(format_change_line(datetime.now(), '??')) + else: + sys.stderr.write(("Changelog version line %s " + "does not have a corresponding tag!") % (line)) + else: changelog_lines.append(header) else: changelog_lines.append(line) -- cgit v1.2.3 From 7a2e678685dbc94e13f6d8ca6a5e92000e1f974f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 5 Oct 2012 11:31:56 -0700 Subject: Ensure stderr newline. --- packages/brpm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/brpm b/packages/brpm index 28136d74..7975e05c 100755 --- a/packages/brpm +++ b/packages/brpm @@ -140,7 +140,7 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): changelog_lines.append(format_change_line(datetime.now(), '??')) else: sys.stderr.write(("Changelog version line %s " - "does not have a corresponding tag!") % (line)) + "does not have a corresponding tag!\n") % (line)) else: changelog_lines.append(header) else: -- cgit v1.2.3 From ffa19cbd80ba10453f0ef448c1c10dbcbf5be504 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 5 Oct 2012 13:38:54 -0700 Subject: Ensure that config drive datasource attempts to translate the device name to a actual device using logic that will try the ec2 metadata (if avail) or will try using 'blkid' to find a corresponding label. 
LP: #1062540 --- cloudinit/sources/DataSourceConfigDrive.py | 58 ++++++++++++++++++++++++++++++ cloudinit/sources/DataSourceEc2.py | 16 --------- cloudinit/sources/__init__.py | 17 +++++++++ 3 files changed, 75 insertions(+), 16 deletions(-) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index b1cf942e..495eee82 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -48,6 +48,7 @@ class DataSourceConfigDrive(sources.DataSource): self.dsmode = 'local' self.seed_dir = os.path.join(paths.seed_dir, 'config_drive') self.version = None + self.ec2_metadata = None def __str__(self): mstr = "%s [%s,ver=%s]" % (util.obj_name(self), self.dsmode, @@ -55,6 +56,62 @@ class DataSourceConfigDrive(sources.DataSource): mstr += "[source=%s]" % (self.source) return mstr + def _ec2_name_to_device(self, name): + if not self.ec2_metadata: + return None + bdm = self.ec2_metadata.get('block-device-mapping', {}) + for (ent_name, device) in bdm.items(): + if name == ent_name: + return device + return None + + def _os_name_to_device(self, name): + device = None + try: + dev_entries = util.find_devs_with('LABEL=%s' % (name)) + if dev_entries: + device = dev_entries[0] + except util.ProcessExecutionError: + pass + return device + + def device_name_to_device(self, name): + # Translate a 'name' to a 'physical' device + if not name: + return None + # Try the ec2 mapping first + names = [name] + if name == 'root': + names.insert(0, 'ami') + if name == 'ami': + names.append('root') + device = None + for n in names: + device = self._ec2_name_to_device(n) + if device: + break + # Try the openstack way second + if not device: + for n in names: + device = self._os_name_to_device(n) + if device: + break + # Ok give up... 
+ if not device: + return None + # Ensure translated ok + if not device.startswith("/"): + device = "/dev/%s" % device + if os.path.exists(device): + return device + # Durn, try adjusting the mapping + remapped = self._remap_device(os.path.basename(device)) + if remapped: + LOG.debug("Remapped device name %s => %s", device, remapped) + return remapped + # Really give up now + return None + def get_data(self): found = None md = {} @@ -143,6 +200,7 @@ class DataSourceConfigDrive(sources.DataSource): self.source = found self.metadata = md + self.ec2_metadata = results.get('ec2-metadata') self.userdata_raw = results.get('userdata') self.version = results['cfgdrive_ver'] diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index c7ad6d54..3686fa10 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -151,22 +151,6 @@ class DataSourceEc2(sources.DataSource): self.metadata_address = url2base.get(url) return bool(url) - def _remap_device(self, short_name): - # LP: #611137 - # the metadata service may believe that devices are named 'sda' - # when the kernel named them 'vda' or 'xvda' - # we want to return the correct value for what will actually - # exist in this instance - mappings = {"sd": ("vd", "xvd")} - for (nfrom, tlist) in mappings.iteritems(): - if not short_name.startswith(nfrom): - continue - for nto in tlist: - cand = "/dev/%s%s" % (nto, short_name[len(nfrom):]) - if os.path.exists(cand): - return cand - return None - def device_name_to_device(self, name): # Consult metadata service, that has # ephemeral0: sdb diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 04083d0c..b22369a8 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -23,6 +23,7 @@ from email.mime.multipart import MIMEMultipart import abc +import os from cloudinit import importer from cloudinit import log as logging @@ -128,6 +129,22 @@ class DataSource(object): return keys + def _remap_device(self, short_name): + # LP: #611137 + # the metadata service may believe that devices are named 'sda' + # when the kernel named them 'vda' or 'xvda' + # we want to return the correct value for what will actually + # exist in this instance + mappings = {"sd": ("vd", "xvd")} + for (nfrom, tlist) in mappings.iteritems(): + if not short_name.startswith(nfrom): + continue + for nto in tlist: + cand = "/dev/%s%s" % (nto, short_name[len(nfrom):]) + if os.path.exists(cand): + return cand + return None + def device_name_to_device(self, _name): # translate a 'name' to a device # the primary function at this point is on ec2 -- cgit v1.2.3 From 5b4fa81016f487fb6e041cef5a3b4ac0bd0863c5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 5 Oct 2012 14:50:22 -0700 Subject: Add tests to show that the assigned bug is fixed. Also fix the extraction of the metadata key name since it actually uses 'dashes' instead of being a single word. 
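[Editorial sketch, not part of the patch] The new unit tests exercise the lookup order introduced above: consult the ec2 block-device-mapping first, then fall back to an OpenStack-style label lookup. The sketch below is a condensed, runnable approximation; 'bdm' and 'label_lookup' are assumptions standing in for self.ec2_metadata['block-device-mapping'] and util.find_devs_with(), and the existence/remap validation added in a follow-up commit is omitted here.

# --- editorial example (not part of the patch) ---------------------------
def device_name_to_device(name, bdm, label_lookup):
    if not name:
        return None
    # 'root' and 'ami' are treated as aliases of each other.
    names = [name]
    if name == 'root':
        names.insert(0, 'ami')
    if name == 'ami':
        names.append('root')
    device = None
    for n in names:                      # 1) ec2 metadata mapping first
        device = bdm.get(n)
        if device:
            break
    if not device:
        for n in names:                  # 2) then the blkid/label lookup
            device = label_lookup(n)
            if device:
                break
    if not device:
        return None
    if not device.startswith("/"):
        device = "/dev/%s" % device
    return device


# 'root' resolves through its 'ami' alias in the ec2 mapping.
print(device_name_to_device('root', {'ami': 'vda1'}, lambda _n: None))
# -> /dev/vda1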
--- cloudinit/sources/DataSourceConfigDrive.py | 2 +- .../unittests/test_datasource/test_configdrive.py | 147 +++++++++++++++++++-- 2 files changed, 140 insertions(+), 9 deletions(-) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 495eee82..eebe44ec 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -285,7 +285,7 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): ('metadata', "openstack/%s/meta_data.json" % version, True, json.loads), ('userdata', "openstack/%s/user_data" % version, False, None), - ('ec2-metadata', "ec2/latest/metadata.json", False, json.loads), + ('ec2-metadata', "ec2/latest/meta-data.json", False, json.loads), ) results = {'userdata': None} diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 55573114..99936d92 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -4,10 +4,15 @@ import os import os.path import shutil import tempfile -from unittest import TestCase + +import mocker +from mocker import MockerTestCase from cloudinit.sources import DataSourceConfigDrive as ds +from cloudinit import settings from cloudinit import util +from cloudinit import helpers + PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' @@ -60,17 +65,143 @@ CFG_DRIVE_FILES_V2 = { 'openstack/latest/user_data': USER_DATA} -class TestConfigDriveDataSource(TestCase): +class TestConfigDriveDataSource(MockerTestCase): def setUp(self): super(TestConfigDriveDataSource, self).setUp() - self.tmp = tempfile.mkdtemp() + self.tmp = self.makeDir() - def tearDown(self): - try: - shutil.rmtree(self.tmp) - except OSError: - pass + def test_ec2_metadata(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive_dir(self.tmp) + self.assertTrue('ec2-metadata' in found) + ec2_md = found['ec2-metadata'] + self.assertEqual(EC2_META, ec2_md) + + def test_dev_os_remap(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive_dir(self.tmp) + cfg_ds.metadata = found['metadata'] + name_tests = { + 'ami': '/dev/vda1', + 'root': '/dev/vda1', + 'ephemeral0': '/dev/vda2', + 'swap': '/dev/vda3', + } + for name, dev_name in name_tests.items(): + my_mock = mocker.Mocker() + find_mock = my_mock.replace(util.find_devs_with, + spec=False, passthrough=False) + provided_name = dev_name[len('/dev/'):] + provided_name = "s" + provided_name[1:] + find_mock(mocker.ARGS) + my_mock.result([provided_name]) + exists_mock = my_mock.replace(os.path.exists, + spec=False, passthrough=False) + exists_mock(mocker.ARGS) + my_mock.result(False) + exists_mock(mocker.ARGS) + my_mock.result(True) + my_mock.replay() + device = cfg_ds.device_name_to_device(name) + my_mock.restore() + self.assertEquals(dev_name, device) + + def test_dev_os_map(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive_dir(self.tmp) + os_md = found['metadata'] + cfg_ds.metadata = os_md + name_tests = { + 'ami': '/dev/vda1', + 'root': '/dev/vda1', + 'ephemeral0': '/dev/vda2', + 'swap': '/dev/vda3', + } + for name, dev_name in name_tests.items(): + 
my_mock = mocker.Mocker() + find_mock = my_mock.replace(util.find_devs_with, + spec=False, passthrough=False) + find_mock(mocker.ARGS) + my_mock.result([dev_name]) + exists_mock = my_mock.replace(os.path.exists, + spec=False, passthrough=False) + exists_mock(mocker.ARGS) + my_mock.result(True) + my_mock.replay() + device = cfg_ds.device_name_to_device(name) + my_mock.restore() + self.assertEquals(dev_name, device) + + def test_dev_ec2_remap(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive_dir(self.tmp) + ec2_md = found['ec2-metadata'] + os_md = found['metadata'] + cfg_ds.ec2_metadata = ec2_md + cfg_ds.metadata = os_md + name_tests = { + 'ami': '/dev/vda1', + 'root': '/dev/vda1', + 'ephemeral0': '/dev/vda2', + 'swap': '/dev/vda3', + None: None, + 'bob': None, + 'root2k': None, + } + for name, dev_name in name_tests.items(): + my_mock = mocker.Mocker() + exists_mock = my_mock.replace(os.path.exists, + spec=False, passthrough=False) + exists_mock(mocker.ARGS) + my_mock.result(False) + exists_mock(mocker.ARGS) + my_mock.result(True) + my_mock.replay() + device = cfg_ds.device_name_to_device(name) + self.assertEquals(dev_name, device) + my_mock.restore() + + def test_dev_ec2_map(self): + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, + None, + helpers.Paths({})) + found = ds.read_config_drive_dir(self.tmp) + exists_mock = self.mocker.replace(os.path.exists, + spec=False, passthrough=False) + exists_mock(mocker.ARGS) + self.mocker.count(0, None) + self.mocker.result(True) + self.mocker.replay() + ec2_md = found['ec2-metadata'] + os_md = found['metadata'] + cfg_ds.ec2_metadata = ec2_md + cfg_ds.metadata = os_md + name_tests = { + 'ami': '/dev/sda1', + 'root': '/dev/sda1', + 'ephemeral0': '/dev/sda2', + 'swap': '/dev/sda3', + None: None, + 'bob': None, + 'root2k': None, + } + for name, dev_name in name_tests.items(): + device = cfg_ds.device_name_to_device(name) + self.assertEquals(dev_name, device) def test_dir_valid(self): """Verify a dir is read as such.""" -- cgit v1.2.3 From f510d8f5762f3c9d27afcc57f63d7614ec6c05cd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 5 Oct 2012 15:43:54 -0700 Subject: Add checks around the device names that are found to ensure that even if they are found that they are also valid, before they are assumed to be the correct device name. 
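[Editorial sketch, not part of the patch] "Validate before trusting" here means: normalize the candidate to a /dev path, check that it actually exists, and only then fall back to the generic sd -> vd/xvd remapping. In the sketch below, remap_device() mirrors the _remap_device() helper moved into sources/__init__.py, and the injectable 'exists' parameter is an assumption of this sketch so it can run without real block devices.

# --- editorial example (not part of the patch) ---------------------------
import os


def remap_device(short_name, exists=os.path.exists):
    # LP: #611137 - metadata may say 'sda' when the kernel used vda/xvda.
    mappings = {"sd": ("vd", "xvd")}
    for (nfrom, tlist) in mappings.items():
        if not short_name.startswith(nfrom):
            continue
        for nto in tlist:
            cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
            if exists(cand):
                return cand
    return None


def validate_device_name(device, exists=os.path.exists):
    if not device:
        return None
    if not device.startswith("/"):
        device = "/dev/%s" % device
    if exists(device):
        return device
    # The name looked plausible but nothing is there: try the remapping.
    return remap_device(os.path.basename(device), exists=exists)


# Metadata claimed 'sda1', but only /dev/vda1 actually exists.
fake_exists = lambda path: path == "/dev/vda1"
print(validate_device_name("sda1", exists=fake_exists))  # -> /dev/vda1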
--- cloudinit/sources/DataSourceConfigDrive.py | 36 ++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index eebe44ec..4af2e5ae 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -68,13 +68,30 @@ class DataSourceConfigDrive(sources.DataSource): def _os_name_to_device(self, name): device = None try: - dev_entries = util.find_devs_with('LABEL=%s' % (name)) + criteria = 'LABEL=%s' % (name) + if name in ['swap']: + criteria = 'TYPE=%s' % (name) + dev_entries = util.find_devs_with(criteria) if dev_entries: device = dev_entries[0] except util.ProcessExecutionError: pass return device + def _validate_device_name(self, device): + if not device: + return None + if not device.startswith("/"): + device = "/dev/%s" % device + if os.path.exists(device): + return device + # Durn, try adjusting the mapping + remapped = self._remap_device(os.path.basename(device)) + if remapped: + LOG.debug("Remapped device name %s => %s", device, remapped) + return remapped + return None + def device_name_to_device(self, name): # Translate a 'name' to a 'physical' device if not name: @@ -86,31 +103,26 @@ class DataSourceConfigDrive(sources.DataSource): if name == 'ami': names.append('root') device = None + LOG.debug("Using ec2 metadata lookup to find device %s", names) for n in names: device = self._ec2_name_to_device(n) + device = self._validate_device_name(device) if device: break # Try the openstack way second if not device: + LOG.debug("Using os lookup to find device %s", names) for n in names: device = self._os_name_to_device(n) + device = self._validate_device_name(device) if device: break # Ok give up... if not device: return None - # Ensure translated ok - if not device.startswith("/"): - device = "/dev/%s" % device - if os.path.exists(device): + else: + LOG.debug("Using cfg drive lookup mapped to device %s", device) return device - # Durn, try adjusting the mapping - remapped = self._remap_device(os.path.basename(device)) - if remapped: - LOG.debug("Remapped device name %s => %s", device, remapped) - return remapped - # Really give up now - return None def get_data(self): found = None -- cgit v1.2.3 From 40a54a4f4a486bc196ee0eac53ef630c828aef8e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 5 Oct 2012 15:46:20 -0700 Subject: Pylint cleanups. 
--- tests/unittests/test_datasource/test_configdrive.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 99936d92..4fa13db8 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -2,8 +2,6 @@ from copy import copy import json import os import os.path -import shutil -import tempfile import mocker from mocker import MockerTestCase @@ -73,9 +71,6 @@ class TestConfigDriveDataSource(MockerTestCase): def test_ec2_metadata(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) - cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, - None, - helpers.Paths({})) found = ds.read_config_drive_dir(self.tmp) self.assertTrue('ec2-metadata' in found) ec2_md = found['ec2-metadata'] -- cgit v1.2.3 From aae71d04bf042ac20f9027a43f5b269a6f7c19fc Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 8 Oct 2012 16:23:09 -0700 Subject: Add the ability to have a 'private' release number which can be to increment cloud-init while still maintaining the 'major' cloud-init version number from bzr. --- packages/brpm | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/packages/brpm b/packages/brpm index 7975e05c..fec6acd8 100755 --- a/packages/brpm +++ b/packages/brpm @@ -106,6 +106,8 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): subs['version'] = version subs['revno'] = revno subs['release'] = "bzr%s" % (revno) + if args.sub_release is not None: + subs['release'] += ".%s" % (args.sub_release) subs['archive_name'] = arc_fn cmd = [util.abs_join(find_root(), 'tools', 'read-dependencies')] @@ -175,6 +177,13 @@ def main(): " (default: %(default)s)"), default=False, action='store_true') + parser.add_argument('-s', "--sub-release", dest="sub_release", + metavar="RELEASE", + help=("a 'internal' release number to concat" + " with the bzr version number to form" + " the final version number"), + type=int, + default=None) parser.add_argument("-p", "--patch", dest="patches", help=("include the following patch when building"), default=[], -- cgit v1.2.3 From 8b7b89a7aa0e1110ef8eb6ab46160dbc0a646019 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 8 Oct 2012 16:33:43 -0700 Subject: Make the subrelease a new k/v entry that is handled differently in the rpm spec file template instead of being joined with the release. --- packages/brpm | 4 +++- packages/redhat/cloud-init.spec.in | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/packages/brpm b/packages/brpm index fec6acd8..e6b03609 100755 --- a/packages/brpm +++ b/packages/brpm @@ -107,7 +107,9 @@ def generate_spec_contents(args, tmpl_fn, top_dir, arc_fn): subs['revno'] = revno subs['release'] = "bzr%s" % (revno) if args.sub_release is not None: - subs['release'] += ".%s" % (args.sub_release) + subs['subrelease'] = "." 
+ str(args.sub_release) + else: + subs['subrelease'] = '' subs['archive_name'] = arc_fn cmd = [util.abs_join(find_root(), 'tools', 'read-dependencies')] diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in index 12f0b144..30bcd050 100644 --- a/packages/redhat/cloud-init.spec.in +++ b/packages/redhat/cloud-init.spec.in @@ -11,7 +11,7 @@ Name: cloud-init Version: ${version} -Release: ${release}%{?dist} +Release: ${release}${subrelease}%{?dist} Summary: Cloud instance init scripts Group: System Environment/Base -- cgit v1.2.3 From e59ce224c45191a0ecceb113c9e5b8e6d9dcdab1 Mon Sep 17 00:00:00 2001 From: Avishai Ish-Shalom Date: Wed, 10 Oct 2012 18:27:28 +0200 Subject: Fixed typo on dict value reference LP: #1065116 --- cloudinit/user_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py index 803ffc3a..58827e3d 100644 --- a/cloudinit/user_data.py +++ b/cloudinit/user_data.py @@ -224,7 +224,7 @@ class UserDataProcessor(object): for header in list(ent.keys()): if header in ('content', 'filename', 'type', 'launch-index'): continue - msg.add_header(header, ent['header']) + msg.add_header(header, ent[header]) self._attach_part(append_msg, msg) -- cgit v1.2.3 From 80eb53deb9f80694c5fd0e0190197de1ff496716 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 10 Oct 2012 16:21:22 -0700 Subject: System config niceness! 1. Move out the old helpers that provided oop access/reading/writing to various standard conf files and place those in parsers instead. 2. Unify the 'update_hostname' which varied very little between distros and make it generic so that subclasses can only provide a couple of functions to obtain the hostname updating functionality 3. Implement that new set of functions in rhel/debian 4. Use the new parsers chop_comment function for similar use cases as well as add a new utils make header function that can be used for configuration files that are newly generated to use (less duplication here of this same thing being done in multiple places. 5. Add in a distro '_apply_hostname' which calls out to the 'hostname' program to set the system hostname (more duplication elimination). 6. 
Make the 'constant' filenames being written to for configuration by the various distros be instance members instead of string constants 'sprinkled' throughout the code --- cloudinit/distros/__init__.py | 89 ++++++--- cloudinit/distros/debian.py | 88 ++++----- cloudinit/distros/helpers.py | 252 ------------------------- cloudinit/distros/parsers/__init__.py | 27 +++ cloudinit/distros/parsers/hosts.py | 92 +++++++++ cloudinit/distros/parsers/quoting_conf.py | 80 ++++++++ cloudinit/distros/parsers/resolv_conf.py | 171 +++++++++++++++++ cloudinit/distros/rhel.py | 149 ++++----------- cloudinit/util.py | 15 +- tests/unittests/test_distros/test_hosts.py | 8 +- tests/unittests/test_distros/test_netconfig.py | 2 +- tests/unittests/test_distros/test_resolv.py | 14 +- 12 files changed, 532 insertions(+), 455 deletions(-) delete mode 100644 cloudinit/distros/helpers.py create mode 100644 cloudinit/distros/parsers/__init__.py create mode 100644 cloudinit/distros/parsers/hosts.py create mode 100644 cloudinit/distros/parsers/quoting_conf.py create mode 100644 cloudinit/distros/parsers/resolv_conf.py diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index a5d41bdb..07f03159 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -33,7 +33,7 @@ from cloudinit import log as logging from cloudinit import ssh_util from cloudinit import util -from cloudinit.distros import helpers +from cloudinit.distros.parsers import hosts LOG = logging.getLogger(__name__) @@ -43,6 +43,8 @@ class Distro(object): __metaclass__ = abc.ABCMeta default_user = None default_user_groups = None + hosts_fn = "/etc/hosts" + ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users" def __init__(self, name, cfg, paths): self._paths = paths @@ -66,10 +68,6 @@ class Distro(object): def set_hostname(self, hostname): raise NotImplementedError() - @abc.abstractmethod - def update_hostname(self, hostname, prev_hostname_fn): - raise NotImplementedError() - @abc.abstractmethod def package_command(self, cmd, args=None): raise NotImplementedError() @@ -117,14 +115,62 @@ class Distro(object): def _get_localhost_ip(self): return "127.0.0.1" + @abc.abstractmethod + def _read_hostname(self, filename, default=None): + raise NotImplementedError() + + @abc.abstractmethod + def _write_hostname(self, hostname, filename): + raise NotImplementedError() + + @abc.abstractmethod + def _read_system_hostname(self): + raise NotImplementedError() + + def _apply_hostname(self, hostname): + LOG.debug("Setting system hostname to %s", hostname) + util.subp(['hostname', hostname]) + + def update_hostname(self, hostname, prev_hostname_fn): + if not hostname: + return + + prev_hostname = self._read_hostname(prev_hostname_fn) + (sys_fn, sys_hostname) = self._read_system_hostname() + update_files = [] + if not prev_hostname or prev_hostname != hostname: + update_files.append(prev_hostname_fn) + + if (not sys_hostname) or (sys_hostname == prev_hostname + and sys_hostname != hostname): + update_files.append(sys_fn) + + update_files = set([f for f in update_files if f]) + LOG.debug("Attempting to update hostname to %s in %s files", + hostname, len(update_files)) + + for fn in update_files: + try: + self._write_hostname(hostname, fn) + except IOError: + util.logexc(LOG, "Failed to write hostname %s to %s", + hostname, fn) + + if (sys_hostname and prev_hostname and + sys_hostname != prev_hostname): + LOG.debug("%s differs from %s, assuming user maintained hostname.", + prev_hostname_fn, sys_fn) + + if sys_fn in update_files: + 
self._apply_hostname(hostname) + def update_etc_hosts(self, hostname, fqdn): header = '' - if os.path.exists('/etc/hosts'): - eh = helpers.HostsConf(util.load_file("/etc/hosts")) + if os.path.exists(self.hosts_fn): + eh = hosts.HostsConf(util.load_file(self.hosts_fn)) else: - eh = helpers.HostsConf('') - header = "# Added by cloud-init" - header = "%s on %s" % (header, util.time_rfc2822()) + eh = hosts.HostsConf('') + header = util.make_header(base="added") local_ip = self._get_localhost_ip() prev_info = eh.get_entry(local_ip) need_change = False @@ -154,7 +200,7 @@ class Distro(object): if header: contents.write("%s\n" % (header)) contents.write("%s\n" % (eh)) - util.write_file("/etc/hosts", contents.getvalue(), mode=0644) + util.write_file(self.hosts_fn, contents.getvalue(), mode=0644) def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] @@ -302,30 +348,31 @@ class Distro(object): return True - def write_sudo_rules(self, - user, - rules, - sudo_file="/etc/sudoers.d/90-cloud-init-users", - ): + def write_sudo_rules(self, user, rules, sudo_file=None): + if not sudo_file: + sudo_file = self.ci_sudoers_fn - content_header = "# user rules for %s" % user + content_header = "# User rules for %s" % user content = "%s\n%s %s\n\n" % (content_header, user, rules) - if isinstance(rules, list): + if isinstance(rules, (list, tuple, set)): content = "%s\n" % content_header for rule in rules: content += "%s %s\n" % (user, rule) content += "\n" if not os.path.exists(sudo_file): - util.write_file(sudo_file, content, 0440) - + contents = [ + util.make_header(), + content, + ] + util.write_file(sudo_file, "\n".join(contents), 0440) else: try: with open(sudo_file, 'a') as f: f.write(content) except IOError as e: - util.logexc(LOG, "Failed to write %s" % sudo_file, e) + util.logexc(LOG, "Failed to write sudoers file %s", sudo_file) raise e def create_group(self, name, members): diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 88f4e978..20962937 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -27,12 +27,20 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit.distros.parsers import chop_comment + from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) class Distro(distros.Distro): + hostname_conf_fn = "/etc/hostname" + locale_conf_fn = "/etc/default/locale" + network_conf_fn = "/etc/network/interfaces" + tz_conf_fn = "/etc/timezone" + tz_local_fn = "/etc/localtime" + tz_zone_dir = "/usr/share/zoneinfo" def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -43,10 +51,15 @@ class Distro(distros.Distro): def apply_locale(self, locale, out_fn=None): if not out_fn: - out_fn = self._paths.join(False, '/etc/default/locale') + out_fn = self.locale_conf_fn util.subp(['locale-gen', locale], capture=False) util.subp(['update-locale', locale], capture=False) - lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""] + # "" provides trailing newline during join + lines = [ + util.make_header(), + 'LANG="%s"' % (locale), + "", + ] util.write_file(out_fn, "\n".join(lines)) def install_packages(self, pkglist): @@ -54,8 +67,7 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _write_network(self, settings): - net_fn = self._paths.join(False, "/etc/network/interfaces") - util.write_file(net_fn, settings) + util.write_file(self.network_conf_fn, settings) return ['all'] def _bring_up_interfaces(self, 
device_names): @@ -69,54 +81,29 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_hostname(self, hostname): - out_fn = self._paths.join(False, "/etc/hostname") - self._write_hostname(hostname, out_fn) - if out_fn == '/etc/hostname': - # Only do this if we are running in non-adjusted root mode - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + self._write_hostname(hostname, self.hostname_conf_fn) + self._apply_hostname(hostname) def _write_hostname(self, hostname, out_fn): # "" gives trailing newline. - util.write_file(out_fn, "%s\n" % str(hostname), 0644) - - def update_hostname(self, hostname, prev_fn): - hostname_prev = self._read_hostname(prev_fn) - read_fn = self._paths.join(True, "/etc/hostname") - hostname_in_etc = self._read_hostname(read_fn) - update_files = [] - if not hostname_prev or hostname_prev != hostname: - update_files.append(prev_fn) - if (not hostname_in_etc or - (hostname_in_etc == hostname_prev and - hostname_in_etc != hostname)): - write_fn = self._paths.join(False, "/etc/hostname") - update_files.append(write_fn) - for fn in update_files: - try: - self._write_hostname(hostname, fn) - except: - util.logexc(LOG, "Failed to write hostname %s to %s", - hostname, fn) - if (hostname_in_etc and hostname_prev and - hostname_in_etc != hostname_prev): - LOG.debug(("%s differs from /etc/hostname." - " Assuming user maintained hostname."), prev_fn) - if "/etc/hostname" in update_files: - # Only do this if we are running in non-adjusted root mode - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + hostname_lines = [ + str(hostname), + "", + ] + util.write_file(out_fn, "\n".join(hostname_lines), 0644) + + def _read_system_hostname(self): + return (self.hostname_conf_fn, + self._read_hostname(self.hostname_conf_fn)) def _read_hostname(self, filename, default=None): contents = util.load_file(filename, quiet=True) for line in contents.splitlines(): - c_pos = line.find("#") # Handle inline comments - if c_pos != -1: - line = line[0:c_pos] - line_c = line.strip() - if line_c: - return line_c + (before_comment, _comment) = chop_comment(line, "#") + before_comment = before_comment.strip() + if len(before_comment): + return before_comment return default def _get_localhost_ip(self): @@ -124,15 +111,18 @@ class Distro(distros.Distro): return "127.0.1.1" def set_timezone(self, tz): - tz_file = os.path.join("/usr/share/zoneinfo", tz) + tz_file = os.path.join(self.tz_zone_dir, tz) if not os.path.isfile(tz_file): raise RuntimeError(("Invalid timezone %s," " no file found at %s") % (tz, tz_file)) # "" provides trailing newline during join - tz_lines = ["# Created by cloud-init", str(tz), ""] - tz_fn = self._paths.join(False, "/etc/timezone") - util.write_file(tz_fn, "\n".join(tz_lines)) - util.copy(tz_file, self._paths.join(False, "/etc/localtime")) + tz_lines = [ + util.make_header(), + str(tz), + "", + ] + util.write_file(self.tz_conf_fn, "\n".join(tz_lines)) + util.copy(tz_file, self.tz_local_fn) def package_command(self, command, args=None): e = os.environ.copy() diff --git a/cloudinit/distros/helpers.py b/cloudinit/distros/helpers.py deleted file mode 100644 index 916935eb..00000000 --- a/cloudinit/distros/helpers.py +++ /dev/null @@ -1,252 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2012 Canonical Ltd. -# Copyright (C) 2012 Yahoo! Inc. 
-# -# Author: Scott Moser -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -from StringIO import StringIO - -from cloudinit import util - - -def _chop_comment(text, comment_chars): - comment_locations = [text.find(c) for c in comment_chars] - comment_locations = [c for c in comment_locations if c != -1] - if not comment_locations: - return (text, '') - min_comment = min(comment_locations) - before_comment = text[0:min_comment] - comment = text[min_comment:] - return (before_comment, comment) - - -# See: man hosts -# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts -class HostsConf(object): - def __init__(self, text): - self._text = text - self._contents = None - - def parse(self): - if self._contents is None: - self._contents = self._parse(self._text) - - def get_entry(self, ip): - self.parse() - options = [] - for (line_type, components) in self._contents: - if line_type == 'option': - (pieces, _tail) = components - if len(pieces) and pieces[0] == ip: - options.append(pieces[1:]) - return options - - def del_entries(self, ip): - self.parse() - n_entries = [] - for (line_type, components) in self._contents: - if line_type != 'option': - n_entries.append((line_type, components)) - continue - else: - (pieces, _tail) = components - if len(pieces) and pieces[0] == ip: - pass - elif len(pieces): - n_entries.append((line_type, list(components))) - self._contents = n_entries - - def add_entry(self, ip, canonical_hostname, *aliases): - self.parse() - self._contents.append(('option', - ([ip, canonical_hostname] + list(aliases), ''))) - - def _parse(self, contents): - entries = [] - for line in contents.splitlines(): - if not len(line.strip()): - entries.append(('blank', [line])) - continue - (head, tail) = _chop_comment(line.strip(), '#') - if not len(head): - entries.append(('all_comment', [line])) - continue - entries.append(('option', [head.split(None), tail])) - return entries - - def __str__(self): - self.parse() - contents = StringIO() - for (line_type, components) in self._contents: - if line_type == 'blank': - contents.write("%s\n") - elif line_type == 'all_comment': - contents.write("%s\n" % (components[0])) - elif line_type == 'option': - (pieces, tail) = components - pieces = [str(p) for p in pieces] - pieces = "\t".join(pieces) - contents.write("%s%s\n" % (pieces, tail)) - return contents.getvalue() - - -# See: man resolv.conf -class ResolvConf(object): - def __init__(self, text): - self._text = text - self._contents = None - - def parse(self): - if self._contents is None: - self._contents = self._parse(self._text) - - @property - def nameservers(self): - self.parse() - return self._retr_option('nameserver') - - @property - def local_domain(self): - self.parse() - dm = self._retr_option('domain') - if dm: - return dm[0] - return None - - @property - def search_domains(self): - self.parse() - current_sds = self._retr_option('search') - flat_sds = [] - for sdlist in current_sds: - for sd in sdlist.split(None): - if sd: - flat_sds.append(sd) - return flat_sds - - def 
__str__(self): - self.parse() - contents = StringIO() - for (line_type, components) in self._contents: - if line_type == 'blank': - contents.write("\n") - elif line_type == 'all_comment': - contents.write("%s\n" % (components[0])) - elif line_type == 'option': - (cfg_opt, cfg_value, comment_tail) = components - line = "%s %s" % (cfg_opt, cfg_value) - if len(comment_tail): - line += comment_tail - contents.write("%s\n" % (line)) - return contents.getvalue() - - def _retr_option(self, opt_name): - found = [] - for (line_type, components) in self._contents: - if line_type == 'option': - (cfg_opt, cfg_value, _comment_tail) = components - if cfg_opt == opt_name: - found.append(cfg_value) - return found - - def add_nameserver(self, ns): - self.parse() - current_ns = self._retr_option('nameserver') - new_ns = list(current_ns) - new_ns.append(str(ns)) - new_ns = util.uniq_list(new_ns) - if len(new_ns) == len(current_ns): - return current_ns - if len(current_ns) >= 3: - # Hard restriction on only 3 name servers - raise ValueError(("Adding %r would go beyond the " - "'3' maximum name servers") % (ns)) - self._remove_option('nameserver') - for n in new_ns: - self._contents.append(('option', ['nameserver', n, ''])) - return new_ns - - def _remove_option(self, opt_name): - - def remove_opt(item): - line_type, components = item - if line_type != 'option': - return False - (cfg_opt, _cfg_value, _comment_tail) = components - if cfg_opt != opt_name: - return False - return True - - new_contents = [] - for c in self._contents: - if not remove_opt(c): - new_contents.append(c) - self._contents = new_contents - - def add_search_domain(self, search_domain): - flat_sds = self.search_domains - new_sds = list(flat_sds) - new_sds.append(str(search_domain)) - new_sds = util.uniq_list(new_sds) - if len(flat_sds) == len(new_sds): - return new_sds - if len(flat_sds) >= 6: - # Hard restriction on only 6 search domains - raise ValueError(("Adding %r would go beyond the " - "'6' maximum search domains") % (search_domain)) - s_list = " ".join(new_sds) - if len(s_list) > 256: - # Some hard limit on 256 chars total - raise ValueError(("Adding %r would go beyond the " - "256 maximum search list character limit") - % (search_domain)) - self._remove_option('search') - self._contents.append(('option', ['search', s_list, ''])) - return flat_sds - - @local_domain.setter - def local_domain(self, domain): - self.parse() - self._remove_option('domain') - self._contents.append(('option', ['domain', str(domain), ''])) - return domain - - def _parse(self, contents): - entries = [] - for (i, line) in enumerate(contents.splitlines()): - sline = line.strip() - if not sline: - entries.append(('blank', [line])) - continue - (head, tail) = _chop_comment(line, ';#') - if not len(head.strip()): - entries.append(('all_comment', [line])) - continue - if not tail: - tail = '' - try: - (cfg_opt, cfg_values) = head.split(None, 1) - except (IndexError, ValueError): - raise IOError("Incorrectly formatted resolv.conf line %s" - % (i + 1)) - if cfg_opt not in ['nameserver', 'domain', - 'search', 'sortlist', 'options']: - raise IOError("Unexpected resolv.conf option %s" % (cfg_opt)) - entries.append(("option", [cfg_opt, cfg_values, tail])) - return entries - - diff --git a/cloudinit/distros/parsers/__init__.py b/cloudinit/distros/parsers/__init__.py new file mode 100644 index 00000000..8334a5e6 --- /dev/null +++ b/cloudinit/distros/parsers/__init__.py @@ -0,0 +1,27 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. 
+# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +def chop_comment(text, comment_chars): + comment_locations = [text.find(c) for c in comment_chars] + comment_locations = [c for c in comment_locations if c != -1] + if not comment_locations: + return (text, '') + min_comment = min(comment_locations) + before_comment = text[0:min_comment] + comment = text[min_comment:] + return (before_comment, comment) diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py new file mode 100644 index 00000000..5374ab0b --- /dev/null +++ b/cloudinit/distros/parsers/hosts.py @@ -0,0 +1,92 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from StringIO import StringIO + +from cloudinit.distros.parsers import chop_comment + + +# See: man hosts +# or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts +class HostsConf(object): + def __init__(self, text): + self._text = text + self._contents = None + + def parse(self): + if self._contents is None: + self._contents = self._parse(self._text) + + def get_entry(self, ip): + self.parse() + options = [] + for (line_type, components) in self._contents: + if line_type == 'option': + (pieces, _tail) = components + if len(pieces) and pieces[0] == ip: + options.append(pieces[1:]) + return options + + def del_entries(self, ip): + self.parse() + n_entries = [] + for (line_type, components) in self._contents: + if line_type != 'option': + n_entries.append((line_type, components)) + continue + else: + (pieces, _tail) = components + if len(pieces) and pieces[0] == ip: + pass + elif len(pieces): + n_entries.append((line_type, list(components))) + self._contents = n_entries + + def add_entry(self, ip, canonical_hostname, *aliases): + self.parse() + self._contents.append(('option', + ([ip, canonical_hostname] + list(aliases), ''))) + + def _parse(self, contents): + entries = [] + for line in contents.splitlines(): + if not len(line.strip()): + entries.append(('blank', [line])) + continue + (head, tail) = chop_comment(line.strip(), '#') + if not len(head): + entries.append(('all_comment', [line])) + continue + entries.append(('option', [head.split(None), tail])) + return entries + + def __str__(self): + self.parse() + contents = StringIO() + for (line_type, components) in self._contents: + if line_type == 'blank': + contents.write("%s\n") + elif line_type == 'all_comment': + contents.write("%s\n" % (components[0])) + elif line_type == 'option': + (pieces, tail) = components + pieces = [str(p) for p in pieces] + pieces = "\t".join(pieces) + contents.write("%s%s\n" % (pieces, tail)) + return contents.getvalue() + diff --git a/cloudinit/distros/parsers/quoting_conf.py b/cloudinit/distros/parsers/quoting_conf.py new file mode 100644 index 00000000..953ccfe9 --- /dev/null +++ b/cloudinit/distros/parsers/quoting_conf.py @@ -0,0 +1,80 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# This library is used to parse/write +# out the various sysconfig files edited +# +# It has to be slightly modified though +# to ensure that all values are quoted +# since these configs are usually sourced into +# bash scripts... +from configobj import ConfigObj + +# See: http://tiny.cc/oezbgw +D_QUOTE_CHARS = { + "\"": "\\\"", + "(": "\\(", + ")": "\\)", + "$": '\$', + '`': '\`', +} + +# This class helps adjust the configobj +# writing to ensure that when writing a k/v +# on a line, that they are properly quoted +# and have no spaces between the '=' sign. +# - This is mainly due to the fact that +# the sysconfig scripts are often sourced +# directly into bash/shell scripts so ensure +# that it works for those types of use cases. 
+class QuotingConfigObj(ConfigObj): + def __init__(self, lines): + ConfigObj.__init__(self, lines, + interpolation=False, + write_empty_values=True) + + def _quote_posix(self, text): + if not text: + return '' + for (k, v) in D_QUOTE_CHARS.iteritems(): + text = text.replace(k, v) + return '"%s"' % (text) + + def _quote_special(self, text): + if text.lower() in ['yes', 'no', 'true', 'false']: + return text + else: + return self._quote_posix(text) + + def _write_line(self, indent_string, entry, this_entry, comment): + # Ensure it is formatted fine for + # how these sysconfig scripts are used + val = self._decode_element(self._quote(this_entry)) + # Single quoted strings should + # always work. + if not val.startswith("'"): + # Perform any special quoting + val = self._quote_special(val) + key = self._decode_element(self._quote(entry, multiline=False)) + cmnt = self._decode_element(comment) + return '%s%s%s%s%s' % (indent_string, + key, + "=", + val, + cmnt) + diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py new file mode 100644 index 00000000..377ada6b --- /dev/null +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -0,0 +1,171 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from StringIO import StringIO + +from cloudinit import util + +from cloudinit.distros.parsers import chop_comment + + +# See: man resolv.conf +class ResolvConf(object): + def __init__(self, text): + self._text = text + self._contents = None + + def parse(self): + if self._contents is None: + self._contents = self._parse(self._text) + + @property + def nameservers(self): + self.parse() + return self._retr_option('nameserver') + + @property + def local_domain(self): + self.parse() + dm = self._retr_option('domain') + if dm: + return dm[0] + return None + + @property + def search_domains(self): + self.parse() + current_sds = self._retr_option('search') + flat_sds = [] + for sdlist in current_sds: + for sd in sdlist.split(None): + if sd: + flat_sds.append(sd) + return flat_sds + + def __str__(self): + self.parse() + contents = StringIO() + for (line_type, components) in self._contents: + if line_type == 'blank': + contents.write("\n") + elif line_type == 'all_comment': + contents.write("%s\n" % (components[0])) + elif line_type == 'option': + (cfg_opt, cfg_value, comment_tail) = components + line = "%s %s" % (cfg_opt, cfg_value) + if len(comment_tail): + line += comment_tail + contents.write("%s\n" % (line)) + return contents.getvalue() + + def _retr_option(self, opt_name): + found = [] + for (line_type, components) in self._contents: + if line_type == 'option': + (cfg_opt, cfg_value, _comment_tail) = components + if cfg_opt == opt_name: + found.append(cfg_value) + return found + + def add_nameserver(self, ns): + self.parse() + current_ns = self._retr_option('nameserver') + new_ns = list(current_ns) + new_ns.append(str(ns)) + new_ns = util.uniq_list(new_ns) + if len(new_ns) == len(current_ns): + return current_ns + if len(current_ns) >= 3: + # Hard restriction on only 3 name servers + raise ValueError(("Adding %r would go beyond the " + "'3' maximum name servers") % (ns)) + self._remove_option('nameserver') + for n in new_ns: + self._contents.append(('option', ['nameserver', n, ''])) + return new_ns + + def _remove_option(self, opt_name): + + def remove_opt(item): + line_type, components = item + if line_type != 'option': + return False + (cfg_opt, _cfg_value, _comment_tail) = components + if cfg_opt != opt_name: + return False + return True + + new_contents = [] + for c in self._contents: + if not remove_opt(c): + new_contents.append(c) + self._contents = new_contents + + def add_search_domain(self, search_domain): + flat_sds = self.search_domains + new_sds = list(flat_sds) + new_sds.append(str(search_domain)) + new_sds = util.uniq_list(new_sds) + if len(flat_sds) == len(new_sds): + return new_sds + if len(flat_sds) >= 6: + # Hard restriction on only 6 search domains + raise ValueError(("Adding %r would go beyond the " + "'6' maximum search domains") % (search_domain)) + s_list = " ".join(new_sds) + if len(s_list) > 256: + # Some hard limit on 256 chars total + raise ValueError(("Adding %r would go beyond the " + "256 maximum search list character limit") + % (search_domain)) + self._remove_option('search') + self._contents.append(('option', ['search', s_list, ''])) + return flat_sds + + @local_domain.setter + def local_domain(self, domain): + self.parse() + self._remove_option('domain') + self._contents.append(('option', ['domain', str(domain), ''])) + return domain + + def _parse(self, contents): + entries = [] + for (i, line) in enumerate(contents.splitlines()): + sline = line.strip() + if not sline: + entries.append(('blank', [line])) + continue + (head, tail) = chop_comment(line, 
';#') + if not len(head.strip()): + entries.append(('all_comment', [line])) + continue + if not tail: + tail = '' + try: + (cfg_opt, cfg_values) = head.split(None, 1) + except (IndexError, ValueError): + raise IOError("Incorrectly formatted resolv.conf line %s" + % (i + 1)) + if cfg_opt not in ['nameserver', 'domain', + 'search', 'sortlist', 'options']: + raise IOError("Unexpected resolv.conf option %s" % (cfg_opt)) + entries.append(("option", [cfg_opt, cfg_values, tail])) + return entries + + diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 13fd5ec8..21f2216e 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -23,41 +23,17 @@ import os from cloudinit import distros -from cloudinit.distros import helpers as d_helpers + +from cloudinit.distros.parsers import (resolv_conf, quoting_conf) from cloudinit import helpers from cloudinit import log as logging from cloudinit import util -from cloudinit import version from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) -NETWORK_FN_TPL = '/etc/sysconfig/network-scripts/ifcfg-%s' - -# See: http://tiny.cc/6r99fw -# For what alot of these files that are being written -# are and the format of them - -# This library is used to parse/write -# out the various sysconfig files edited -# -# It has to be slightly modified though -# to ensure that all values are quoted -# since these configs are usually sourced into -# bash scripts... -from configobj import ConfigObj - -# See: http://tiny.cc/oezbgw -D_QUOTE_CHARS = { - "\"": "\\\"", - "(": "\\(", - ")": "\\)", - "$": '\$', - '`': '\`', -} - def _make_sysconfig_bool(val): if val: @@ -66,12 +42,15 @@ def _make_sysconfig_bool(val): return 'no' -def _make_header(): - ci_ver = version.version_string() - return '# Created by cloud-init v. %s' % (ci_ver) - - class Distro(distros.Distro): + # See: http://tiny.cc/6r99fw + clock_conf_fn = "/etc/sysconfig/clock" + locale_conf_fn = '/etc/sysconfig/i18n' + network_conf_fn = "/etc/sysconfig/network" + network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s' + resolve_conf_fn = "/etc/resolv.conf" + tz_local_fn = "/etc/localtime" + tz_zone_dir = "/usr/share/zoneinfo" def __init__(self, name, cfg, paths): distros.Distro.__init__(self, name, cfg, paths) @@ -84,14 +63,14 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _adjust_resolve(self, dns_servers, search_servers): - r_conf = d_helpers.ResolvConf(util.load_file("/etc/resolv.conf")) + r_conf = resolv_conf.ResolvConf(util.load_file(self.resolve_conf_fn)) try: r_conf.parse() except IOError: util.logexc(LOG, "Failed at parsing %s reverting to an empty instance", - "/etc/resolv.conf") - r_conf = d_helpers.ResolvConf('') + self.resolve_conf_fn) + r_conf = resolv_conf.ResolvConf('') r_conf.parse() if dns_servers: for s in dns_servers: @@ -105,7 +84,7 @@ class Distro(distros.Distro): r_conf.add_search_domain(s) except ValueError: util.logexc(LOG, "Failed at adding search domain %s", s) - util.write_file("/etc/resolv.conf", str(r_conf), 0644) + util.write_file(self.resolve_conf_fn, str(r_conf), 0644) def _write_network(self, settings): # TODO(harlowja) fix this... 
since this is the ubuntu format @@ -117,7 +96,7 @@ class Distro(distros.Distro): searchservers = [] dev_names = entries.keys() for (dev, info) in entries.iteritems(): - net_fn = NETWORK_FN_TPL % (dev) + net_fn = self.network_script_tpl % (dev) net_cfg = { 'DEVICE': dev, 'NETMASK': info.get('netmask'), @@ -134,12 +113,12 @@ class Distro(distros.Distro): if 'dns-search' in info: searchservers.extend(info['dns-search']) if nameservers or searchservers: - self._write_resolve(nameservers, searchservers) + self._adjust_resolve(nameservers, searchservers) if dev_names: net_cfg = { 'NETWORKING': _make_sysconfig_bool(True), } - self._update_sysconfig_file("/etc/sysconfig/network", net_cfg) + self._update_sysconfig_file(self.network_conf_fn, net_cfg) return dev_names def _update_sysconfig_file(self, fn, adjustments, allow_empty=False): @@ -158,17 +137,16 @@ class Distro(distros.Distro): if updated_am: lines = contents.write() if not exists: - lines.insert(0, _make_header()) + lines.insert(0, util.make_header()) util.write_file(fn, "\n".join(lines), 0644) def set_hostname(self, hostname): - self._write_hostname(hostname, '/etc/sysconfig/network') - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + self._write_hostname(hostname, self.network_conf_fn) + self._apply_hostname(hostname) def apply_locale(self, locale, out_fn=None): if not out_fn: - out_fn = '/etc/sysconfig/i18n' + out_fn = self.locale_conf_fn locale_cfg = { 'LANG': locale, } @@ -180,30 +158,9 @@ class Distro(distros.Distro): } self._update_sysconfig_file(out_fn, host_cfg) - def update_hostname(self, hostname, prev_file): - hostname_prev = self._read_hostname(prev_file) - hostname_in_sys = self._read_hostname("/etc/sysconfig/network") - update_files = [] - if not hostname_prev or hostname_prev != hostname: - update_files.append(prev_file) - if (not hostname_in_sys or - (hostname_in_sys == hostname_prev - and hostname_in_sys != hostname)): - update_files.append("/etc/sysconfig/network") - for fn in update_files: - try: - self._write_hostname(hostname, fn) - except: - util.logexc(LOG, "Failed to write hostname %s to %s", - hostname, fn) - if (hostname_in_sys and hostname_prev and - hostname_in_sys != hostname_prev): - LOG.debug(("%s differs from /etc/sysconfig/network." 
- " Assuming user maintained hostname."), prev_file) - if "/etc/sysconfig/network" in update_files: - # Only do this if we are running in non-adjusted root mode - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + def _read_system_hostname(self): + return (self.network_conf_fn, + self._read_hostname(self.network_conf_fn)) def _read_hostname(self, filename, default=None): (_exists, contents) = self._read_conf(filename) @@ -219,7 +176,8 @@ class Distro(distros.Distro): exists = True else: contents = [] - return (exists, QuotingConfigObj(contents)) + return (exists, + quoting_conf.QuotingConfigObj(contents)) def _bring_up_interfaces(self, device_names): if device_names and 'all' in device_names: @@ -228,17 +186,19 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_timezone(self, tz): - tz_file = os.path.join("/usr/share/zoneinfo", tz) + # Ensure that this timezone is actually + # available on this system, if not give up + tz_file = os.path.join(self.tz_zone_dir, str(tz)) if not os.path.isfile(tz_file): raise RuntimeError(("Invalid timezone %s," " no file found at %s") % (tz, tz_file)) # Adjust the sysconfig clock zone setting clock_cfg = { - 'ZONE': tz, + 'ZONE': str(tz), } - self._update_sysconfig_file("/etc/sysconfig/clock", clock_cfg) + self._update_sysconfig_file(self.clock_conf_fn, clock_cfg) # This ensures that the correct tz will be used for the system - util.copy(tz_file, "/etc/localtime") + util.copy(tz_file, self.tz_local_fn) def package_command(self, command, args=None): cmd = ['yum'] @@ -262,51 +222,6 @@ class Distro(distros.Distro): ["makecache"], freq=PER_INSTANCE) -# This class helps adjust the configobj -# writing to ensure that when writing a k/v -# on a line, that they are properly quoted -# and have no spaces between the '=' sign. -# - This is mainly due to the fact that -# the sysconfig scripts are often sourced -# directly into bash/shell scripts so ensure -# that it works for those types of use cases. -class QuotingConfigObj(ConfigObj): - def __init__(self, lines): - ConfigObj.__init__(self, lines, - interpolation=False, - write_empty_values=True) - - def _quote_posix(self, text): - if not text: - return '' - for (k, v) in D_QUOTE_CHARS.iteritems(): - text = text.replace(k, v) - return '"%s"' % (text) - - def _quote_special(self, text): - if text.lower() in ['yes', 'no', 'true', 'false']: - return text - else: - return self._quote_posix(text) - - def _write_line(self, indent_string, entry, this_entry, comment): - # Ensure it is formatted fine for - # how these sysconfig scripts are used - val = self._decode_element(self._quote(this_entry)) - # Single quoted strings should - # always work. - if not val.startswith("'"): - # Perform any special quoting - val = self._quote_special(val) - key = self._decode_element(self._quote(entry, multiline=False)) - cmnt = self._decode_element(comment) - return '%s%s%s%s%s' % (indent_string, - key, - "=", - val, - cmnt) - - # This is a util function to translate a ubuntu /etc/network/interfaces 'blob' # to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/ # TODO(harlowja) remove when we have python-netcf active... 
diff --git a/cloudinit/util.py b/cloudinit/util.py index 79676305..918deba2 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -52,6 +52,7 @@ from cloudinit import importer from cloudinit import log as logging from cloudinit import safeyaml from cloudinit import url_helper as uhelp +from cloudinit import version from cloudinit.settings import (CFG_BUILTIN) @@ -272,11 +273,7 @@ def uniq_merge(*lists): # Kickout the empty ones a_list = [a for a in a_list if len(a)] combined_list.extend(a_list) - uniq_list = [] - for i in combined_list: - if i not in uniq_list: - uniq_list.append(i) - return uniq_list + return uniq_list(combined_list) def clean_filename(fn): @@ -1429,6 +1426,14 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, return (out, err) +def make_header(comment_char="#", base='created'): + ci_ver = version.version_string() + header = str(comment_char) + header += " %s by cloud-init v. %s" % (base.title(), ci_ver) + header += " on %s" % time_rfc2822() + return header + + def abs_join(*paths): return os.path.abspath(os.path.join(*paths)) diff --git a/tests/unittests/test_distros/test_hosts.py b/tests/unittests/test_distros/test_hosts.py index 621837ab..687a0dab 100644 --- a/tests/unittests/test_distros/test_hosts.py +++ b/tests/unittests/test_distros/test_hosts.py @@ -1,6 +1,6 @@ from mocker import MockerTestCase -from cloudinit.distros import helpers +from cloudinit.distros.parsers import hosts BASE_ETC = ''' @@ -16,7 +16,7 @@ BASE_ETC = BASE_ETC.strip() class TestHostsHelper(MockerTestCase): def test_parse(self): - eh = helpers.HostsConf(BASE_ETC) + eh = hosts.HostsConf(BASE_ETC) self.assertEquals(eh.get_entry('127.0.0.1'), [['localhost']]) self.assertEquals(eh.get_entry('192.168.1.10'), [['foo.mydomain.org', 'foo'], @@ -25,7 +25,7 @@ class TestHostsHelper(MockerTestCase): self.assertTrue(eh.startswith('# Example')) def test_add(self): - eh = helpers.HostsConf(BASE_ETC) + eh = hosts.HostsConf(BASE_ETC) eh.add_entry('127.0.0.0', 'blah') self.assertEquals(eh.get_entry('127.0.0.0'), [['blah']]) eh.add_entry('127.0.0.3', 'blah', 'blah2', 'blah3') @@ -33,7 +33,7 @@ class TestHostsHelper(MockerTestCase): [['blah', 'blah2', 'blah3']]) def test_del(self): - eh = helpers.HostsConf(BASE_ETC) + eh = hosts.HostsConf(BASE_ETC) eh.add_entry('127.0.0.0', 'blah') self.assertEquals(eh.get_entry('127.0.0.0'), [['blah']]) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 55765f0c..b7ce6fea 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -83,7 +83,7 @@ class TestNetCfgDistro(MockerTestCase): self.assertEquals(write_buf.mode, 0644) def assertCfgEquals(self, blob1, blob2): - cfg_tester = distros.rhel.QuotingConfigObj + cfg_tester = distros.parsers.quoting_conf.QuotingConfigObj b1 = dict(cfg_tester(blob1.strip().splitlines())) b2 = dict(cfg_tester(blob2.strip().splitlines())) self.assertEquals(b1, b2) diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py index 5f122833..d947dda0 100644 --- a/tests/unittests/test_distros/test_resolv.py +++ b/tests/unittests/test_distros/test_resolv.py @@ -1,6 +1,8 @@ from mocker import MockerTestCase -from cloudinit.distros import helpers +from cloudinit.distros.parsers import resolv_conf + +import re BASE_RESOLVE = ''' @@ -14,12 +16,12 @@ BASE_RESOLVE = BASE_RESOLVE.strip() class TestResolvHelper(MockerTestCase): def test_parse_same(self): - rp = 
helpers.ResolvConf(BASE_RESOLVE) + rp = resolv_conf.ResolvConf(BASE_RESOLVE) rp_r = str(rp).strip() self.assertEquals(BASE_RESOLVE, rp_r) def test_local_domain(self): - rp = helpers.ResolvConf(BASE_RESOLVE) + rp = resolv_conf.ResolvConf(BASE_RESOLVE) self.assertEquals(None, rp.local_domain) rp.local_domain = "bob" @@ -27,7 +29,7 @@ class TestResolvHelper(MockerTestCase): self.assertIn('domain bob', str(rp)) def test_nameservers(self): - rp = helpers.ResolvConf(BASE_RESOLVE) + rp = resolv_conf.ResolvConf(BASE_RESOLVE) self.assertIn('10.15.44.14', rp.nameservers) self.assertIn('10.15.30.92', rp.nameservers) rp.add_nameserver('10.2') @@ -41,12 +43,12 @@ class TestResolvHelper(MockerTestCase): self.assertNotIn('10.3', rp.nameservers) def test_search_domains(self): - rp = helpers.ResolvConf(BASE_RESOLVE) + rp = resolv_conf.ResolvConf(BASE_RESOLVE) self.assertIn('yahoo.com', rp.search_domains) self.assertIn('blah.yahoo.com', rp.search_domains) rp.add_search_domain('bbb.y.com') self.assertIn('bbb.y.com', rp.search_domains) - self.assertRegexpMatches(str(rp), r'search(.*)bbb.y.com(.*)') + self.assertTrue(re.search(r'search(.*)bbb.y.com(.*)', str(rp))) self.assertIn('bbb.y.com', rp.search_domains) rp.add_search_domain('bbb.y.com') self.assertEquals(len(rp.search_domains), 3) -- cgit v1.2.3 From b2901994e4310c255a6edf64dcf35c7bf12295ef Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 10 Oct 2012 16:51:18 -0700 Subject: Add comments as to a future refactoring of this function that needs to occur since its pretty much the same now. --- cloudinit/distros/debian.py | 5 ++++- cloudinit/distros/rhel.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 20962937..c8b13f95 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -111,7 +111,9 @@ class Distro(distros.Distro): return "127.0.1.1" def set_timezone(self, tz): - tz_file = os.path.join(self.tz_zone_dir, tz) + # TODO(harlowja): move this code into + # the parent distro... + tz_file = os.path.join(self.tz_zone_dir, str(tz)) if not os.path.isfile(tz_file): raise RuntimeError(("Invalid timezone %s," " no file found at %s") % (tz, tz_file)) @@ -122,6 +124,7 @@ class Distro(distros.Distro): "", ] util.write_file(self.tz_conf_fn, "\n".join(tz_lines)) + # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) def package_command(self, command, args=None): diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 21f2216e..45c85fbb 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -186,8 +186,8 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_timezone(self, tz): - # Ensure that this timezone is actually - # available on this system, if not give up + # TODO(harlowja): move this code into + # the parent distro... tz_file = os.path.join(self.tz_zone_dir, str(tz)) if not os.path.isfile(tz_file): raise RuntimeError(("Invalid timezone %s," -- cgit v1.2.3 From fe1ec4d4cbb682731e8f65be5dab60f4593ed9d6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 10 Oct 2012 16:59:40 -0700 Subject: Add comment explaining why the '_apply_hostname' function will not be permanent and catch the exception that occurs if it fails and log that instead of blowing up (which isn't typically useful for something that is temporary anyway). 
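A condensed, illustrative view of the pattern this change settles on (the real methods live on cloudinit.distros.Distro and its subclasses): the persistent rename goes through _write_hostname(), while the hostname(1) call only lasts until reboot, so a failure there is logged rather than raised:

    from cloudinit import log as logging
    from cloudinit import util

    LOG = logging.getLogger(__name__)

    def apply_hostname(hostname):
        # util.subp() raises ProcessExecutionError on a non-zero exit; the
        # change would not survive a reboot anyway, so a failure is only
        # logged instead of aborting the whole stage.
        try:
            util.subp(['hostname', hostname])
        except util.ProcessExecutionError:
            util.logexc(LOG, "Failed to temporarily adjust the system"
                             " hostname to %s", hostname)

    def set_hostname(distro, hostname):
        # persistent part (debian's hostname_conf_fn shown as the example)
        distro._write_hostname(hostname, distro.hostname_conf_fn)
        # immediate, best-effort part
        apply_hostname(hostname)
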
--- cloudinit/distros/__init__.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 07f03159..c6427401 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -128,8 +128,16 @@ class Distro(object): raise NotImplementedError() def _apply_hostname(self, hostname): - LOG.debug("Setting system hostname to %s", hostname) - util.subp(['hostname', hostname]) + # This really only sets the hostname + # temporarily (until reboot so it should + # not be depended on). Use the write + # hostname functions for 'permanent' adjustments. + LOG.debug("Temporarily setting the system hostname to %s", hostname) + try: + util.subp(['hostname', hostname]) + except util.ProcessExecutionError: + util.logexc(LOG, ("Failed to temporarily adjust" + " the system hostname to %s"), hostname) def update_hostname(self, hostname, prev_hostname_fn): if not hostname: -- cgit v1.2.3 From 0f1a2cbe434cba243ce65ff43a88722c2bcf6f2c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 11 Oct 2012 12:49:45 -0700 Subject: More adjustments/cleanups for the system configuration helper objects. 1. Add in a parser for the /etc/hostname file that can be shared 2. Adjust the sysconfig configobj parser to not always quote fields that it does not need to quote + add in tests around this to ensure that we don't go nuts with quoting again. --- cloudinit/distros/debian.py | 47 ++++++++------ cloudinit/distros/parsers/hostname.py | 90 ++++++++++++++++++++++++++ cloudinit/distros/parsers/hosts.py | 3 +- cloudinit/distros/parsers/quoting_conf.py | 80 ----------------------- cloudinit/distros/parsers/sys_conf.py | 85 ++++++++++++++++++++++++ cloudinit/distros/rhel.py | 13 ++-- tests/unittests/test_distros/test_hostname.py | 38 +++++++++++ tests/unittests/test_distros/test_netconfig.py | 7 +- tests/unittests/test_distros/test_sysconfig.py | 59 +++++++++++++++++ 9 files changed, 315 insertions(+), 107 deletions(-) create mode 100644 cloudinit/distros/parsers/hostname.py delete mode 100644 cloudinit/distros/parsers/quoting_conf.py create mode 100644 cloudinit/distros/parsers/sys_conf.py create mode 100644 tests/unittests/test_distros/test_hostname.py create mode 100644 tests/unittests/test_distros/test_sysconfig.py diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index c8b13f95..0d5cbac7 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -27,7 +27,7 @@ from cloudinit import helpers from cloudinit import log as logging from cloudinit import util -from cloudinit.distros.parsers import chop_comment +from cloudinit.distros.parsers.hostname import HostnameConf from cloudinit.settings import PER_INSTANCE @@ -84,27 +84,38 @@ class Distro(distros.Distro): self._write_hostname(hostname, self.hostname_conf_fn) self._apply_hostname(hostname) - def _write_hostname(self, hostname, out_fn): - # "" gives trailing newline. 
- hostname_lines = [ - str(hostname), - "", - ] - util.write_file(out_fn, "\n".join(hostname_lines), 0644) + def _write_hostname(self, your_hostname, out_fn): + conf = self._read_hostname_conf(out_fn) + if not conf: + conf = HostnameConf('') + conf.parse() + conf.set_hostname(your_hostname) + util.write_file(out_fn, str(conf), 0644) def _read_system_hostname(self): - return (self.hostname_conf_fn, - self._read_hostname(self.hostname_conf_fn)) + conf = self._read_hostname_conf(self.hostname_conf_fn) + if conf: + sys_hostname = conf.hostname + else: + sys_hostname = None + return (self.hostname_conf_fn, sys_hostname) + + def _read_hostname_conf(self, filename): + try: + conf = HostnameConf(util.load_file(filename)) + conf.parse() + return conf + except IOError: + util.logexc(LOG, "Error reading hostname from %s", filename) + return None def _read_hostname(self, filename, default=None): - contents = util.load_file(filename, quiet=True) - for line in contents.splitlines(): - # Handle inline comments - (before_comment, _comment) = chop_comment(line, "#") - before_comment = before_comment.strip() - if len(before_comment): - return before_comment - return default + conf = self._read_hostname_conf(filename) + if not conf: + return default + if not conf.hostname: + return default + return conf.hostname def _get_localhost_ip(self): # Note: http://www.leonardoborda.com/blog/127-0-1-1-ubuntu-debian/ diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py new file mode 100644 index 00000000..7e19f017 --- /dev/null +++ b/cloudinit/distros/parsers/hostname.py @@ -0,0 +1,90 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from StringIO import StringIO + +from cloudinit.distros.parsers import chop_comment + + +# Parser that knows how to work with /etc/hostname format +class HostnameConf(object): + def __init__(self, text): + self._text = text + self._contents = None + + def parse(self): + if self._contents is None: + self._contents = self._parse(self._text) + + def __str__(self): + self.parse() + contents = StringIO() + for (line_type, components) in self._contents: + if line_type == 'blank': + contents.write("%s\n" % (components[0])) + elif line_type == 'all_comment': + contents.write("%s\n" % (components[0])) + elif line_type == 'hostname': + (hostname, tail) = components + contents.write("%s%s\n" % (hostname, tail)) + # Ensure trailing newline + contents = contents.getvalue() + if not contents.endswith("\n"): + contents += "\n" + return contents + + @property + def hostname(self): + self.parse() + for (line_type, components) in self._contents: + if line_type == 'hostname': + return components[0] + return None + + def set_hostname(self, your_hostname): + your_hostname = your_hostname.strip() + if not your_hostname: + return + self.parse() + replaced = False + for (line_type, components) in self._contents: + if line_type == 'hostname': + components[0] = str(your_hostname) + replaced = True + if not replaced: + self._contents.append(('hostname', [str(your_hostname), ''])) + + def _parse(self, contents): + entries = [] + hostnames_found = set() + for line in contents.splitlines(): + if not len(line.strip()): + entries.append(('blank', [line])) + continue + (head, tail) = chop_comment(line.strip(), '#') + if not len(head): + entries.append(('all_comment', [line])) + continue + entries.append(('hostname', [head, tail])) + hostnames_found.add(head) + if len(hostnames_found) > 1: + raise IOError("Multiple hostnames (%s) found!" + % (hostnames_found)) + return entries + + diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py index 5374ab0b..958a7c31 100644 --- a/cloudinit/distros/parsers/hosts.py +++ b/cloudinit/distros/parsers/hosts.py @@ -23,6 +23,7 @@ from cloudinit.distros.parsers import chop_comment # See: man hosts # or http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts +# or http://tinyurl.com/6lmox3 class HostsConf(object): def __init__(self, text): self._text = text @@ -80,7 +81,7 @@ class HostsConf(object): contents = StringIO() for (line_type, components) in self._contents: if line_type == 'blank': - contents.write("%s\n") + contents.write("%s\n" % (components[0])) elif line_type == 'all_comment': contents.write("%s\n" % (components[0])) elif line_type == 'option': diff --git a/cloudinit/distros/parsers/quoting_conf.py b/cloudinit/distros/parsers/quoting_conf.py deleted file mode 100644 index 953ccfe9..00000000 --- a/cloudinit/distros/parsers/quoting_conf.py +++ /dev/null @@ -1,80 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2012 Yahoo! Inc. -# -# Author: Joshua Harlow -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -# This library is used to parse/write -# out the various sysconfig files edited -# -# It has to be slightly modified though -# to ensure that all values are quoted -# since these configs are usually sourced into -# bash scripts... -from configobj import ConfigObj - -# See: http://tiny.cc/oezbgw -D_QUOTE_CHARS = { - "\"": "\\\"", - "(": "\\(", - ")": "\\)", - "$": '\$', - '`': '\`', -} - -# This class helps adjust the configobj -# writing to ensure that when writing a k/v -# on a line, that they are properly quoted -# and have no spaces between the '=' sign. -# - This is mainly due to the fact that -# the sysconfig scripts are often sourced -# directly into bash/shell scripts so ensure -# that it works for those types of use cases. -class QuotingConfigObj(ConfigObj): - def __init__(self, lines): - ConfigObj.__init__(self, lines, - interpolation=False, - write_empty_values=True) - - def _quote_posix(self, text): - if not text: - return '' - for (k, v) in D_QUOTE_CHARS.iteritems(): - text = text.replace(k, v) - return '"%s"' % (text) - - def _quote_special(self, text): - if text.lower() in ['yes', 'no', 'true', 'false']: - return text - else: - return self._quote_posix(text) - - def _write_line(self, indent_string, entry, this_entry, comment): - # Ensure it is formatted fine for - # how these sysconfig scripts are used - val = self._decode_element(self._quote(this_entry)) - # Single quoted strings should - # always work. - if not val.startswith("'"): - # Perform any special quoting - val = self._quote_special(val) - key = self._decode_element(self._quote(entry, multiline=False)) - cmnt = self._decode_element(comment) - return '%s%s%s%s%s' % (indent_string, - key, - "=", - val, - cmnt) - diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py new file mode 100644 index 00000000..3d8802b8 --- /dev/null +++ b/cloudinit/distros/parsers/sys_conf.py @@ -0,0 +1,85 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from StringIO import StringIO + +import re + +# This library is used to parse/write +# out the various sysconfig files edited +# +# It has to be slightly modified though +# to ensure that all values are quoted/unquoted correctly +# since these configs are usually sourced into +# bash scripts... 
+import configobj + + +class SysConf(configobj.ConfigObj): + def __init__(self, contents): + configobj.ConfigObj.__init__(self, contents, + interpolation=False, + write_empty_values=True) + + def __str__(self): + contents = self.write() + out_contents = StringIO() + if isinstance(contents, (list, tuple)): + out_contents.write("\n".join(contents)) + else: + out_contents.write(str(contents)) + return out_contents.getvalue() + + def _quote(self, value, multiline=False): + if not isinstance(value, (str, basestring)): + raise ValueError('Value "%s" is not a string' % (value)) + if len(value) == 0: + return '' + if re.search(r"[\n\r]", value): + raise ValueError('Value "%s" cannot be safely quoted.' % (value)) + quot = "%s" + if '#' in value: + quot = self._get_single_quote(value) + elif value[0] in ['"', "'"] and value[-1] in ['"', "'"]: + # Already quoted, leave it be + pass + elif "'" in value and '"' in value: + quot = self._get_triple_quote(value) + else: + # Quote whitespace if it isn't the start+end of a shell command + white_space_ok = False + if value.strip().startswith("$(") and value.strip().endswith(")"): + white_space_ok = True + if re.search(r"[\t ]", value) and not white_space_ok: + quot = self._get_single_quote(value) + return quot % (value) + + def _write_line(self, indent_string, entry, this_entry, comment): + # Ensure it is formatted fine for + # how these sysconfig scripts are used + if this_entry.startswith("'") or this_entry.startswith('"'): + val = this_entry + val = self._decode_element(self._quote(this_entry)) + key = self._decode_element(self._quote(entry)) + cmnt = self._decode_element(comment) + return '%s%s%s%s%s' % (indent_string, + key, + self._a_to_u('='), + val, + cmnt) + diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 45c85fbb..039215c8 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -24,7 +24,8 @@ import os from cloudinit import distros -from cloudinit.distros.parsers import (resolv_conf, quoting_conf) +from cloudinit.distros.parsers.resolv_conf import ResolvConf +from cloudinit.distros.parsers.sys_conf import SysConf from cloudinit import helpers from cloudinit import log as logging @@ -63,14 +64,14 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _adjust_resolve(self, dns_servers, search_servers): - r_conf = resolv_conf.ResolvConf(util.load_file(self.resolve_conf_fn)) + r_conf = ResolvConf(util.load_file(self.resolve_conf_fn)) try: r_conf.parse() except IOError: util.logexc(LOG, "Failed at parsing %s reverting to an empty instance", self.resolve_conf_fn) - r_conf = resolv_conf.ResolvConf('') + r_conf = ResolvConf('') r_conf.parse() if dns_servers: for s in dns_servers: @@ -135,7 +136,9 @@ class Distro(distros.Distro): contents[k] = v updated_am += 1 if updated_am: - lines = contents.write() + lines = [ + str(contents), + ] if not exists: lines.insert(0, util.make_header()) util.write_file(fn, "\n".join(lines), 0644) @@ -177,7 +180,7 @@ class Distro(distros.Distro): else: contents = [] return (exists, - quoting_conf.QuotingConfigObj(contents)) + SysConf(contents)) def _bring_up_interfaces(self, device_names): if device_names and 'all' in device_names: diff --git a/tests/unittests/test_distros/test_hostname.py b/tests/unittests/test_distros/test_hostname.py new file mode 100644 index 00000000..8e644f4d --- /dev/null +++ b/tests/unittests/test_distros/test_hostname.py @@ -0,0 +1,38 @@ +from mocker import MockerTestCase + +from cloudinit.distros.parsers import hostname + + 
+BASE_HOSTNAME = ''' +# My super-duper-hostname + +blahblah + +''' +BASE_HOSTNAME = BASE_HOSTNAME.strip() + + +class TestHostnameHelper(MockerTestCase): + def test_parse_same(self): + hn = hostname.HostnameConf(BASE_HOSTNAME) + self.assertEquals(str(hn).strip(), BASE_HOSTNAME) + self.assertEquals(hn.hostname, 'blahblah') + + def test_no_adjust_hostname(self): + hn = hostname.HostnameConf(BASE_HOSTNAME) + prev_name = hn.hostname + hn.set_hostname("") + self.assertEquals(hn.hostname, prev_name) + + def test_adjust_hostname(self): + hn = hostname.HostnameConf(BASE_HOSTNAME) + prev_name = hn.hostname + self.assertEquals(prev_name, 'blahblah') + hn.set_hostname("bbbbd") + self.assertEquals(hn.hostname, 'bbbbd') + expected_out = ''' +# My super-duper-hostname + +bbbbd +''' + self.assertEquals(str(hn).strip(), expected_out.strip()) diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index b7ce6fea..9763b14b 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -9,6 +9,8 @@ from cloudinit import helpers from cloudinit import settings from cloudinit import util +from cloudinit.distros.parsers.sys_conf import SysConf + from StringIO import StringIO @@ -83,9 +85,8 @@ class TestNetCfgDistro(MockerTestCase): self.assertEquals(write_buf.mode, 0644) def assertCfgEquals(self, blob1, blob2): - cfg_tester = distros.parsers.quoting_conf.QuotingConfigObj - b1 = dict(cfg_tester(blob1.strip().splitlines())) - b2 = dict(cfg_tester(blob2.strip().splitlines())) + b1 = dict(SysConf(blob1.strip().splitlines())) + b2 = dict(SysConf(blob2.strip().splitlines())) self.assertEquals(b1, b2) for (k, v) in b1.items(): self.assertIn(k, b2) diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py new file mode 100644 index 00000000..196d090d --- /dev/null +++ b/tests/unittests/test_distros/test_sysconfig.py @@ -0,0 +1,59 @@ +from mocker import MockerTestCase + +from cloudinit.distros.parsers.sys_conf import SysConf + + +# Lots of good examples @ +# http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt + +class TestSysConfHelper(MockerTestCase): + def test_parse_no_change(self): + contents = '''# A comment +USESMBAUTH=no +KEYTABLE=/usr/lib/kbd/keytables/us.map +SHORTDATE=$(date +%y:%m:%d:%H:%M) +HOSTNAME=blahblah +NETMASK0=255.255.255.0 +# Inline comment +LIST=$LOGROOT/incremental-list +IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64" +ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256" +USEMD5=no''' + conf = SysConf(contents.splitlines()) + self.assertEquals(conf['HOSTNAME'], 'blahblah') + self.assertEquals(conf['SHORTDATE'], '$(date +%y:%m:%d:%H:%M)') + # Should be unquoted + self.assertEquals(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; ' + '-G ${DEVICE} rx 256 tx 256')) + self.assertEquals(contents, str(conf)) + + def test_parse_adjust(self): + contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"' + conf = SysConf(contents.splitlines()) + # Should be unquoted + self.assertEquals('eth0-:0004::1/64 eth1-:0005::1/64', + conf['IPV6TO4_ROUTING']) + conf['IPV6TO4_ROUTING'] = "blah \tblah" + contents2 = str(conf).strip() + # Should be requoted due to whitespace + self.assertEquals('IPV6TO4_ROUTING="blah \tblah"', contents2) + + def test_parse_no_adjust_shell(self): + conf = SysConf(''.splitlines()) + conf['B'] = ' $(time)' + contents = str(conf) + self.assertEquals('B= $(time)', contents) + + def test_parse_empty(self): + 
contents = '' + conf = SysConf(contents.splitlines()) + self.assertEquals('', str(conf).strip()) + + def test_parse_add_new(self): + contents = 'BLAH=b' + conf = SysConf(contents.splitlines()) + conf['Z'] = 'd' + contents = str(conf) + self.assertIn("Z=d", contents) + self.assertIn("BLAH=b", contents) + -- cgit v1.2.3 From 059a4c45ab2b4439872d138452d6296bfe82be07 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 11 Oct 2012 12:52:19 -0700 Subject: Update log message to use the more appropriate 'non-persistently' wording. --- cloudinit/distros/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index c6427401..bafa69d3 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -132,11 +132,11 @@ class Distro(object): # temporarily (until reboot so it should # not be depended on). Use the write # hostname functions for 'permanent' adjustments. - LOG.debug("Temporarily setting the system hostname to %s", hostname) + LOG.debug("Non-persistently setting the system hostname to %s", hostname) try: util.subp(['hostname', hostname]) except util.ProcessExecutionError: - util.logexc(LOG, ("Failed to temporarily adjust" + util.logexc(LOG, ("Failed to non-persistently adjust" " the system hostname to %s"), hostname) def update_hostname(self, hostname, prev_hostname_fn): -- cgit v1.2.3 From 66f6d2d4fd1e8c1b397c9533ee0596d7b09e9824 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 11 Oct 2012 18:48:50 -0700 Subject: Update to use pipes.quote to ensure that variables adjusted in sysconfig files are properly quoted for there common use case, that being sourced into shell scripts. --- cloudinit/distros/parsers/sys_conf.py | 24 ++++++++++-------------- tests/unittests/test_distros/test_sysconfig.py | 14 +++++++++++--- 2 files changed, 21 insertions(+), 17 deletions(-) diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index 3d8802b8..7549c7a3 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -18,6 +18,7 @@ from StringIO import StringIO +import pipes import re # This library is used to parse/write @@ -30,6 +31,7 @@ import re import configobj + class SysConf(configobj.ConfigObj): def __init__(self, contents): configobj.ConfigObj.__init__(self, contents, @@ -50,24 +52,18 @@ class SysConf(configobj.ConfigObj): raise ValueError('Value "%s" is not a string' % (value)) if len(value) == 0: return '' - if re.search(r"[\n\r]", value): - raise ValueError('Value "%s" cannot be safely quoted.' 
% (value)) - quot = "%s" - if '#' in value: - quot = self._get_single_quote(value) - elif value[0] in ['"', "'"] and value[-1] in ['"', "'"]: - # Already quoted, leave it be - pass - elif "'" in value and '"' in value: - quot = self._get_triple_quote(value) + quot_func = (lambda x: str(x)) + if value[0] in ['"', "'"] and value[-1] in ['"', "'"]: + if len(value) == 1: + quot_func = self._get_single_quote else: - # Quote whitespace if it isn't the start+end of a shell command + # Quote whitespace if it isn't the start + end of a shell command white_space_ok = False if value.strip().startswith("$(") and value.strip().endswith(")"): white_space_ok = True - if re.search(r"[\t ]", value) and not white_space_ok: - quot = self._get_single_quote(value) - return quot % (value) + if re.search(r"[\t\r\n ]", value) and not white_space_ok: + quot_func = pipes.quote + return quot_func(value) def _write_line(self, indent_string, entry, this_entry, comment): # Ensure it is formatted fine for diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py index 196d090d..a07a251e 100644 --- a/tests/unittests/test_distros/test_sysconfig.py +++ b/tests/unittests/test_distros/test_sysconfig.py @@ -1,5 +1,7 @@ from mocker import MockerTestCase +import re + from cloudinit.distros.parsers.sys_conf import SysConf @@ -7,6 +9,11 @@ from cloudinit.distros.parsers.sys_conf import SysConf # http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt class TestSysConfHelper(MockerTestCase): + def assertRegexpMatches(self, text, regexp): + regexp = re.compile(regexp) + self.assertTrue(regexp.search(text), + msg="%s must match %s!" % (text, regexp.pattern)) + def test_parse_no_change(self): contents = '''# A comment USESMBAUTH=no @@ -16,8 +23,8 @@ HOSTNAME=blahblah NETMASK0=255.255.255.0 # Inline comment LIST=$LOGROOT/incremental-list -IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64" -ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256" +IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64' +ETHTOOL_OPTS='-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256' USEMD5=no''' conf = SysConf(contents.splitlines()) self.assertEquals(conf['HOSTNAME'], 'blahblah') @@ -25,6 +32,7 @@ USEMD5=no''' # Should be unquoted self.assertEquals(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; ' '-G ${DEVICE} rx 256 tx 256')) + # This is harmless convertion self.assertEquals(contents, str(conf)) def test_parse_adjust(self): @@ -36,7 +44,7 @@ USEMD5=no''' conf['IPV6TO4_ROUTING'] = "blah \tblah" contents2 = str(conf).strip() # Should be requoted due to whitespace - self.assertEquals('IPV6TO4_ROUTING="blah \tblah"', contents2) + self.assertRegexpMatches(contents2, r'IPV6TO4_ROUTING=["\']blah \tblah["\']') def test_parse_no_adjust_shell(self): conf = SysConf(''.splitlines()) -- cgit v1.2.3 From 3cf8b618f9efcdb2e9cca695c2930a2bf14d42ec Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 11 Oct 2012 19:10:42 -0700 Subject: Handle the case where the value contains shell variables to be expanded which when using pipes.quote will now not be expanded, so add some checks to ensure that this case will still happen. 
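A quick illustration, not from the patch, of the behaviour that motivates this check; pipes.quote is the Python 2 stdlib helper the previous commit switched to:

    import pipes

    pipes.quote("blah blah")
    # -> "'blah blah'"   plain whitespace: single quotes are harmless here

    pipes.quote("$LOGROOT/incremental-list")
    # -> "'$LOGROOT/incremental-list'"
    # Single quotes again -- but once the sysconfig file is sourced by a
    # shell, $LOGROOT would no longer be expanded.  The check added below
    # spots $NAME / ${NAME} style references and keeps shell-friendly double
    # (or triple) quoting for those values instead.
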
--- cloudinit/distros/parsers/sys_conf.py | 17 ++++++++++++++++- tests/unittests/test_distros/test_sysconfig.py | 5 ++--- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index 7549c7a3..80c305fe 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -31,6 +31,12 @@ import re import configobj +def _contains_shell_variable(text): + if (re.search(r"\$\{.+\}", text) or + re.search(r"\$[a-zA-Z_]+[a-zA-Z0-9_]*", text)): + return True + return False + class SysConf(configobj.ConfigObj): def __init__(self, contents): @@ -62,7 +68,16 @@ class SysConf(configobj.ConfigObj): if value.strip().startswith("$(") and value.strip().endswith(")"): white_space_ok = True if re.search(r"[\t\r\n ]", value) and not white_space_ok: - quot_func = pipes.quote + if _contains_shell_variable(value): + # If it contains shell variables then we likely want to + # leave it alone since the pipes.quote function likes to + # use single quotes which won't get expanded... + if re.search(r"[\n\"']", value): + quot_func = (lambda x: self._get_triple_quote(x) % x) + else: + quot_func = (lambda x: self._get_single_quote(x) % x) + else: + quot_func = pipes.quote return quot_func(value) def _write_line(self, indent_string, entry, this_entry, comment): diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py index a07a251e..21e161ad 100644 --- a/tests/unittests/test_distros/test_sysconfig.py +++ b/tests/unittests/test_distros/test_sysconfig.py @@ -24,7 +24,7 @@ NETMASK0=255.255.255.0 # Inline comment LIST=$LOGROOT/incremental-list IPV6TO4_ROUTING='eth0-:0004::1/64 eth1-:0005::1/64' -ETHTOOL_OPTS='-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256' +ETHTOOL_OPTS="-K ${DEVICE} tso on; -G ${DEVICE} rx 256 tx 256" USEMD5=no''' conf = SysConf(contents.splitlines()) self.assertEquals(conf['HOSTNAME'], 'blahblah') @@ -32,7 +32,6 @@ USEMD5=no''' # Should be unquoted self.assertEquals(conf['ETHTOOL_OPTS'], ('-K ${DEVICE} tso on; ' '-G ${DEVICE} rx 256 tx 256')) - # This is harmless convertion self.assertEquals(contents, str(conf)) def test_parse_adjust(self): @@ -44,7 +43,7 @@ USEMD5=no''' conf['IPV6TO4_ROUTING'] = "blah \tblah" contents2 = str(conf).strip() # Should be requoted due to whitespace - self.assertRegexpMatches(contents2, r'IPV6TO4_ROUTING=["\']blah \tblah["\']') + self.assertRegexpMatches(contents2, r'IPV6TO4_ROUTING=[\']blah\s+blah[\']') def test_parse_no_adjust_shell(self): conf = SysConf(''.splitlines()) -- cgit v1.2.3 From 204c635e622b52bbe2b2c2a72765e3cb886602fc Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 11 Oct 2012 19:14:14 -0700 Subject: Fix the single item string quoting function. 
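The one-liner below hinges on a configobj detail worth spelling out; the exact template strings here are an assumption about configobj internals, so treat them as illustrative: _get_single_quote() returns a format template, not the quoted value, and still has to be applied with %:

    # Given a SysConf instance `conf` (hypothetical) and a value that is just
    # a lone quote character -- the only case this branch handles:
    value = "'"
    tmpl = conf._get_single_quote(value)   # e.g. the template '"%s"'
    quoted = tmpl % value                  # the value wrapped in quotes: "'"
    # The old code assigned the helper itself to quot_func, so _quote()
    # returned the raw template and KEY="%s" ended up in the written file.
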
--- cloudinit/distros/parsers/sys_conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index 80c305fe..1cefb8bc 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -61,7 +61,7 @@ class SysConf(configobj.ConfigObj): quot_func = (lambda x: str(x)) if value[0] in ['"', "'"] and value[-1] in ['"', "'"]: if len(value) == 1: - quot_func = self._get_single_quote + quot_func = (lambda x: self._get_single_quote(x) % x) else: # Quote whitespace if it isn't the start + end of a shell command white_space_ok = False -- cgit v1.2.3 From bbe325c902ef3a3b8845cd3c1bb8bee0c3c74a89 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 11 Oct 2012 21:24:30 -0700 Subject: Add some more sysconfig tests + pylint cleanups. --- cloudinit/distros/__init__.py | 3 +- cloudinit/distros/parsers/sys_conf.py | 59 +++++++++++++++++--------- tests/unittests/test_distros/test_sysconfig.py | 21 ++++++++- 3 files changed, 59 insertions(+), 24 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index bafa69d3..fa7cc1ca 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -132,7 +132,8 @@ class Distro(object): # temporarily (until reboot so it should # not be depended on). Use the write # hostname functions for 'permanent' adjustments. - LOG.debug("Non-persistently setting the system hostname to %s", hostname) + LOG.debug("Non-persistently setting the system hostname to %s", + hostname) try: util.subp(['hostname', hostname]) except util.ProcessExecutionError: diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index 1cefb8bc..5cd765fc 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -22,7 +22,7 @@ import pipes import re # This library is used to parse/write -# out the various sysconfig files edited +# out the various sysconfig files edited (best attempt effort) # # It has to be slightly modified though # to ensure that all values are quoted/unquoted correctly @@ -30,11 +30,26 @@ import re # bash scripts... import configobj +# See: http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html +# or look at the 'param_expand()' function in the subst.c file in the bash +# source tarball... 
+SHELL_VAR_RULE = r'[a-zA-Z_]+[a-zA-Z0-9_]*' +SHELL_VAR_REGEXES = [ + # Basic variables + re.compile(r"\$" + SHELL_VAR_RULE), + # Things like $?, $0, $-, $@ + re.compile(r"\$[0-9#\?\-@\*]"), + # Things like ${blah:1} - but this one + # gets very complex so just try the + # simple path + re.compile(r"\$\{.+\}"), +] + def _contains_shell_variable(text): - if (re.search(r"\$\{.+\}", text) or - re.search(r"\$[a-zA-Z_]+[a-zA-Z0-9_]*", text)): - return True + for r in SHELL_VAR_REGEXES: + if r.search(text): + return True return False @@ -58,33 +73,36 @@ class SysConf(configobj.ConfigObj): raise ValueError('Value "%s" is not a string' % (value)) if len(value) == 0: return '' - quot_func = (lambda x: str(x)) + quot_func = None if value[0] in ['"', "'"] and value[-1] in ['"', "'"]: if len(value) == 1: - quot_func = (lambda x: self._get_single_quote(x) % x) + quot_func = (lambda x: + self._get_single_quote(x) % x) else: # Quote whitespace if it isn't the start + end of a shell command - white_space_ok = False if value.strip().startswith("$(") and value.strip().endswith(")"): - white_space_ok = True - if re.search(r"[\t\r\n ]", value) and not white_space_ok: - if _contains_shell_variable(value): - # If it contains shell variables then we likely want to - # leave it alone since the pipes.quote function likes to - # use single quotes which won't get expanded... - if re.search(r"[\n\"']", value): - quot_func = (lambda x: self._get_triple_quote(x) % x) + pass + else: + if re.search(r"[\t\r\n ]", value): + if _contains_shell_variable(value): + # If it contains shell variables then we likely want to + # leave it alone since the pipes.quote function likes + # to use single quotes which won't get expanded... + if re.search(r"[\n\"']", value): + quot_func = (lambda x: + self._get_triple_quote(x) % x) + else: + quot_func = (lambda x: + self._get_single_quote(x) % x) else: - quot_func = (lambda x: self._get_single_quote(x) % x) - else: - quot_func = pipes.quote + quot_func = pipes.quote + if not quot_func: + return value return quot_func(value) def _write_line(self, indent_string, entry, this_entry, comment): # Ensure it is formatted fine for # how these sysconfig scripts are used - if this_entry.startswith("'") or this_entry.startswith('"'): - val = this_entry val = self._decode_element(self._quote(this_entry)) key = self._decode_element(self._quote(entry)) cmnt = self._decode_element(comment) @@ -93,4 +111,3 @@ class SysConf(configobj.ConfigObj): self._a_to_u('='), val, cmnt) - diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py index 21e161ad..1e34909d 100644 --- a/tests/unittests/test_distros/test_sysconfig.py +++ b/tests/unittests/test_distros/test_sysconfig.py @@ -9,7 +9,8 @@ from cloudinit.distros.parsers.sys_conf import SysConf # http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt class TestSysConfHelper(MockerTestCase): - def assertRegexpMatches(self, text, regexp): + # This function was added in 2.7, make it work for 2.6 + def assertRegMatches(self, text, regexp): regexp = re.compile(regexp) self.assertTrue(regexp.search(text), msg="%s must match %s!" 
% (text, regexp.pattern)) @@ -34,6 +35,21 @@ USEMD5=no''' '-G ${DEVICE} rx 256 tx 256')) self.assertEquals(contents, str(conf)) + def test_parse_shell_vars(self): + contents = 'USESMBAUTH=$XYZ' + conf = SysConf(contents.splitlines()) + self.assertEquals(contents, str(conf)) + conf = SysConf('') + conf['B'] = '${ZZ}d apples' + # Should be quoted + self.assertEquals('B="${ZZ}d apples"', str(conf)) + conf = SysConf('') + conf['B'] = '$? d apples' + self.assertEquals('B="$? d apples"', str(conf)) + contents = 'IPMI_WATCHDOG_OPTIONS="timeout=60"' + conf = SysConf(contents.splitlines()) + self.assertEquals('IPMI_WATCHDOG_OPTIONS=timeout=60', str(conf)) + def test_parse_adjust(self): contents = 'IPV6TO4_ROUTING="eth0-:0004::1/64 eth1-:0005::1/64"' conf = SysConf(contents.splitlines()) @@ -43,7 +59,8 @@ USEMD5=no''' conf['IPV6TO4_ROUTING'] = "blah \tblah" contents2 = str(conf).strip() # Should be requoted due to whitespace - self.assertRegexpMatches(contents2, r'IPV6TO4_ROUTING=[\']blah\s+blah[\']') + self.assertRegMatches(contents2, + r'IPV6TO4_ROUTING=[\']blah\s+blah[\']') def test_parse_no_adjust_shell(self): conf = SysConf(''.splitlines()) -- cgit v1.2.3 From 7d7cb0c7126ad4f67099f741dac67aceff66d002 Mon Sep 17 00:00:00 2001 From: Thomas Hervé Date: Mon, 15 Oct 2012 10:25:09 +0200 Subject: Skip install when there is not configuration, and install the package to be setup properly. --- cloudinit/config/cc_landscape.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 7cfb8296..331559f4 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -59,6 +59,10 @@ def handle(_name, cfg, cloud, log, _args): raise RuntimeError(("'landscape' key existed in config," " but not a dictionary type," " is a %s instead"), util.obj_name(ls_cloudcfg)) + if not ls_cloudcfg: + return + + cloud.distro.install_packages(["landscape-client"]) merge_data = [ LSC_BUILTIN_CFG, @@ -79,8 +83,7 @@ def handle(_name, cfg, cloud, log, _args): util.write_file(lsc_client_fn, contents.getvalue()) log.debug("Wrote landscape config file to %s", lsc_client_fn) - if ls_cloudcfg: - util.write_file(LS_DEFAULT_FILE, "RUN=1\n") + util.write_file(LS_DEFAULT_FILE, "RUN=1\n") def merge_together(objs): -- cgit v1.2.3 From ea21cecc94b613b063d17588d4cd6612bc56753b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:15:48 -0700 Subject: Add in a configuration module that can write out the yum.repo format for those that want to hook into different repos for installing. --- cloudinit/config/cc_yum_add_repo.py | 93 +++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 cloudinit/config/cc_yum_add_repo.py diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py new file mode 100644 index 00000000..41dc72a0 --- /dev/null +++ b/cloudinit/config/cc_yum_add_repo.py @@ -0,0 +1,93 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this + +import os + +from cloudinit import templater +from cloudinit import util + +import configobj + + +def _canonicalize_id(repo_id): + repo_id = repo_id.lower().replace("-", "_") + repo_id = repo_id.replace(" ", "_") + return repo_id + + +## TODO(harlowja): move to distro? +# See man yum.conf +def _format_repository_config(repo_id, repo_config): + to_be = configobj.ConfigObj() + to_be[repo_id] = {} + # Do basic translation of + for (k, v) in repo_config.items(): + if isinstance(v, bool): + if v: + v = '1' + else: + v = '0' + elif isinstance(v, (tuple, list)): + v = "\n ".join(v) + # For now assume that peopel using this know + # the format of yum and don't verify further + to_be[repo_id][k] = v + lines = to_be.write() + return "\n".join(lines) + + +def handle(name, cfg, cloud, log, _args): + repos = cfg.get('yum_repos') + if not repos: + log.debug(("Skipping module named %s," + " no 'yum_repos' configuration found"), name) + return + repo_base_path = util.get_cfg_option_str(cfg, 'yum_repo_dir', + '/etc/yum.repos.d/') + repo_locations = {} + repo_configs = {} + for (repo_id, repo_config) in repos.items(): + canon_repo_id = _canonicalize_id(repo_id) + repo_fn_pth = os.path.join(repo_base_path, "%s.repo" % (canon_repo_id)) + if os.path.exists(repo_fn_pth): + log.info("Skipping repo %s, file %s already exists!", + repo_id, repo_fn_pth) + continue + elif canon_repo_id in repo_locations: + log.info("Skipping repo %s, file %s already pending!", + repo_id, repo_fn_pth) + continue + if not repo_config: + repo_config = {} + # Do some basic sanity checks/cleaning + n_repo_config = {} + for (k, v) in repo_config.items(): + k = k.lower().strip().replace("-", "_") + if k: + n_repo_config[k] = v + repo_config = n_repo_config + if not 'baseurl' in repo_config: + log.warn("Repository %s does not contain a baseurl address", repo_id) + else: + repo_configs[canon_repo_id] = repo_config + repo_locations[canon_repo_id] = repo_fn_pth + for (c_repo_id, path) in repo_locations.items(): + repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) + util.write_file(path, repo_blob) -- cgit v1.2.3 From 19c640ebfd3832ec1582c6c134ea68efac95588c Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:17:20 -0700 Subject: Remove some pylint buggies. 
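Before the lint cleanups below, a rough usage sketch of the module introduced above; the repo id and URL are invented for illustration, and the exact INI quoting is left to configobj:

    from cloudinit.config import cc_yum_add_repo

    # handle() reads the 'yum_repos' section of cloud-config; each repo id is
    # canonicalized into a file name under /etc/yum.repos.d/ (existing files
    # are never overwritten) and a 'baseurl' entry is required.
    repo_id = cc_yum_add_repo._canonicalize_id('EPEL Testing')  # 'epel_testing'
    blob = cc_yum_add_repo._format_repository_config(repo_id, {
        'name': 'EPEL testing packages',
        'baseurl': 'http://download.example.com/pub/epel/testing/$basearch',
    })
    # blob is yum.conf-style INI, roughly (key order may differ):
    #   [epel_testing]
    #   name = EPEL testing packages
    #   baseurl = http://download.example.com/pub/epel/testing/$basearch
    # and handle() would write it to /etc/yum.repos.d/epel_testing.repo.
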
--- cloudinit/config/cc_yum_add_repo.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 41dc72a0..10c423b5 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -20,7 +20,6 @@ import os -from cloudinit import templater from cloudinit import util import configobj @@ -53,7 +52,7 @@ def _format_repository_config(repo_id, repo_config): return "\n".join(lines) -def handle(name, cfg, cloud, log, _args): +def handle(name, cfg, _cloud, log, _args): repos = cfg.get('yum_repos') if not repos: log.debug(("Skipping module named %s," @@ -84,10 +83,12 @@ def handle(name, cfg, cloud, log, _args): n_repo_config[k] = v repo_config = n_repo_config if not 'baseurl' in repo_config: - log.warn("Repository %s does not contain a baseurl address", repo_id) + log.warn("Repository %s does not contain a baseurl address", + repo_id) else: repo_configs[canon_repo_id] = repo_config repo_locations[canon_repo_id] = repo_fn_pth for (c_repo_id, path) in repo_locations.items(): - repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) + repo_blob = _format_repository_config(c_repo_id, + repo_configs.get(c_repo_id)) util.write_file(path, repo_blob) -- cgit v1.2.3 From e8371b113c078f3017c44804662960da9a04abf2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:19:14 -0700 Subject: Fix copyright. --- cloudinit/config/cc_yum_add_repo.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 10c423b5..f0487773 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -1,10 +1,8 @@ # vi: ts=4 expandtab # -# Copyright (C) 2011 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# Copyright (C) 2012 Yahoo! Inc. # -# Author: Scott Moser -# Author: Juerg Haefliger +# Author: Joshua Harlow # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -16,7 +14,7 @@ # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License -# along with this +# along with this program. If not, see . import os -- cgit v1.2.3 From cf80962e51a925d5b895d0ef90c2a15ad99778b3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 15:24:15 -0700 Subject: Update comments as to why format is the way it is. 
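The comments added below document two quirks of the translation in _format_repository_config(); a minimal sketch of what they mean in practice:

    from cloudinit.config import cc_yum_add_repo

    print(cc_yum_add_repo._format_repository_config('updates_testing', {
        'enabled': True,       # booleans become the 1/0 form yum prefers,
        'gpgcheck': False,     # i.e. "enabled = 1" and "gpgcheck = 0"
    }))
    # List values are instead folded into a single multi-valued entry (yum
    # accepts that for options such as baseurl); their exact INI rendering
    # is left to configobj.
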
--- cloudinit/config/cc_yum_add_repo.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index f0487773..fda3236f 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -34,14 +34,17 @@ def _canonicalize_id(repo_id): def _format_repository_config(repo_id, repo_config): to_be = configobj.ConfigObj() to_be[repo_id] = {} - # Do basic translation of + # Do basic translation of the items -> values for (k, v) in repo_config.items(): if isinstance(v, bool): + # Seems like yum prefers 1/0 if v: v = '1' else: v = '0' elif isinstance(v, (tuple, list)): + # Can handle 'lists' in certain cases + # See: http://bit.ly/Qqrf1t v = "\n ".join(v) # For now assume that peopel using this know # the format of yum and don't verify further -- cgit v1.2.3 From 7029732d496181233f2115dbfd65b13d20aceca7 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 19:18:15 -0700 Subject: Add a more generic package install mechansim that removes some of the code in apt_update_upgrade to do upgrades and installs and places it in a generic package module and adjusts some of the reboot backoffs and log flushing/sleeping that was happening there. --- cloudinit/config/cc_apt_configure.py | 258 +++++++++++++++++ cloudinit/config/cc_apt_update_upgrade.py | 305 --------------------- .../config/cc_package_update_upgrade_install.py | 110 ++++++++ 3 files changed, 368 insertions(+), 305 deletions(-) create mode 100644 cloudinit/config/cc_apt_configure.py delete mode 100644 cloudinit/config/cc_apt_update_upgrade.py create mode 100644 cloudinit/config/cc_package_update_upgrade_install.py diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py new file mode 100644 index 00000000..2cf65ecc --- /dev/null +++ b/cloudinit/config/cc_apt_configure.py @@ -0,0 +1,258 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2009-2010 Canonical Ltd. +# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. +# +# Author: Scott Moser +# Author: Juerg Haefliger +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import glob +import os + +from cloudinit import templater +from cloudinit import util + +distros = ['ubuntu', 'debian'] + +PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" +PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" + +# A temporary shell program to get a given gpg key +# from a given keyserver +EXPORT_GPG_KEYID = """ + k=${1} ks=${2}; + exec 2>/dev/null + [ -n "$k" ] || exit 1; + armour=$(gpg --list-keys --armour "${k}") + if [ -z "${armour}" ]; then + gpg --keyserver ${ks} --recv $k >/dev/null && + armour=$(gpg --export --armour "${k}") && + gpg --batch --yes --delete-keys "${k}" + fi + [ -n "${armour}" ] && echo "${armour}" +""" + + +def handle(name, cfg, cloud, log, _args): + release = get_release() + mirrors = find_apt_mirror_info(cloud, cfg) + if not mirrors or "primary" not in mirrors: + log.debug(("Skipping module named %s," + " no package 'mirror' located"), name) + return + + # backwards compatibility + mirror = mirrors["primary"] + mirrors["mirror"] = mirror + + log.debug("Mirror info: %s" % mirrors) + + if not util.get_cfg_option_bool(cfg, + 'apt_preserve_sources_list', False): + generate_sources_list(release, mirrors, cloud, log) + old_mirrors = cfg.get('apt_old_mirrors', + {"primary": "archive.ubuntu.com/ubuntu", + "security": "security.ubuntu.com/ubuntu"}) + rename_apt_lists(old_mirrors, mirrors) + + # Set up any apt proxy + proxy = cfg.get("apt_proxy", None) + proxy_filename = PROXY_FN + if proxy: + try: + # See man 'apt.conf' + contents = PROXY_TPL % (proxy) + util.write_file(cloud.paths.join(False, proxy_filename), + contents) + except Exception as e: + util.logexc(log, "Failed to write proxy to %s", proxy_filename) + elif os.path.isfile(proxy_filename): + util.del_file(proxy_filename) + + # Process 'apt_sources' + if 'apt_sources' in cfg: + params = mirrors + params['RELEASE'] = release + params['MIRROR'] = mirror + errors = add_sources(cloud, cfg['apt_sources'], params) + for e in errors: + log.warn("Add source error: %s", ':'.join(e)) + + dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) + if dconf_sel: + log.debug("Setting debconf selections per cloud config") + try: + util.subp(('debconf-set-selections', '-'), dconf_sel) + except Exception: + util.logexc(log, "Failed to run debconf-set-selections") + + +# get gpg keyid from keyserver +def getkeybyid(keyid, keyserver): + with util.ExtendedTemporaryFile(suffix='.sh') as fh: + fh.write(EXPORT_GPG_KEYID) + fh.flush() + cmd = ['/bin/sh', fh.name, keyid, keyserver] + (stdout, _stderr) = util.subp(cmd) + return stdout.strip() + + +def mirror2lists_fileprefix(mirror): + string = mirror + # take off http:// or ftp:// + if string.endswith("/"): + string = string[0:-1] + pos = string.find("://") + if pos >= 0: + string = string[pos + 3:] + string = string.replace("/", "_") + return string + + +def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): + for (name, omirror) in old_mirrors.iteritems(): + nmirror = new_mirrors.get(name) + if not nmirror: + continue + oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) + nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) + if oprefix == nprefix: + continue + olen = len(oprefix) + for filename in glob.glob("%s_*" % oprefix): + util.rename(filename, "%s%s" % (nprefix, filename[olen:])) + + +def get_release(): + (stdout, _stderr) = util.subp(['lsb_release', '-cs']) + return stdout.strip() + + +def generate_sources_list(codename, mirrors, cloud, log): + template_fn = cloud.get_template_filename('sources.list') 
+ if not template_fn: + log.warn("No template found, not rendering /etc/apt/sources.list") + return + + params = {'codename': codename} + for k in mirrors: + params[k] = mirrors[k] + out_fn = cloud.paths.join(False, '/etc/apt/sources.list') + templater.render_to_file(template_fn, out_fn, params) + + +def add_sources(cloud, srclist, template_params=None): + """ + add entries in /etc/apt/sources.list.d for each abbreviated + sources.list entry in 'srclist'. When rendering template, also + include the values in dictionary searchList + """ + if template_params is None: + template_params = {} + + errorlist = [] + for ent in srclist: + if 'source' not in ent: + errorlist.append(["", "missing source"]) + continue + + source = ent['source'] + if source.startswith("ppa:"): + try: + util.subp(["add-apt-repository", source]) + except: + errorlist.append([source, "add-apt-repository failed"]) + continue + + source = templater.render_string(source, template_params) + + if 'filename' not in ent: + ent['filename'] = 'cloud_config_sources.list' + + if not ent['filename'].startswith("/"): + ent['filename'] = os.path.join("/etc/apt/sources.list.d/", + ent['filename']) + + if ('keyid' in ent and 'key' not in ent): + ks = "keyserver.ubuntu.com" + if 'keyserver' in ent: + ks = ent['keyserver'] + try: + ent['key'] = getkeybyid(ent['keyid'], ks) + except: + errorlist.append([source, "failed to get key from %s" % ks]) + continue + + if 'key' in ent: + try: + util.subp(('apt-key', 'add', '-'), ent['key']) + except: + errorlist.append([source, "failed add key"]) + + try: + contents = "%s\n" % (source) + util.write_file(cloud.paths.join(False, ent['filename']), + contents, omode="ab") + except: + errorlist.append([source, + "failed write to file %s" % ent['filename']]) + + return errorlist + + +def find_apt_mirror_info(cloud, cfg): + """find an apt_mirror given the cloud and cfg provided.""" + + mirror = None + + # this is less preferred way of specifying mirror preferred would be to + # use the distro's search or package_mirror. + mirror = cfg.get("apt_mirror", None) + + search = cfg.get("apt_mirror_search", None) + if not mirror and search: + mirror = util.search_for_mirror(search) + + if (not mirror and + util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): + mydom = "" + doms = [] + + # if we have a fqdn, then search its domain portion first + (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) + mydom = ".".join(fqdn.split(".")[1:]) + if mydom: + doms.append(".%s" % mydom) + + doms.extend((".localdomain", "",)) + + mirror_list = [] + distro = cloud.distro.name + mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) + for post in doms: + mirror_list.append(mirrorfmt % (post)) + + mirror = util.search_for_mirror(mirror_list) + + mirror_info = cloud.datasource.get_package_mirror_info() + + # this is a bit strange. + # if mirror is set, then one of the legacy options above set it + # but they do not cover security. so we need to get that from + # get_package_mirror_info + if mirror: + mirror_info.update({'primary': mirror}) + + return mirror_info diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py deleted file mode 100644 index 356bb98d..00000000 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ /dev/null @@ -1,305 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2009-2010 Canonical Ltd. -# Copyright (C) 2012 Hewlett-Packard Development Company, L.P. 
-# -# Author: Scott Moser -# Author: Juerg Haefliger -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import glob -import os -import time - -from cloudinit import templater -from cloudinit import util - -distros = ['ubuntu', 'debian'] - -PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n" -PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy" - -# A temporary shell program to get a given gpg key -# from a given keyserver -EXPORT_GPG_KEYID = """ - k=${1} ks=${2}; - exec 2>/dev/null - [ -n "$k" ] || exit 1; - armour=$(gpg --list-keys --armour "${k}") - if [ -z "${armour}" ]; then - gpg --keyserver ${ks} --recv $k >/dev/null && - armour=$(gpg --export --armour "${k}") && - gpg --batch --yes --delete-keys "${k}" - fi - [ -n "${armour}" ] && echo "${armour}" -""" - - -def handle(name, cfg, cloud, log, _args): - update = util.get_cfg_option_bool(cfg, 'apt_update', False) - upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False) - - release = get_release() - mirrors = find_apt_mirror_info(cloud, cfg) - if not mirrors or "primary" not in mirrors: - log.debug(("Skipping module named %s," - " no package 'mirror' located"), name) - return - - # backwards compatibility - mirror = mirrors["primary"] - mirrors["mirror"] = mirror - - log.debug("mirror info: %s" % mirrors) - - if not util.get_cfg_option_bool(cfg, - 'apt_preserve_sources_list', False): - generate_sources_list(release, mirrors, cloud, log) - old_mirrors = cfg.get('apt_old_mirrors', - {"primary": "archive.ubuntu.com/ubuntu", - "security": "security.ubuntu.com/ubuntu"}) - rename_apt_lists(old_mirrors, mirrors) - - # Set up any apt proxy - proxy = cfg.get("apt_proxy", None) - proxy_filename = PROXY_FN - if proxy: - try: - # See man 'apt.conf' - contents = PROXY_TPL % (proxy) - util.write_file(cloud.paths.join(False, proxy_filename), - contents) - except Exception as e: - util.logexc(log, "Failed to write proxy to %s", proxy_filename) - elif os.path.isfile(proxy_filename): - util.del_file(proxy_filename) - - # Process 'apt_sources' - if 'apt_sources' in cfg: - params = mirrors - params['RELEASE'] = release - params['MIRROR'] = mirror - errors = add_sources(cloud, cfg['apt_sources'], params) - for e in errors: - log.warn("Source Error: %s", ':'.join(e)) - - dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False) - if dconf_sel: - log.debug("setting debconf selections per cloud config") - try: - util.subp(('debconf-set-selections', '-'), dconf_sel) - except: - util.logexc(log, "Failed to run debconf-set-selections") - - pkglist = util.get_cfg_option_list(cfg, 'packages', []) - - errors = [] - if update or len(pkglist) or upgrade: - try: - cloud.distro.update_package_sources() - except Exception as e: - util.logexc(log, "Package update failed") - errors.append(e) - - if upgrade: - try: - cloud.distro.package_command("upgrade") - except Exception as e: - util.logexc(log, "Package upgrade failed") - errors.append(e) - - if len(pkglist): - try: - cloud.distro.install_packages(pkglist) - except Exception as e: - util.logexc(log, "Failed to 
install packages: %s ", pkglist) - errors.append(e) - - # kernel and openssl (possibly some other packages) - # write a file /var/run/reboot-required after upgrading. - # if that file exists and configured, then just stop right now and reboot - # TODO(smoser): handle this less voilently - reboot_file = "/var/run/reboot-required" - if ((upgrade or pkglist) and cfg.get("apt_reboot_if_required", False) and - os.path.isfile(reboot_file)): - log.warn("rebooting after upgrade or install per %s" % reboot_file) - time.sleep(1) # give the warning time to get out - util.subp(["/sbin/reboot"]) - time.sleep(60) - log.warn("requested reboot did not happen!") - errors.append(Exception("requested reboot did not happen!")) - - if len(errors): - log.warn("%s failed with exceptions, re-raising the last one", - len(errors)) - raise errors[-1] - - -# get gpg keyid from keyserver -def getkeybyid(keyid, keyserver): - with util.ExtendedTemporaryFile(suffix='.sh') as fh: - fh.write(EXPORT_GPG_KEYID) - fh.flush() - cmd = ['/bin/sh', fh.name, keyid, keyserver] - (stdout, _stderr) = util.subp(cmd) - return stdout.strip() - - -def mirror2lists_fileprefix(mirror): - string = mirror - # take off http:// or ftp:// - if string.endswith("/"): - string = string[0:-1] - pos = string.find("://") - if pos >= 0: - string = string[pos + 3:] - string = string.replace("/", "_") - return string - - -def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"): - for (name, omirror) in old_mirrors.iteritems(): - nmirror = new_mirrors.get(name) - if not nmirror: - continue - oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror)) - nprefix = os.path.join(lists_d, mirror2lists_fileprefix(nmirror)) - if oprefix == nprefix: - continue - olen = len(oprefix) - for filename in glob.glob("%s_*" % oprefix): - util.rename(filename, "%s%s" % (nprefix, filename[olen:])) - - -def get_release(): - (stdout, _stderr) = util.subp(['lsb_release', '-cs']) - return stdout.strip() - - -def generate_sources_list(codename, mirrors, cloud, log): - template_fn = cloud.get_template_filename('sources.list') - if not template_fn: - log.warn("No template found, not rendering /etc/apt/sources.list") - return - - params = {'codename': codename} - for k in mirrors: - params[k] = mirrors[k] - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) - - -def add_sources(cloud, srclist, template_params=None): - """ - add entries in /etc/apt/sources.list.d for each abbreviated - sources.list entry in 'srclist'. 
When rendering template, also - include the values in dictionary searchList - """ - if template_params is None: - template_params = {} - - errorlist = [] - for ent in srclist: - if 'source' not in ent: - errorlist.append(["", "missing source"]) - continue - - source = ent['source'] - if source.startswith("ppa:"): - try: - util.subp(["add-apt-repository", source]) - except: - errorlist.append([source, "add-apt-repository failed"]) - continue - - source = templater.render_string(source, template_params) - - if 'filename' not in ent: - ent['filename'] = 'cloud_config_sources.list' - - if not ent['filename'].startswith("/"): - ent['filename'] = os.path.join("/etc/apt/sources.list.d/", - ent['filename']) - - if ('keyid' in ent and 'key' not in ent): - ks = "keyserver.ubuntu.com" - if 'keyserver' in ent: - ks = ent['keyserver'] - try: - ent['key'] = getkeybyid(ent['keyid'], ks) - except: - errorlist.append([source, "failed to get key from %s" % ks]) - continue - - if 'key' in ent: - try: - util.subp(('apt-key', 'add', '-'), ent['key']) - except: - errorlist.append([source, "failed add key"]) - - try: - contents = "%s\n" % (source) - util.write_file(cloud.paths.join(False, ent['filename']), - contents, omode="ab") - except: - errorlist.append([source, - "failed write to file %s" % ent['filename']]) - - return errorlist - - -def find_apt_mirror_info(cloud, cfg): - """find an apt_mirror given the cloud and cfg provided.""" - - mirror = None - - # this is less preferred way of specifying mirror preferred would be to - # use the distro's search or package_mirror. - mirror = cfg.get("apt_mirror", None) - - search = cfg.get("apt_mirror_search", None) - if not mirror and search: - mirror = util.search_for_mirror(search) - - if (not mirror and - util.get_cfg_option_bool(cfg, "apt_mirror_search_dns", False)): - mydom = "" - doms = [] - - # if we have a fqdn, then search its domain portion first - (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) - mydom = ".".join(fqdn.split(".")[1:]) - if mydom: - doms.append(".%s" % mydom) - - doms.extend((".localdomain", "",)) - - mirror_list = [] - distro = cloud.distro.name - mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro) - for post in doms: - mirror_list.append(mirrorfmt % (post)) - - mirror = util.search_for_mirror(mirror_list) - - mirror_info = cloud.datasource.get_package_mirror_info() - - # this is a bit strange. - # if mirror is set, then one of the legacy options above set it - # but they do not cover security. so we need to get that from - # get_package_mirror_info - if mirror: - mirror_info.update({'primary': mirror}) - - return mirror_info diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py new file mode 100644 index 00000000..e319e147 --- /dev/null +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -0,0 +1,110 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
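+#
+# Reads both the old apt_* keys and the newer package_* keys (see
+# _multi_cfg_bool_get below) so existing cloud-configs keep working.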
+ +from logging import StreamHandler +import os +import time + +from cloudinit import util + +REBOOT_FILE = "/var/run/reboot-required" +REBOOT_CMD = ["/sbin/reboot"] + + +def _multi_cfg_bool_get(cfg, *keys): + for k in keys: + if util.get_cfg_option_bool(cfg, k, False): + return True + return False + + +def _flush_loggers(root): + for h in root.handlers: + if isinstance(h, (StreamHandler)): + try: + h.flush() + except IOError: + pass + if root.parent: + _flush_loggers(root.parent) + + +def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): + util.subp(REBOOT_CMD) + start = time.time() + wait_time = initial_sleep + for _i in range(0, wait_attempts): + time.sleep(wait_time) + wait_time *= backoff + elapsed = time.time() - start + log.debug("Rebooted, but still running after %s seconds", int(elapsed)) + # If we got here, not good + elapsed = time.time() - start + raise RuntimeError(("Reboot did not happen" + " after %s seconds!") % (int(elapsed))) + + +def handle(_name, cfg, cloud, log, _args): + # Handle the old style + new config names + update = _multi_cfg_bool_get(cfg, 'apt_update', 'package_update') + upgrade = _multi_cfg_bool_get(cfg, 'package_upgrade', 'apt_upgrade') + reboot_if_required = _multi_cfg_bool_get(cfg, 'apt_reboot_if_required', + 'package_reboot_if_required') + pkglist = util.get_cfg_option_list(cfg, 'packages', []) + + errors = [] + if update or len(pkglist) or upgrade: + try: + cloud.distro.update_package_sources() + except Exception as e: + util.logexc(log, "Package update failed") + errors.append(e) + + if upgrade: + try: + cloud.distro.package_command("upgrade") + except Exception as e: + util.logexc(log, "Package upgrade failed") + errors.append(e) + + if len(pkglist): + try: + cloud.distro.install_packages(pkglist) + except Exception as e: + util.logexc(log, "Failed to install packages: %s", pkglist) + errors.append(e) + + # TODO(smoser): handle this less violently + # kernel and openssl (possibly some other packages) + # write a file /var/run/reboot-required after upgrading. + # if that file exists and configured, then just stop right now and reboot + reboot_fn_exists = os.path.isfile(REBOOT_FILE) + if (upgrade or pkglist) and reboot_if_required and reboot_fn_exists: + try: + log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE) + # Flush the above warning + anything else out... + _flush_loggers(log) + _fire_reboot(log) + except Exception as e: + util.logexc(log, "Requested reboot did not happen!") + errors.append(e) + + if len(errors): + log.warn("%s failed with exceptions, re-raising the last one", + len(errors)) + raise errors[-1] -- cgit v1.2.3 From 85a412c172044ae89d381f69ddb309ce8b3cea6e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 21:14:51 -0700 Subject: Move the recursive flushing to the log module. --- cloudinit/config/cc_package_update_upgrade_install.py | 15 ++------------- cloudinit/log.py | 12 ++++++++++++ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/cloudinit/config/cc_package_update_upgrade_install.py b/cloudinit/config/cc_package_update_upgrade_install.py index e319e147..73b0e30d 100644 --- a/cloudinit/config/cc_package_update_upgrade_install.py +++ b/cloudinit/config/cc_package_update_upgrade_install.py @@ -16,10 +16,10 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-from logging import StreamHandler import os import time +from cloudinit import log as logging from cloudinit import util REBOOT_FILE = "/var/run/reboot-required" @@ -33,17 +33,6 @@ def _multi_cfg_bool_get(cfg, *keys): return False -def _flush_loggers(root): - for h in root.handlers: - if isinstance(h, (StreamHandler)): - try: - h.flush() - except IOError: - pass - if root.parent: - _flush_loggers(root.parent) - - def _fire_reboot(log, wait_attempts=6, initial_sleep=1, backoff=2): util.subp(REBOOT_CMD) start = time.time() @@ -98,7 +87,7 @@ def handle(_name, cfg, cloud, log, _args): try: log.warn("Rebooting after upgrade or install per %s", REBOOT_FILE) # Flush the above warning + anything else out... - _flush_loggers(log) + logging.flushLoggers(log) _fire_reboot(log) except Exception as e: util.logexc(log, "Requested reboot did not happen!") diff --git a/cloudinit/log.py b/cloudinit/log.py index 2333e5ee..da6c2851 100644 --- a/cloudinit/log.py +++ b/cloudinit/log.py @@ -53,6 +53,18 @@ def setupBasicLogging(): root.setLevel(DEBUG) +def flushLoggers(root): + if not root: + return + for h in root.handlers: + if isinstance(h, (logging.StreamHandler)): + try: + h.flush() + except IOError: + pass + flushLoggers(root.parent) + + def setupLogging(cfg=None): # See if the config provides any logging conf... if not cfg: -- cgit v1.2.3 From 8e38c908da002b350d26845ffb5931b4efce1ca8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 16 Oct 2012 21:20:19 -0700 Subject: Add in a created by cloud-init header. --- cloudinit/config/cc_yum_add_repo.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index fda3236f..ee117bcd 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -50,6 +50,7 @@ def _format_repository_config(repo_id, repo_config): # the format of yum and don't verify further to_be[repo_id][k] = v lines = to_be.write() + lines.insert(0, util.make_header()) return "\n".join(lines) -- cgit v1.2.3 From 0b73122b6a7c63d4ab1b117c6c0c136876fefd16 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 17 Oct 2012 09:32:35 -0700 Subject: Make the yum value 'string' translation a function. --- cloudinit/config/cc_yum_add_repo.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index ee117bcd..123f7827 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -29,6 +29,19 @@ def _canonicalize_id(repo_id): return repo_id +def _format_repo_value(val): + if isinstance(val, (bool)): + # Seems like yum prefers 1/0 + return str(int(val)) + if isinstance(val, (list, tuple)): + # Can handle 'lists' in certain cases + # See: http://bit.ly/Qqrf1t + return "\n ".join([_format_repo_value(v) for v in val]) + if not isinstance(val, (basestring, str)): + return str(val) + return val + + ## TODO(harlowja): move to distro? 
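+# The value translation below is handled by _format_repo_value, e.g.
+# True -> "1" and ["a", "b"] -> "a\n    b" (yum's continuation-line style
+# for multi-valued options).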
# See man yum.conf def _format_repository_config(repo_id, repo_config): @@ -36,19 +49,9 @@ def _format_repository_config(repo_id, repo_config): to_be[repo_id] = {} # Do basic translation of the items -> values for (k, v) in repo_config.items(): - if isinstance(v, bool): - # Seems like yum prefers 1/0 - if v: - v = '1' - else: - v = '0' - elif isinstance(v, (tuple, list)): - # Can handle 'lists' in certain cases - # See: http://bit.ly/Qqrf1t - v = "\n ".join(v) - # For now assume that peopel using this know - # the format of yum and don't verify further - to_be[repo_id][k] = v + # For now assume that people using this know + # the format of yum and don't verify keys/values further + to_be[repo_id][k] = _format_repo_value(v) lines = to_be.write() lines.insert(0, util.make_header()) return "\n".join(lines) -- cgit v1.2.3 From 04f52eb593e4f5114626c74fd8f3c5a9a8d440bd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 17 Oct 2012 09:42:22 -0700 Subject: More cleanups, fix header function. --- cloudinit/config/cc_yum_add_repo.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 123f7827..5c273825 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -53,7 +53,7 @@ def _format_repository_config(repo_id, repo_config): # the format of yum and don't verify keys/values further to_be[repo_id][k] = _format_repo_value(v) lines = to_be.write() - lines.insert(0, util.make_header()) + lines.insert(0, "# Created by cloud-init on %s" % (util.time_rfc2822())) return "\n".join(lines) @@ -87,12 +87,19 @@ def handle(name, cfg, _cloud, log, _args): if k: n_repo_config[k] = v repo_config = n_repo_config - if not 'baseurl' in repo_config: - log.warn("Repository %s does not contain a baseurl address", - repo_id) - else: + missing_required = 0 + for req_field in ['baseurl']: + if not req_field in repo_config: + log.warn(("Repository %s does not contain a %s" + " configuration 'required' entry"), + repo_id, req_field) + missing_required += 1 + if not missing_required: repo_configs[canon_repo_id] = repo_config repo_locations[canon_repo_id] = repo_fn_pth + else: + log.warn("Repository %s is missing %s required fields, skipping!", + repo_id, missing_required) for (c_repo_id, path) in repo_locations.items(): repo_blob = _format_repository_config(c_repo_id, repo_configs.get(c_repo_id)) -- cgit v1.2.3 From 923f5c70fbff04ff538a5df17c300a9f39a85180 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 Oct 2012 06:18:24 -0400 Subject: fix pep8/pylint --- cloudinit/distros/__init__.py | 2 +- tests/unittests/test_datasource/test_configdrive.py | 5 ++--- tests/unittests/test_distros/test_user_data_normalize.py | 1 + 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index abd6cc48..cac3ed7a 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -424,7 +424,7 @@ def _normalize_groups(grp_cfg): # configuration. # # The output is a dictionary of user -# names => user config which is the standard +# names => user config which is the standard # form used in the rest of cloud-init. 
Note # the default user will have a special config # entry 'default' which will be marked as true diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 4fa13db8..00379e03 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -6,11 +6,10 @@ import os.path import mocker from mocker import MockerTestCase -from cloudinit.sources import DataSourceConfigDrive as ds +from cloudinit import helpers from cloudinit import settings +from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit import util -from cloudinit import helpers - PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index f27af826..8f0d8896 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -14,6 +14,7 @@ bcfg = { 'groups': ["foo"] } + class TestUGNormalize(MockerTestCase): def _make_distro(self, dtype, def_user=None): -- cgit v1.2.3 From 182bf60757b76f86ffb4e5ee9aebcc8ce0fcc74f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 23 Oct 2012 06:35:58 -0400 Subject: tweak default ubuntu user --- config/cloud.cfg | 3 --- 1 file changed, 3 deletions(-) diff --git a/config/cloud.cfg b/config/cloud.cfg index efa0fa36..f6c9065a 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -76,9 +76,6 @@ system_info: # Default user name + that default users groups (if added/used) default_user: name: Ubuntu - plain_text_passwd: 'ubuntu' - home: /home/ubuntu - shell: /bin/bash lock_passwd: True gecos: Ubuntu groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev] -- cgit v1.2.3 From 179f82a9719c2850e465f7f06221978f3be15ffc Mon Sep 17 00:00:00 2001 From: Thomas Hervé Date: Tue, 23 Oct 2012 12:57:26 +0200 Subject: Restart landscape at the end of the configuration --- cloudinit/config/cc_landscape.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 331559f4..56ab0ce3 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -84,6 +84,7 @@ def handle(_name, cfg, cloud, log, _args): log.debug("Wrote landscape config file to %s", lsc_client_fn) util.write_file(LS_DEFAULT_FILE, "RUN=1\n") + util.subp(["service", "landscape-client", "restart"]) def merge_together(objs): -- cgit v1.2.3 From 2953d9d448fde3af19fc96ae00f41066f510d6fd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 23 Oct 2012 07:54:40 -0700 Subject: No need for the get default users groups function when its provided by the get user function. --- cloudinit/distros/__init__.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 6ef63442..11a72da1 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -175,9 +175,6 @@ class Distro(object): def get_default_user(self): return self.get_option('default_user') - def get_default_user_groups(self): - return self.get_option('default_user_groups') - def create_user(self, name, **kwargs): """ Creates users for the system using the GNU passwd tools. 
This -- cgit v1.2.3 From 758e152721891c707573757fe7a7ff410ec446e2 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 24 Oct 2012 20:31:19 -0700 Subject: Handle the case where newer versions of boto are used that lazily load the metadata from the ec2 metadata service. 1. Add a ec2_utils module that checks which version of boto is being used and under the right versions the metadata dictionary will be expanded. 2. Use this new ec2_utils module in the cloudstack and ec2 datasources as there entrypoints into boto. --- cloudinit/ec2_utils.py | 65 +++++++++++++++++++++++++++++++ cloudinit/sources/DataSourceCloudStack.py | 11 +++--- cloudinit/sources/DataSourceEc2.py | 15 ++++--- 3 files changed, 79 insertions(+), 12 deletions(-) create mode 100644 cloudinit/ec2_utils.py diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py new file mode 100644 index 00000000..76699102 --- /dev/null +++ b/cloudinit/ec2_utils.py @@ -0,0 +1,65 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import pkg_resources +from pkg_resources import parse_version + +import cloudinit.util as util +import cloudinit.url_helper as uh + +import boto.utils as boto_utils + + +BOTO_LAZY = False +try: + _boto_lib = pkg_resources.get_distribution('boto') + if _boto_lib.parsed_version > parse_version("2.5.2"): + BOTO_LAZY = True +except pkg_resources.DistributionNotFound: + pass + + +# Versions of boto >= 2.6.0 try to lazily load +# the metadata backing, which doesn't work so well +# in cloud-init especially since the metadata is +# serialized and actions are performed where the +# metadata server may be blocked (thus the datasource +# will start failing) resulting in url exceptions +# when fields that do exist (or would have existed) +# do not exist due to the blocking that occurred. 
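+#
+# To work around that, _unlazy_dict (below) walks the metadata dictionary,
+# including any nested dictionaries, so every lazy value is fetched while
+# the metadata service is still reachable.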
+def _unlazy_dict(mp): + if not isinstance(mp, (dict)): + return mp + if not BOTO_LAZY: + return mp + for (k, v) in mp.items(): + _unlazy_dict(v) + + +def get_instance_userdata(api_version, metadata_address): + ud = boto_utils.get_instance_userdata(api_version, None, metadata_address) + if not ud: + ud = '' + return ud + + +def get_instance_metadata(api_version, metadata_address): + metadata = boto_utils.get_instance_metadata(api_version, metadata_address) + if not isinstance(metadata, (dict)): + metadata = {} + return _unlazy_dict(metadata) diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index f7ffa7cb..78cf24d7 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -26,8 +26,7 @@ from struct import pack import os import time -import boto.utils as boto_utils - +from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper as uhelp @@ -116,10 +115,10 @@ class DataSourceCloudStack(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver, - None, self.metadata_address) - self.metadata = boto_utils.get_instance_metadata(self.api_ver, - self.metadata_address) + self.userdata_raw = ec2.get_instance_userdata(self.api_ver, + self.metadata_address) + self.metadata = ec2.get_instance_metadata(self.api_ver, + self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) return True diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 3686fa10..3da7b54e 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -23,8 +23,7 @@ import os import time -import boto.utils as boto_utils - +from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper as uhelp @@ -53,6 +52,10 @@ class DataSourceEc2(sources.DataSource): def __str__(self): return util.obj_name(self) + def __getstate__(self): + # Versions of boto + pass + def get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): @@ -65,10 +68,10 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver, - None, self.metadata_address) - self.metadata = boto_utils.get_instance_metadata(self.api_ver, - self.metadata_address) + self.userdata_raw = ec2.get_instance_userdata(self.api_ver, + self.metadata_address) + self.metadata = ec2.get_instance_metadata(self.api_ver, + self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) return True -- cgit v1.2.3 From 8ffc2c8f791b7694a121ec30dac7437c6e8fdb9b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 24 Oct 2012 20:35:30 -0700 Subject: Remove function that shouldn't have shown up. 
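The function in question is the stub __getstate__ that the previous commit
accidentally added to DataSourceEc2; it was never meant to land, so drop it.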
--- cloudinit/sources/DataSourceEc2.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 3da7b54e..0fc79b32 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -52,10 +52,6 @@ class DataSourceEc2(sources.DataSource): def __str__(self): return util.obj_name(self) - def __getstate__(self): - # Versions of boto - pass - def get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): -- cgit v1.2.3 From ec28772204c9b262e34cc7837e3baac0dac5ec5e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 24 Oct 2012 20:37:12 -0700 Subject: Move the comment to the top + mark as fixing. LP: #1068801 --- cloudinit/ec2_utils.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 76699102..06b302f2 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -24,6 +24,14 @@ import cloudinit.url_helper as uh import boto.utils as boto_utils +# Versions of boto >= 2.6.0 try to lazily load +# the metadata backing, which doesn't work so well +# in cloud-init especially since the metadata is +# serialized and actions are performed where the +# metadata server may be blocked (thus the datasource +# will start failing) resulting in url exceptions +# when fields that do exist (or would have existed) +# do not exist due to the blocking that occurred. BOTO_LAZY = False try: @@ -34,14 +42,6 @@ except pkg_resources.DistributionNotFound: pass -# Versions of boto >= 2.6.0 try to lazily load -# the metadata backing, which doesn't work so well -# in cloud-init especially since the metadata is -# serialized and actions are performed where the -# metadata server may be blocked (thus the datasource -# will start failing) resulting in url exceptions -# when fields that do exist (or would have existed) -# do not exist due to the blocking that occurred. def _unlazy_dict(mp): if not isinstance(mp, (dict)): return mp -- cgit v1.2.3 From aa8b51a48a30e3a3c863ca0ddb8bc4667026d57a Mon Sep 17 00:00:00 2001 From: harlowja Date: Sat, 27 Oct 2012 19:25:48 -0700 Subject: Helpful cleanups. 1. Remove the usage of the path.join function now that all code should be going through the util file methods (and they can be mocked out as needed). 2. Adjust all occurences of the above join function to either not use it or replace it with the standard os.path.join (which can also be mocked out as needed) 3. Fix pylint from complaining about the tests folder 'helpers.py' not being found 4. Add a pylintrc file that is used instead of the options hidden in the 'run_pylint' tool. 
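For items 1 and 2, the shape of the change in the diffs below is simply
dropping the read/write-root indirection that Paths.join used to provide,
roughly:

    # before: file paths were funnelled through the Paths helper so a
    # read_root/write_root prefix could be applied
    util.write_file(cloud.paths.join(False, proxy_filename), contents)

    # after: modules use the real path directly; tests mock the util file
    # methods (or os.path.join) when they need to redirect writes
    util.write_file(proxy_filename, contents)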
--- Makefile | 8 +-- cloudinit/config/cc_apt_pipelining.py | 12 ++-- cloudinit/config/cc_apt_update_upgrade.py | 13 ++-- cloudinit/config/cc_ca_certs.py | 24 ++++---- cloudinit/config/cc_chef.py | 30 +++++----- cloudinit/config/cc_landscape.py | 14 ++--- cloudinit/config/cc_mcollective.py | 22 +++---- cloudinit/config/cc_mounts.py | 9 ++- cloudinit/config/cc_phone_home.py | 4 +- cloudinit/config/cc_puppet.py | 70 ++++++++++------------ cloudinit/config/cc_resizefs.py | 5 +- cloudinit/config/cc_rsyslog.py | 3 +- cloudinit/config/cc_runcmd.py | 2 +- cloudinit/config/cc_salt_minion.py | 6 +- cloudinit/config/cc_set_passwords.py | 6 +- cloudinit/config/cc_ssh.py | 16 +++-- cloudinit/config/cc_ssh_authkey_fingerprints.py | 7 +-- cloudinit/config/cc_update_etc_hosts.py | 3 +- cloudinit/distros/__init__.py | 8 +-- cloudinit/distros/debian.py | 26 +++----- cloudinit/helpers.py | 29 +-------- cloudinit/sources/__init__.py | 2 - cloudinit/ssh_util.py | 26 ++++---- pylintrc | 19 ++++++ tests/__init__.py | 0 tests/unittests/__init__.py | 0 tests/unittests/test_datasource/__init__.py | 0 tests/unittests/test_distros/__init__.py | 0 tests/unittests/test_filters/__init__.py | 0 tests/unittests/test_filters/test_launch_index.py | 10 +--- tests/unittests/test_handler/__init__.py | 0 .../test_handler/test_handler_ca_certs.py | 18 +++--- tests/unittests/test_runs/__init__.py | 0 tests/unittests/test_runs/test_simple_run.py | 10 +--- tools/run-pylint | 19 ++---- 35 files changed, 170 insertions(+), 251 deletions(-) create mode 100644 pylintrc create mode 100644 tests/__init__.py create mode 100644 tests/unittests/__init__.py create mode 100644 tests/unittests/test_datasource/__init__.py create mode 100644 tests/unittests/test_distros/__init__.py create mode 100644 tests/unittests/test_filters/__init__.py create mode 100644 tests/unittests/test_handler/__init__.py create mode 100644 tests/unittests/test_runs/__init__.py diff --git a/Makefile b/Makefile index 49324ca0..8f5646b7 100644 --- a/Makefile +++ b/Makefile @@ -1,20 +1,20 @@ CWD=$(shell pwd) -PY_FILES=$(shell find cloudinit bin tests tools -name "*.py") +PY_FILES=$(shell find cloudinit bin tests tools -type f -name "*.py") PY_FILES+="bin/cloud-init" all: test pep8: - $(CWD)/tools/run-pep8 $(PY_FILES) + @$(CWD)/tools/run-pep8 $(PY_FILES) pylint: - $(CWD)/tools/run-pylint $(PY_FILES) + @$(CWD)/tools/run-pylint $(PY_FILES) pyflakes: pyflakes $(PY_FILES) test: - nosetests $(noseopts) tests/unittests/ + @nosetests $(noseopts) tests/ 2to3: 2to3 $(PY_FILES) diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index 02056ee0..e5629175 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -34,26 +34,24 @@ APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n" # on TCP connections - otherwise data corruption will occur. 
-def handle(_name, cfg, cloud, log, _args): +def handle(_name, cfg, _cloud, log, _args): apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False) apt_pipe_value_s = str(apt_pipe_value).lower().strip() if apt_pipe_value_s == "false": - write_apt_snippet(cloud, "0", log, DEFAULT_FILE) + write_apt_snippet("0", log, DEFAULT_FILE) elif apt_pipe_value_s in ("none", "unchanged", "os"): return elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]: - write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE) + write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE) else: log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) -def write_apt_snippet(cloud, setting, log, f_name): +def write_apt_snippet(setting, log, f_name): """Writes f_name with apt pipeline depth 'setting'.""" file_contents = APT_PIPE_TPL % (setting) - - util.write_file(cloud.paths.join(False, f_name), file_contents) - + util.write_file(f_name, file_contents) log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting) diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py index 356bb98d..59c34b59 100644 --- a/cloudinit/config/cc_apt_update_upgrade.py +++ b/cloudinit/config/cc_apt_update_upgrade.py @@ -78,8 +78,7 @@ def handle(name, cfg, cloud, log, _args): try: # See man 'apt.conf' contents = PROXY_TPL % (proxy) - util.write_file(cloud.paths.join(False, proxy_filename), - contents) + util.write_file(proxy_filename, contents) except Exception as e: util.logexc(log, "Failed to write proxy to %s", proxy_filename) elif os.path.isfile(proxy_filename): @@ -90,7 +89,7 @@ def handle(name, cfg, cloud, log, _args): params = mirrors params['RELEASE'] = release params['MIRROR'] = mirror - errors = add_sources(cloud, cfg['apt_sources'], params) + errors = add_sources(cfg['apt_sources'], params) for e in errors: log.warn("Source Error: %s", ':'.join(e)) @@ -196,11 +195,10 @@ def generate_sources_list(codename, mirrors, cloud, log): params = {'codename': codename} for k in mirrors: params[k] = mirrors[k] - out_fn = cloud.paths.join(False, '/etc/apt/sources.list') - templater.render_to_file(template_fn, out_fn, params) + templater.render_to_file(template_fn, '/etc/apt/sources.list', params) -def add_sources(cloud, srclist, template_params=None): +def add_sources(srclist, template_params=None): """ add entries in /etc/apt/sources.list.d for each abbreviated sources.list entry in 'srclist'. When rendering template, also @@ -250,8 +248,7 @@ def add_sources(cloud, srclist, template_params=None): try: contents = "%s\n" % (source) - util.write_file(cloud.paths.join(False, ent['filename']), - contents, omode="ab") + util.write_file(ent['filename'], contents, omode="ab") except: errorlist.append([source, "failed write to file %s" % ent['filename']]) diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index dc046bda..20f24357 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -22,6 +22,7 @@ CA_CERT_PATH = "/usr/share/ca-certificates/" CA_CERT_FILENAME = "cloud-init-ca-certs.crt" CA_CERT_CONFIG = "/etc/ca-certificates.conf" CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/" +CA_CERT_FULL_PATH = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) distros = ['ubuntu', 'debian'] @@ -33,7 +34,7 @@ def update_ca_certs(): util.subp(["update-ca-certificates"], capture=False) -def add_ca_certs(paths, certs): +def add_ca_certs(certs): """ Adds certificates to the system. 
To actually apply the new certificates you must also call L{update_ca_certs}. @@ -43,27 +44,24 @@ def add_ca_certs(paths, certs): if certs: # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME) - cert_file_fullpath = paths.join(False, cert_file_fullpath) - util.write_file(cert_file_fullpath, cert_file_contents, mode=0644) + util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644) # Append cert filename to CA_CERT_CONFIG file. - util.write_file(paths.join(False, CA_CERT_CONFIG), - "\n%s" % CA_CERT_FILENAME, omode="ab") + util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab") -def remove_default_ca_certs(paths): +def remove_default_ca_certs(): """ Removes all default trusted CA certificates from the system. To actually apply the change you must also call L{update_ca_certs}. """ - util.delete_dir_contents(paths.join(False, CA_CERT_PATH)) - util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH)) - util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644) + util.delete_dir_contents(CA_CERT_PATH) + util.delete_dir_contents(CA_CERT_SYSTEM_PATH) + util.write_file(CA_CERT_CONFIG, "", mode=0644) debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" util.subp(('debconf-set-selections', '-'), debconf_sel) -def handle(name, cfg, cloud, log, _args): +def handle(name, cfg, _cloud, log, _args): """ Call to handle ca-cert sections in cloud-config file. @@ -85,14 +83,14 @@ def handle(name, cfg, cloud, log, _args): # default trusted CA certs first. if ca_cert_cfg.get("remove-defaults", False): log.debug("Removing default certificates") - remove_default_ca_certs(cloud.paths) + remove_default_ca_certs() # If we are given any new trusted CA certs to add, add them. if "trusted" in ca_cert_cfg: trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted") if trusted_certs: log.debug("Adding %d certificates" % len(trusted_certs)) - add_ca_certs(cloud.paths, trusted_certs) + add_ca_certs(trusted_certs) # Update the system with the new cert configuration. log.debug("Updating certificates") diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 6f568261..7a3d6a31 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -26,6 +26,15 @@ from cloudinit import util RUBY_VERSION_DEFAULT = "1.8" +CHEF_DIRS = [ + '/etc/chef', + '/var/log/chef', + '/var/lib/chef', + '/var/cache/chef', + '/var/backups/chef', + '/var/run/chef', +] + def handle(name, cfg, cloud, log, _args): @@ -37,24 +46,15 @@ def handle(name, cfg, cloud, log, _args): chef_cfg = cfg['chef'] # Ensure the chef directories we use exist - c_dirs = [ - '/etc/chef', - '/var/log/chef', - '/var/lib/chef', - '/var/cache/chef', - '/var/backups/chef', - '/var/run/chef', - ] - for d in c_dirs: - util.ensure_dir(cloud.paths.join(False, d)) + for d in CHEF_DIRS: + util.ensure_dir(d) # Set the validation key based on the presence of either 'validation_key' # or 'validation_cert'. 
In the case where both exist, 'validation_key' # takes precedence for key in ('validation_key', 'validation_cert'): if key in chef_cfg and chef_cfg[key]: - v_fn = cloud.paths.join(False, '/etc/chef/validation.pem') - util.write_file(v_fn, chef_cfg[key]) + util.write_file('/etc/chef/validation.pem', chef_cfg[key]) break # Create the chef config from template @@ -68,8 +68,7 @@ def handle(name, cfg, cloud, log, _args): '_default'), 'validation_name': chef_cfg['validation_name'] } - out_fn = cloud.paths.join(False, '/etc/chef/client.rb') - templater.render_to_file(template_fn, out_fn, params) + templater.render_to_file(template_fn, '/etc/chef/client.rb', params) else: log.warn("No template found, not rendering to /etc/chef/client.rb") @@ -81,8 +80,7 @@ def handle(name, cfg, cloud, log, _args): initial_attributes = chef_cfg['initial_attributes'] for k in list(initial_attributes.keys()): initial_json[k] = initial_attributes[k] - firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json') - util.write_file(firstboot_fn, json.dumps(initial_json)) + util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json)) # If chef is not installed, we install chef based on 'install_type' if not os.path.isfile('/usr/bin/chef-client'): diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 56ab0ce3..02610dd0 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -66,22 +66,16 @@ def handle(_name, cfg, cloud, log, _args): merge_data = [ LSC_BUILTIN_CFG, - cloud.paths.join(True, LSC_CLIENT_CFG_FILE), + LSC_CLIENT_CFG_FILE, ls_cloudcfg, ] merged = merge_together(merge_data) - - lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE) - lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn)) - if not os.path.isdir(lsc_dir): - util.ensure_dir(lsc_dir) - contents = StringIO() merged.write(contents) - contents.flush() - util.write_file(lsc_client_fn, contents.getvalue()) - log.debug("Wrote landscape config file to %s", lsc_client_fn) + util.ensure_dir(os.path.dirname(LSC_CLIENT_CFG_FILE)) + util.write_file(LSC_CLIENT_CFG_FILE, contents.getvalue()) + log.debug("Wrote landscape config file to %s", LSC_CLIENT_CFG_FILE) util.write_file(LS_DEFAULT_FILE, "RUN=1\n") util.subp(["service", "landscape-client", "restart"]) diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py index 2acdbc6f..b670390d 100644 --- a/cloudinit/config/cc_mcollective.py +++ b/cloudinit/config/cc_mcollective.py @@ -29,6 +29,7 @@ from cloudinit import util PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem" PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem" +SERVER_CFG = '/etc/mcollective/server.cfg' def handle(name, cfg, cloud, log, _args): @@ -48,26 +49,23 @@ def handle(name, cfg, cloud, log, _args): if 'conf' in mcollective_cfg: # Read server.cfg values from the # original file in order to be able to mix the rest up - server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg') - mcollective_config = ConfigObj(server_cfg_fn) + mcollective_config = ConfigObj(SERVER_CFG) # See: http://tiny.cc/jh9agw for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems(): if cfg_name == 'public-cert': - pubcert_fn = cloud.paths.join(True, PUBCERT_FILE) - util.write_file(pubcert_fn, cfg, mode=0644) - mcollective_config['plugin.ssl_server_public'] = pubcert_fn + util.write_file(PUBCERT_FILE, cfg, mode=0644) + mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE mcollective_config['securityprovider'] = 'ssl' elif 
cfg_name == 'private-cert': - pricert_fn = cloud.paths.join(True, PRICERT_FILE) - util.write_file(pricert_fn, cfg, mode=0600) - mcollective_config['plugin.ssl_server_private'] = pricert_fn + util.write_file(PRICERT_FILE, cfg, mode=0600) + mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE mcollective_config['securityprovider'] = 'ssl' else: if isinstance(cfg, (basestring, str)): # Just set it in the 'main' section mcollective_config[cfg_name] = cfg elif isinstance(cfg, (dict)): - # Iterate throug the config items, create a section + # Iterate through the config items, create a section # if it is needed and then add/or create items as needed if cfg_name not in mcollective_config.sections: mcollective_config[cfg_name] = {} @@ -78,14 +76,12 @@ def handle(name, cfg, cloud, log, _args): mcollective_config[cfg_name] = str(cfg) # We got all our config as wanted we'll rename # the previous server.cfg and create our new one - old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old') - util.rename(server_cfg_fn, old_fn) + util.rename(SERVER_CFG, "%s.old" % (SERVER_CFG)) # Now we got the whole file, write to disk... contents = StringIO() mcollective_config.write(contents) contents = contents.getvalue() - server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg') - util.write_file(server_cfg_rw, contents, mode=0644) + util.write_file(SERVER_CFG, contents, mode=0644) # Start mcollective util.subp(['service', 'mcollective', 'start'], capture=False) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 14c965bb..cb772c86 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -28,6 +28,7 @@ from cloudinit import util SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$" SHORTNAME = re.compile(SHORTNAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) +FSTAB_PATH = "/etc/fstab" def is_mdname(name): @@ -167,8 +168,7 @@ def handle(_name, cfg, cloud, log, _args): cc_lines.append('\t'.join(line)) fstab_lines = [] - fstab = util.load_file(cloud.paths.join(True, "/etc/fstab")) - for line in fstab.splitlines(): + for line in util.load_file(FSTAB_PATH).splitlines(): try: toks = WS.split(line) if toks[3].find(comment) != -1: @@ -179,7 +179,7 @@ def handle(_name, cfg, cloud, log, _args): fstab_lines.extend(cc_lines) contents = "%s\n" % ('\n'.join(fstab_lines)) - util.write_file(cloud.paths.join(False, "/etc/fstab"), contents) + util.write_file(FSTAB_PATH, contents) if needswap: try: @@ -188,9 +188,8 @@ def handle(_name, cfg, cloud, log, _args): util.logexc(log, "Activating swap via 'swapon -a' failed") for d in dirs: - real_dir = cloud.paths.join(False, d) try: - util.ensure_dir(real_dir) + util.ensure_dir(d) except: util.logexc(log, "Failed to make '%s' config-mount", d) diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py index ae1349eb..886487f8 100644 --- a/cloudinit/config/cc_phone_home.py +++ b/cloudinit/config/cc_phone_home.py @@ -84,10 +84,10 @@ def handle(name, cfg, cloud, log, args): for (n, path) in pubkeys.iteritems(): try: - all_keys[n] = util.load_file(cloud.paths.join(True, path)) + all_keys[n] = util.load_file(path) except: util.logexc(log, ("%s: failed to open, can not" - " phone home that data"), path) + " phone home that data!"), path) submit_keys = {} for k in post_list: diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 74ee18e1..8fe3af57 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -21,12 +21,32 @@ from StringIO 
import StringIO import os -import pwd import socket from cloudinit import helpers from cloudinit import util +PUPPET_CONF_PATH = '/etc/puppet/puppet.conf' +PUPPET_SSL_CERT_DIR = '/var/lib/puppet/ssl/certs/' +PUPPET_SSL_DIR = '/var/lib/puppet/ssl' +PUPPET_SSL_CERT_PATH = '/var/lib/puppet/ssl/certs/ca.pem' + + +def _autostart_puppet(log): + # Set puppet to automatically start + if os.path.exists('/etc/default/puppet'): + util.subp(['sed', '-i', + '-e', 's/^START=.*/START=yes/', + '/etc/default/puppet'], capture=False) + elif os.path.exists('/bin/systemctl'): + util.subp(['/bin/systemctl', 'enable', 'puppet.service'], + capture=False) + elif os.path.exists('/sbin/chkconfig'): + util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) + else: + log.warn(("Sorry we do not know how to enable" + " puppet services on this system")) + def handle(name, cfg, cloud, log, _args): # If there isn't a puppet key in the configuration don't do anything @@ -43,8 +63,7 @@ def handle(name, cfg, cloud, log, _args): # ... and then update the puppet configuration if 'conf' in puppet_cfg: # Add all sections from the conf object to puppet.conf - puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf') - contents = util.load_file(puppet_conf_fn) + contents = util.load_file(PUPPET_CONF_PATH) # Create object for reading puppet.conf values puppet_config = helpers.DefaultingConfigParser() # Read puppet.conf values from original file in order to be able to @@ -53,28 +72,19 @@ def handle(name, cfg, cloud, log, _args): cleaned_lines = [i.lstrip() for i in contents.splitlines()] cleaned_contents = '\n'.join(cleaned_lines) puppet_config.readfp(StringIO(cleaned_contents), - filename=puppet_conf_fn) + filename=PUPPET_CONF_PATH) for (cfg_name, cfg) in puppet_cfg['conf'].iteritems(): # Cert configuration is a special case # Dump the puppet master ca certificate in the correct place if cfg_name == 'ca_cert': # Puppet ssl sub-directory isn't created yet # Create it with the proper permissions and ownership - pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl') - util.ensure_dir(pp_ssl_dir, 0771) - util.chownbyid(pp_ssl_dir, - pwd.getpwnam('puppet').pw_uid, 0) - pp_ssl_certs = cloud.paths.join(False, - '/var/lib/puppet/ssl/certs/') - util.ensure_dir(pp_ssl_certs) - util.chownbyid(pp_ssl_certs, - pwd.getpwnam('puppet').pw_uid, 0) - pp_ssl_ca_certs = cloud.paths.join(False, - ('/var/lib/puppet/' - 'ssl/certs/ca.pem')) - util.write_file(pp_ssl_ca_certs, cfg) - util.chownbyid(pp_ssl_ca_certs, - pwd.getpwnam('puppet').pw_uid, 0) + util.ensure_dir(PUPPET_SSL_DIR, 0771) + util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root') + util.ensure_dir(PUPPET_SSL_CERT_DIR) + util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root') + util.write_file(PUPPET_SSL_CERT_PATH, str(cfg)) + util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root') else: # Iterate throug the config items, we'll use ConfigParser.set # to overwrite or create new items as needed @@ -90,25 +100,11 @@ def handle(name, cfg, cloud, log, _args): puppet_config.set(cfg_name, o, v) # We got all our config as wanted we'll rename # the previous puppet.conf and create our new one - conf_old_fn = cloud.paths.join(False, - '/etc/puppet/puppet.conf.old') - util.rename(puppet_conf_fn, conf_old_fn) - puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf') - util.write_file(puppet_conf_rw, puppet_config.stringify()) + util.rename(PUPPET_CONF_PATH, "%s.old" % (PUPPET_CONF_PATH)) + util.write_file(PUPPET_CONF_PATH, puppet_config.stringify()) - # Set puppet to 
automatically start - if os.path.exists('/etc/default/puppet'): - util.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) - elif os.path.exists('/bin/systemctl'): - util.subp(['/bin/systemctl', 'enable', 'puppet.service'], - capture=False) - elif os.path.exists('/sbin/chkconfig'): - util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False) - else: - log.warn(("Sorry we do not know how to enable" - " puppet services on this system")) + # Set it up so it autostarts + _autostart_puppet(log) # Start puppetd util.subp(['service', 'puppet', 'start'], capture=False) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index e7f27944..b958f332 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -62,7 +62,7 @@ def get_fs_type(st_dev, path, log): raise -def handle(name, cfg, cloud, log, args): +def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] else: @@ -74,11 +74,10 @@ def handle(name, cfg, cloud, log, args): # TODO(harlowja) is the directory ok to be used?? resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run") - resize_root_d = cloud.paths.join(False, resize_root_d) util.ensure_dir(resize_root_d) # TODO(harlowja): allow what is to be resized to be configurable?? - resize_what = cloud.paths.join(False, "/") + resize_what = "/" with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", dir=resize_root_d, delete=True) as tfh: devpth = tfh.name diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index 78327526..0c2c6880 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -71,8 +71,7 @@ def handle(name, cfg, cloud, log, _args): try: contents = "%s\n" % (content) - util.write_file(cloud.paths.join(False, filename), - contents, omode=omode) + util.write_file(filename, contents, omode=omode) except Exception: util.logexc(log, "Failed to write to %s", filename) diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py index 65064cfb..598c3a3e 100644 --- a/cloudinit/config/cc_runcmd.py +++ b/cloudinit/config/cc_runcmd.py @@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args): cmd = cfg["runcmd"] try: content = util.shellify(cmd) - util.write_file(cloud.paths.join(False, out_fn), content, 0700) + util.write_file(out_fn, content, 0700) except: util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn) diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index 8a1440d9..f3eede18 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -34,8 +34,7 @@ def handle(name, cfg, cloud, log, _args): cloud.distro.install_packages(["salt-minion"]) # Ensure we can configure files at the right dir - config_dir = cloud.paths.join(False, salt_cfg.get("config_dir", - '/etc/salt')) + config_dir = salt_cfg.get("config_dir", '/etc/salt') util.ensure_dir(config_dir) # ... and then update the salt configuration @@ -47,8 +46,7 @@ def handle(name, cfg, cloud, log, _args): # ... 
copy the key pair if specified if 'public_key' in salt_cfg and 'private_key' in salt_cfg: - pki_dir = cloud.paths.join(False, salt_cfg.get('pki_dir', - '/etc/salt/pki')) + pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki') with util.umask(077): util.ensure_dir(pki_dir) pub_name = os.path.join(pki_dir, 'minion.pub') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 26c558ad..c6bf62fd 100644 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -114,8 +114,7 @@ def handle(_name, cfg, cloud, log, args): replaced_auth = False # See: man sshd_config - conf_fn = cloud.paths.join(True, ssh_util.DEF_SSHD_CFG) - old_lines = ssh_util.parse_ssh_config(conf_fn) + old_lines = ssh_util.parse_ssh_config(ssh_util.DEF_SSHD_CFG) new_lines = [] i = 0 for (i, line) in enumerate(old_lines): @@ -134,8 +133,7 @@ def handle(_name, cfg, cloud, log, args): pw_auth)) lines = [str(e) for e in new_lines] - ssh_rw_fn = cloud.paths.join(False, ssh_util.DEF_SSHD_CFG) - util.write_file(ssh_rw_fn, "\n".join(lines)) + util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines)) try: cmd = ['service'] diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 32e48c30..b623d476 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -59,7 +59,7 @@ def handle(_name, cfg, cloud, log, _args): # remove the static keys from the pristine image if cfg.get("ssh_deletekeys", True): - key_pth = cloud.paths.join(False, "/etc/ssh/", "ssh_host_*key*") + key_pth = os.path.join("/etc/ssh/", "ssh_host_*key*") for f in glob.glob(key_pth): try: util.del_file(f) @@ -72,8 +72,7 @@ def handle(_name, cfg, cloud, log, _args): if key in KEY_2_FILE: tgt_fn = KEY_2_FILE[key][0] tgt_perms = KEY_2_FILE[key][1] - util.write_file(cloud.paths.join(False, tgt_fn), - val, tgt_perms) + util.write_file(tgt_fn, val, tgt_perms) for (priv, pub) in PRIV_2_PUB.iteritems(): if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']: @@ -94,7 +93,7 @@ def handle(_name, cfg, cloud, log, _args): 'ssh_genkeytypes', GENERATE_KEY_NAMES) for keytype in genkeys: - keyfile = cloud.paths.join(False, KEY_FILE_TPL % (keytype)) + keyfile = KEY_FILE_TPL % (keytype) util.ensure_dir(os.path.dirname(keyfile)) if not os.path.exists(keyfile): cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile] @@ -118,17 +117,16 @@ def handle(_name, cfg, cloud, log, _args): cfgkeys = cfg["ssh_authorized_keys"] keys.extend(cfgkeys) - apply_credentials(keys, user, cloud.paths, - disable_root, disable_root_opts) + apply_credentials(keys, user, disable_root, disable_root_opts) except: util.logexc(log, "Applying ssh credentials failed!") -def apply_credentials(keys, user, paths, disable_root, disable_root_opts): +def apply_credentials(keys, user, disable_root, disable_root_opts): keys = set(keys) if user: - ssh_util.setup_user_keys(keys, user, '', paths) + ssh_util.setup_user_keys(keys, user, '') if disable_root: if not user: @@ -137,4 +135,4 @@ def apply_credentials(keys, user, paths, disable_root, disable_root_opts): else: key_prefix = '' - ssh_util.setup_user_keys(keys, 'root', key_prefix, paths) + ssh_util.setup_user_keys(keys, 'root', key_prefix) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 8c9a8806..c38bcea2 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -97,9 +97,8 @@ def handle(name, cfg, cloud, log, _args): "logging of ssh fingerprints 
disabled"), name) hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") - extract_func = ssh_util.extract_authorized_keys (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): - (auth_key_fn, auth_key_entries) = extract_func(user_name, cloud.paths) - _pprint_key_entries(user_name, auth_key_fn, - auth_key_entries, hash_meth) + (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name) + _pprint_key_entries(user_name, key_fn, + key_entries, hash_meth) diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 4d75000f..96103615 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -42,8 +42,7 @@ def handle(name, cfg, cloud, log, _args): raise RuntimeError(("No hosts template could be" " found for distro %s") % (cloud.distro.name)) - out_fn = cloud.paths.join(False, '/etc/hosts') - templater.render_to_file(tpl_fn_name, out_fn, + templater.render_to_file(tpl_fn_name, '/etc/hosts', {'hostname': hostname, 'fqdn': fqdn}) elif manage_hosts == "localhost": diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2fbb0e9b..869540d2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -122,8 +122,7 @@ class Distro(object): new_etchosts = StringIO() need_write = False need_change = True - hosts_ro_fn = self._paths.join(True, "/etc/hosts") - for line in util.load_file(hosts_ro_fn).splitlines(): + for line in util.load_file("/etc/hosts").splitlines(): if line.strip().startswith(header): continue if not line.strip() or line.strip().startswith("#"): @@ -147,8 +146,7 @@ class Distro(object): need_write = True if need_write: contents = new_etchosts.getvalue() - util.write_file(self._paths.join(False, "/etc/hosts"), - contents, mode=0644) + util.write_file("/etc/hosts", contents, mode=0644) def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] @@ -262,7 +260,7 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, None, self._paths) + ssh_util.setup_user_keys(keys, name, key_prefix=None) return True diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 88f4e978..cc7e53a0 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -43,7 +43,7 @@ class Distro(distros.Distro): def apply_locale(self, locale, out_fn=None): if not out_fn: - out_fn = self._paths.join(False, '/etc/default/locale') + out_fn = '/etc/default/locale' util.subp(['locale-gen', locale], capture=False) util.subp(['update-locale', locale], capture=False) lines = ["# Created by cloud-init", 'LANG="%s"' % (locale), ""] @@ -54,8 +54,7 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _write_network(self, settings): - net_fn = self._paths.join(False, "/etc/network/interfaces") - util.write_file(net_fn, settings) + util.write_file("/etc/network/interfaces", settings) return ['all'] def _bring_up_interfaces(self, device_names): @@ -69,12 +68,9 @@ class Distro(distros.Distro): return distros.Distro._bring_up_interfaces(self, device_names) def set_hostname(self, hostname): - out_fn = self._paths.join(False, "/etc/hostname") - self._write_hostname(hostname, out_fn) - if out_fn == '/etc/hostname': - # Only do this if we are running in non-adjusted root mode - LOG.debug("Setting hostname to %s", hostname) - util.subp(['hostname', hostname]) + 
self._write_hostname(hostname, "/etc/hostname") + LOG.debug("Setting hostname to %s", hostname) + util.subp(['hostname', hostname]) def _write_hostname(self, hostname, out_fn): # "" gives trailing newline. @@ -82,16 +78,14 @@ class Distro(distros.Distro): def update_hostname(self, hostname, prev_fn): hostname_prev = self._read_hostname(prev_fn) - read_fn = self._paths.join(True, "/etc/hostname") - hostname_in_etc = self._read_hostname(read_fn) + hostname_in_etc = self._read_hostname("/etc/hostname") update_files = [] if not hostname_prev or hostname_prev != hostname: update_files.append(prev_fn) if (not hostname_in_etc or (hostname_in_etc == hostname_prev and hostname_in_etc != hostname)): - write_fn = self._paths.join(False, "/etc/hostname") - update_files.append(write_fn) + update_files.append("/etc/hostname") for fn in update_files: try: self._write_hostname(hostname, fn) @@ -103,7 +97,6 @@ class Distro(distros.Distro): LOG.debug(("%s differs from /etc/hostname." " Assuming user maintained hostname."), prev_fn) if "/etc/hostname" in update_files: - # Only do this if we are running in non-adjusted root mode LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -130,9 +123,8 @@ class Distro(distros.Distro): " no file found at %s") % (tz, tz_file)) # "" provides trailing newline during join tz_lines = ["# Created by cloud-init", str(tz), ""] - tz_fn = self._paths.join(False, "/etc/timezone") - util.write_file(tz_fn, "\n".join(tz_lines)) - util.copy(tz_file, self._paths.join(False, "/etc/localtime")) + util.write_file("/etc/timezone", "\n".join(tz_lines)) + util.copy(tz_file, "/etc/localtime") def package_command(self, command, args=None): e = os.environ.copy() diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index a4b20208..985ce3e5 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -302,14 +302,10 @@ class Paths(object): def __init__(self, path_cfgs, ds=None): self.cfgs = path_cfgs # Populate all the initial paths - self.cloud_dir = self.join(False, - path_cfgs.get('cloud_dir', - '/var/lib/cloud')) + self.cloud_dir = path_cfgs.get('cloud_dir', '/var/lib/cloud') self.instance_link = os.path.join(self.cloud_dir, 'instance') self.boot_finished = os.path.join(self.instance_link, "boot-finished") self.upstart_conf_d = path_cfgs.get('upstart_dir') - if self.upstart_conf_d: - self.upstart_conf_d = self.join(False, self.upstart_conf_d) self.seed_dir = os.path.join(self.cloud_dir, 'seed') # This one isn't joined, since it should just be read-only template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/') @@ -328,29 +324,6 @@ class Paths(object): # Set when a datasource becomes active self.datasource = ds - # joins the paths but also appends a read - # or write root if available - def join(self, read_only, *paths): - if read_only: - root = self.cfgs.get('read_root') - else: - root = self.cfgs.get('write_root') - if not paths: - return root - if len(paths) > 1: - joined = os.path.join(*paths) - else: - joined = paths[0] - if root: - pre_joined = joined - # Need to remove any starting '/' since this - # will confuse os.path.join - joined = joined.lstrip("/") - joined = os.path.join(root, joined) - LOG.debug("Translated %s to adjusted path %s (read-only=%s)", - pre_joined, joined, read_only) - return joined - # get_ipath_cur: get the current instance path for an item def get_ipath_cur(self, name=None): ipath = self.instance_link diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index b22369a8..745627d0 100644 --- 
a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -20,8 +20,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from email.mime.multipart import MIMEMultipart - import abc import os diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 88a11a1a..dd6b742f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -212,17 +212,15 @@ def update_authorized_keys(old_entries, keys): return '\n'.join(lines) -def users_ssh_info(username, paths): +def users_ssh_info(username): pw_ent = pwd.getpwnam(username) - if not pw_ent: + if not pw_ent or not pw_ent.pw_dir: raise RuntimeError("Unable to get ssh info for user %r" % (username)) - ssh_dir = paths.join(False, os.path.join(pw_ent.pw_dir, '.ssh')) - return (ssh_dir, pw_ent) + return (os.path.join(pw_ent.pw_dir, '.ssh'), pw_ent) -def extract_authorized_keys(username, paths): - (ssh_dir, pw_ent) = users_ssh_info(username, paths) - sshd_conf_fn = paths.join(True, DEF_SSHD_CFG) +def extract_authorized_keys(username): + (ssh_dir, pw_ent) = users_ssh_info(username) auth_key_fn = None with util.SeLinuxGuard(ssh_dir, recursive=True): try: @@ -231,7 +229,7 @@ def extract_authorized_keys(username, paths): # The following tokens are defined: %% is replaced by a literal # '%', %h is replaced by the home directory of the user being # authenticated and %u is replaced by the username of that user. - ssh_cfg = parse_ssh_config_map(sshd_conf_fn) + ssh_cfg = parse_ssh_config_map(DEF_SSHD_CFG) auth_key_fn = ssh_cfg.get("authorizedkeysfile", '').strip() if not auth_key_fn: auth_key_fn = "%h/.ssh/authorized_keys" @@ -240,7 +238,6 @@ def extract_authorized_keys(username, paths): auth_key_fn = auth_key_fn.replace("%%", '%') if not auth_key_fn.startswith('/'): auth_key_fn = os.path.join(pw_ent.pw_dir, auth_key_fn) - auth_key_fn = paths.join(False, auth_key_fn) except (IOError, OSError): # Give up and use a default key filename auth_key_fn = os.path.join(ssh_dir, 'authorized_keys') @@ -248,14 +245,13 @@ def extract_authorized_keys(username, paths): " in ssh config" " from %r, using 'AuthorizedKeysFile' file" " %r instead"), - sshd_conf_fn, auth_key_fn) - auth_key_entries = parse_authorized_keys(auth_key_fn) - return (auth_key_fn, auth_key_entries) + DEF_SSHD_CFG, auth_key_fn) + return (auth_key_fn, parse_authorized_keys(auth_key_fn)) -def setup_user_keys(keys, username, key_prefix, paths): +def setup_user_keys(keys, username, key_prefix): # Make sure the users .ssh dir is setup accordingly - (ssh_dir, pwent) = users_ssh_info(username, paths) + (ssh_dir, pwent) = users_ssh_info(username) if not os.path.isdir(ssh_dir): util.ensure_dir(ssh_dir, mode=0700) util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid) @@ -267,7 +263,7 @@ def setup_user_keys(keys, username, key_prefix, paths): key_entries.append(parser.parse(str(k), def_opt=key_prefix)) # Extract the old and make the new - (auth_key_fn, auth_key_entries) = extract_authorized_keys(username, paths) + (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) with util.SeLinuxGuard(ssh_dir, recursive=True): content = update_authorized_keys(auth_key_entries, key_entries) util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700) diff --git a/pylintrc b/pylintrc new file mode 100644 index 00000000..ee886510 --- /dev/null +++ b/pylintrc @@ -0,0 +1,19 @@ +[General] +init-hook='import sys; sys.path.append("tests/")' + +[MESSAGES CONTROL] +# See: http://pylint-messages.wikidot.com/all-codes +# W0142: *args and **kwargs are 
fine. +# W0511: TODOs in code comments are fine. +# W0702: No exception type(s) specified +# W0703: Catch "Exception" +# C0103: Invalid name +# C0111: Missing docstring +disable=W0142,W0511,W0702,W0703,C0103,C0111 + +[REPORTS] +reports=no +include-ids=yes + +[FORMAT] +max-line-length=79 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_datasource/__init__.py b/tests/unittests/test_datasource/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_distros/__init__.py b/tests/unittests/test_distros/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_filters/__init__.py b/tests/unittests/test_filters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py index 1e9b9053..773bb312 100644 --- a/tests/unittests/test_filters/test_launch_index.py +++ b/tests/unittests/test_filters/test_launch_index.py @@ -1,14 +1,6 @@ import copy -import os -import sys -top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") -top_dir = os.path.abspath(top_dir) -if os.path.exists(top_dir): - sys.path.insert(0, os.path.dirname(top_dir)) - - -import helpers +from tests.unittests import helpers import itertools diff --git a/tests/unittests/test_handler/__init__.py b/tests/unittests/test_handler/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index d3df5c50..d73c9fa9 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -77,7 +77,7 @@ class TestConfig(MockerTestCase): """Test that a single cert gets passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1"]}} - self.mock_add(self.paths, ["CERT1"]) + self.mock_add(["CERT1"]) self.mock_update() self.mocker.replay() @@ -87,7 +87,7 @@ class TestConfig(MockerTestCase): """Test that multiple certs get passed to add_ca_certs.""" config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}} - self.mock_add(self.paths, ["CERT1", "CERT2"]) + self.mock_add(["CERT1", "CERT2"]) self.mock_update() self.mocker.replay() @@ -97,7 +97,7 @@ class TestConfig(MockerTestCase): """Test remove_defaults works as expected.""" config = {"ca-certs": {"remove-defaults": True}} - self.mock_remove(self.paths) + self.mock_remove() self.mock_update() self.mocker.replay() @@ -116,8 +116,8 @@ class TestConfig(MockerTestCase): """Test remove_defaults is not called when config value is False.""" config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}} - self.mock_remove(self.paths) - self.mock_add(self.paths, ["CERT1"]) + self.mock_remove() + self.mock_add(["CERT1"]) self.mock_update() self.mocker.replay() @@ -136,7 +136,7 @@ class TestAddCaCerts(MockerTestCase): """Test that no certificate are written if not provided.""" self.mocker.replace(util.write_file, passthrough=False) self.mocker.replay() - cc_ca_certs.add_ca_certs(self.paths, []) + cc_ca_certs.add_ca_certs([]) def test_single_cert(self): """Test adding a single certificate to the trusted CAs.""" @@ -149,7 +149,7 @@ class TestAddCaCerts(MockerTestCase): "\ncloud-init-ca-certs.crt", omode="ab") self.mocker.replay() - 
cc_ca_certs.add_ca_certs(self.paths, [cert]) + cc_ca_certs.add_ca_certs([cert]) def test_multiple_certs(self): """Test adding multiple certificates to the trusted CAs.""" @@ -163,7 +163,7 @@ class TestAddCaCerts(MockerTestCase): "\ncloud-init-ca-certs.crt", omode="ab") self.mocker.replay() - cc_ca_certs.add_ca_certs(self.paths, certs) + cc_ca_certs.add_ca_certs(certs) class TestUpdateCaCerts(MockerTestCase): @@ -198,4 +198,4 @@ class TestRemoveDefaultCaCerts(MockerTestCase): "ca-certificates ca-certificates/trust_new_crts select no") self.mocker.replay() - cc_ca_certs.remove_default_ca_certs(self.paths) + cc_ca_certs.remove_default_ca_certs() diff --git a/tests/unittests/test_runs/__init__.py b/tests/unittests/test_runs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 1e852e1e..22d6cf2c 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -1,14 +1,6 @@ import os -import sys -# Allow running this test individually -top_dir = os.path.join(os.path.dirname(__file__), os.pardir, "helpers.py") -top_dir = os.path.abspath(top_dir) -if os.path.exists(top_dir): - sys.path.insert(0, os.path.dirname(top_dir)) - - -import helpers +from tests.unittests import helpers from cloudinit.settings import (PER_INSTANCE) from cloudinit import stages diff --git a/tools/run-pylint b/tools/run-pylint index 7ef44ac5..b74efda9 100755 --- a/tools/run-pylint +++ b/tools/run-pylint @@ -6,23 +6,16 @@ else files=( "$@" ); fi +RC_FILE="pylintrc" +if [ ! -f $RC_FILE ]; then + RC_FILE="../pylintrc" +fi + cmd=( pylint - --reports=n - --include-ids=y - --max-line-length=79 - + --rcfile=$RC_FILE --disable=R --disable=I - - --disable=W0142 # Used * or ** magic - --disable=W0511 # TODO/FIXME note - --disable=W0702 # No exception type(s) specified - --disable=W0703 # Catch "Exception" - - --disable=C0103 # Invalid name - --disable=C0111 # Missing docstring - "${files[@]}" ) -- cgit v1.2.3 From c508fa0c4159f93df077a2d46ed481fd5d8c8803 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 6 Nov 2012 15:28:29 -0500 Subject: tools/Z99-cloud-locale-test.sh: avoid warning when shell is zsh LP: #1073077 --- ChangeLog | 2 ++ tools/Z99-cloud-locale-test.sh | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index d03c0293..1243ce7f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -6,6 +6,8 @@ - cc_landscape: restart landscape after install or config (LP: #1070345) - multipart/archive. do not fail on unknown headers in multipart mime or cloud-archive config (LP: #1065116). + - tools/Z99-cloud-locale-test.sh: avoid warning when user's shell is + zsh (LP: #1073077) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. 
diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh index 8ad485e8..4012beeb 100755 --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -22,7 +22,7 @@ locale_warn() { case "$w1" in locale:) bad_names="${bad_names} ${w4}";; *) - key=${w1%%=*} + key=${w1%%\=*} val=${w1#*=} val=${val#\"} val=${val%\"} @@ -31,7 +31,7 @@ locale_warn() { done for bad in $bad_names; do for var in ${vars}; do - [ "${bad}" = "${var%=*}" ] || continue + [ "${bad}" = "${var%\=*}" ] || continue value=${var#*=} [ "${bad_lcs#* ${value}}" = "${bad_lcs}" ] && bad_lcs="${bad_lcs} ${value}" -- cgit v1.2.3 From 7d027a031a3649ac04965a09cd26563ac9d760fd Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 6 Nov 2012 14:13:30 -0800 Subject: Fix the case where an unknown type is seen and it has contents which are in unicode, which seems to cause python to blow up since 'string-escape' doesn't work on unicode (at least in 2.6). LP: #1075756 --- cloudinit/handlers/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index 99caed1f..e843eb75 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -171,7 +171,11 @@ def walker_callback(pdata, ctype, filename, payload): elif payload: # Extract the first line or 24 bytes for displaying in the log start = _extract_first_or_bytes(payload, 24) - details = "'%s...'" % (start.encode("string-escape")) + try: + details = "'%s...'" % (start.encode("string-escape")) + except TypeError: + # Unicode doesn't support string-escape... + details = "'%s...'" % (start) if ctype == NOT_MULTIPART_TYPE: LOG.warning("Unhandled non-multipart (%s) userdata: %s", ctype, details) -- cgit v1.2.3 From 7ec0ef04b975eb5b4c40f7ae746d706585c73a02 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 6 Nov 2012 14:24:19 -0800 Subject: Use a method instead + at least attempt the unicode-escape path. --- cloudinit/handlers/__init__.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index e843eb75..d847f331 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -160,6 +160,19 @@ def _extract_first_or_bytes(blob, size): return start +def _escape_string(text): + try: + text = "'%s...'" % (text.encode("string-escape")) + except TypeError: + try: + # Unicode doesn't support string-escape... + text = "'%s...'" % (text.encode('unicode-escape')) + except TypeError: + # Give up... + text = "'%s...'" % (text) + return text + + def walker_callback(pdata, ctype, filename, payload): if ctype in PART_CONTENT_TYPES: walker_handle_handler(pdata, ctype, filename, payload) @@ -171,11 +184,7 @@ def walker_callback(pdata, ctype, filename, payload): elif payload: # Extract the first line or 24 bytes for displaying in the log start = _extract_first_or_bytes(payload, 24) - try: - details = "'%s...'" % (start.encode("string-escape")) - except TypeError: - # Unicode doesn't support string-escape... - details = "'%s...'" % (start) + details = _escape_string(start) if ctype == NOT_MULTIPART_TYPE: LOG.warning("Unhandled non-multipart (%s) userdata: %s", ctype, details) -- cgit v1.2.3 From 9850895442afe55079cecf4fd96fe8430ed960ea Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 6 Nov 2012 14:27:56 -0800 Subject: Do the append after escape.
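For context, a rough usage sketch of what the three escaping changes in this series converge on (illustrative only, not part of the patch; the payload values are invented):

    # Illustrative only: mirrors how walker_callback() uses the helpers after
    # this series: escape first, then append the quotes and ellipsis.
    from cloudinit.handlers import _escape_string, _extract_first_or_bytes

    for payload in ["#!/bin/sh\necho hi\n", u"caf\xe9 user-data\n"]:
        start = _extract_first_or_bytes(payload, 24)
        details = "'%s...'" % (_escape_string(start))
        print details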
--- cloudinit/handlers/__init__.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py index d847f331..8d6dcd4d 100644 --- a/cloudinit/handlers/__init__.py +++ b/cloudinit/handlers/__init__.py @@ -162,14 +162,14 @@ def _extract_first_or_bytes(blob, size): def _escape_string(text): try: - text = "'%s...'" % (text.encode("string-escape")) + return text.encode("string-escape") except TypeError: try: # Unicode doesn't support string-escape... - text = "'%s...'" % (text.encode('unicode-escape')) + return text.encode('unicode-escape') except TypeError: # Give up... - text = "'%s...'" % (text) + pass return text @@ -184,7 +184,7 @@ def walker_callback(pdata, ctype, filename, payload): elif payload: # Extract the first line or 24 bytes for displaying in the log start = _extract_first_or_bytes(payload, 24) - details = _escape_string(start) + details = "'%s...'" % (_escape_string(start)) if ctype == NOT_MULTIPART_TYPE: LOG.warning("Unhandled non-multipart (%s) userdata: %s", ctype, details) -- cgit v1.2.3 From 5ffd19a0fbc473e34ac78f80d264c08ac0125e2b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 09:43:25 -0500 Subject: fix pep8 warnings --- cloudinit/distros/__init__.py | 2 +- tests/unittests/test_datasource/test_configdrive.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 21efe8d9..c848b909 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -439,7 +439,7 @@ def _normalize_groups(grp_cfg): # configuration. # # The output is a dictionary of user -# names => user config which is the standard +# names => user config which is the standard # form used in the rest of cloud-init. 
Note # the default user will have a special config # entry 'default' which will be marked as true diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 4fa13db8..00379e03 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -6,11 +6,10 @@ import os.path import mocker from mocker import MockerTestCase -from cloudinit.sources import DataSourceConfigDrive as ds +from cloudinit import helpers from cloudinit import settings +from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit import util -from cloudinit import helpers - PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' -- cgit v1.2.3 From df3300e23324f2bb4d0d21a433b03708dc2feaeb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 10:07:03 -0500 Subject: update config to address name change --- config/cloud.cfg | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/cloud.cfg b/config/cloud.cfg index b3411d11..51a05821 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -43,7 +43,8 @@ cloud_config_modules: - set-passwords - grub-dpkg - apt-pipelining - - apt-update-upgrade + - apt-configure + - package-update-upgrade-install - landscape - timezone - puppet -- cgit v1.2.3 From 0da58fe113726c7654bca54b365d95044b44ef87 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 10:17:50 -0500 Subject: add ChangeLog entry --- ChangeLog | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ChangeLog b/ChangeLog index 4287d58f..2896f71a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -3,6 +3,9 @@ - config-drive: map hostname to local-hostname (LP: #1061964) - landscape: install landscape-client package if not installed. only take action if cloud-config is present (LP: #1066115) + - split 'apt-update-upgrade' config module into 'apt-configure' and + 'package-update-upgrade-install'. The 'package-update-upgrade-install' + will be a cross distro module. 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. 
-- cgit v1.2.3 From 180620470ba9aae4aac804b8bd66d3af8bd71ee4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 10:29:20 -0500 Subject: adjust documentation to account for apt/package aliases --- doc/examples/cloud-config.txt | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 56a6c35a..04bb5df1 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -3,15 +3,20 @@ # (ie run apt-get update) # # Default: true -# -apt_update: false +# Aliases: apt_update +package_update: false # Upgrade the instance on first boot # (ie run apt-get upgrade) # # Default: false -# -apt_upgrade: true +# Aliases: apt_upgrade +package_upgrade: true + +# Reboot after package install/update if necessary +# Default: false +# Aliases: apt_reboot_if_required +package_reboot_if_required: true # Add apt repositories # -- cgit v1.2.3 From 8b677bc80a39a0eb1a3cc7989c6bffb959fc7a69 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 10:35:39 -0500 Subject: trivial: -name first is faster due to no need for stat --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 8f5646b7..88c90b9b 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ CWD=$(shell pwd) -PY_FILES=$(shell find cloudinit bin tests tools -type f -name "*.py") +PY_FILES=$(shell find cloudinit bin tests tools -name "*.py" -type f ) PY_FILES+="bin/cloud-init" all: test -- cgit v1.2.3 From 1e6fc277a1c8d695c37741cc31f5ddab3d5b5600 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 7 Nov 2012 16:08:17 -0500 Subject: remove dead code from DataSourceEc2 --- cloudinit/sources/DataSourceEc2.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 3686fa10..cff50669 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -86,9 +86,6 @@ class DataSourceEc2(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def get_availability_zone(self): - return self.metadata['placement']['availability-zone'] - def _get_url_settings(self): mcfg = self.ds_cfg if not mcfg: @@ -198,19 +195,6 @@ class DataSourceEc2(sources.DataSource): return None return ofound - def is_vpc(self): - # See: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/615545 - # Detect that the machine was launched in a VPC. - # But I did notice that when in a VPC, meta-data - # does not have public-ipv4 and public-hostname - # listed as a possibility. - ph = "public-hostname" - p4 = "public-ipv4" - if ((ph not in self.metadata or self.metadata[ph] == "") and - (p4 not in self.metadata or self.metadata[p4] == "")): - return True - return False - @property def availability_zone(self): try: -- cgit v1.2.3 From 7ea02ab1d8ee0f400a84ee2d688e67ffbc449bf0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 7 Nov 2012 21:00:33 -0800 Subject: Add a makefile yaml checking target and fix the cases where the cc yaml is not correct. 
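As a rough illustration of the kind of breakage the new check catches (assuming PyYAML; this snippet is not part of the patch and the sample configs are invented): a plain scalar that starts with '*' is read as a YAML alias rather than literal text, which is presumably why the rsyslog examples keep such values quoted.

    # Illustrative only: safe_load() rejects the unquoted form because the
    # leading '*' is parsed as an alias indicator, not literal text.
    import yaml

    bad = 'rsyslog:\n - content: *.* @@syslogd.example.com\n'
    good = 'rsyslog:\n - content: "*.* @@syslogd.example.com"\n'
    for blob in (bad, good):
        try:
            yaml.safe_load(blob)
            print 'ok'
        except Exception, e:
            print 'bad (%s)' % (e)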
--- Makefile | 9 ++++++++- doc/examples/cloud-config.txt | 3 +-- tests/configs/sample1.yaml | 1 - tools/validate-yaml.py | 20 ++++++++++++++++++++ 4 files changed, 29 insertions(+), 4 deletions(-) create mode 100755 tools/validate-yaml.py diff --git a/Makefile b/Makefile index 88c90b9b..2a6be961 100644 --- a/Makefile +++ b/Makefile @@ -2,6 +2,10 @@ CWD=$(shell pwd) PY_FILES=$(shell find cloudinit bin tests tools -name "*.py" -type f ) PY_FILES+="bin/cloud-init" +YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f ) +YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) + + all: test pep8: @@ -23,11 +27,14 @@ clean: rm -rf /var/log/cloud-init.log \ /var/lib/cloud/ +yaml: + @$(CWD)/tools/validate-yaml.py $(YAML_FILES) + rpm: ./packages/brpm deb: ./packages/bddeb -.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb +.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 04bb5df1..12bf2c91 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -355,8 +355,7 @@ rsyslog: - ':syslogtag, isequal, "[CLOUDINIT]" /var/log/cloud-foo.log' - content: "*.* @@192.0.2.1:10514" - filename: 01-examplecom.conf - content: | - *.* @@syslogd.example.com + content: "*.* @@syslogd.example.com" # resize_rootfs should the / filesytem be resized on first boot # this allows you to launch an instance with a larger disk / partition diff --git a/tests/configs/sample1.yaml b/tests/configs/sample1.yaml index 24e874ee..6231f293 100644 --- a/tests/configs/sample1.yaml +++ b/tests/configs/sample1.yaml @@ -50,4 +50,3 @@ runcmd: byobu_by_default: user -output: {all: '| tee -a /var/log/cloud-init-output.log'} diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py new file mode 100755 index 00000000..d3218e40 --- /dev/null +++ b/tools/validate-yaml.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +"""Try to read a YAML file and report any errors. +""" + +import sys + +import yaml + + +if __name__ == "__main__": + for fn in sys.argv[1:]: + sys.stdout.write("%s" % (fn)) + try: + fh = open(fn, 'r') + yaml.safe_load(fh.read()) + fh.close() + sys.stdout.write(" - ok\n") + except Exception, e: + sys.stdout.write(" - bad (%s)\n" % (e)) -- cgit v1.2.3 From 791598f2929a5b8b6bb380f7f16ec568db96aba6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 7 Nov 2012 21:34:41 -0800 Subject: Start adding a 'migrator' module that can be used to aid in moving older versions of cloud-init's data to newer versions of cloud-init's data. 1. Move the semaphores for the current instance to their canonicalized names and use the canonicalized names in the file 'locking' code --- cloudinit/config/cc_migrator.py | 53 +++++++++++++++++++++++++++++++++++++++++ cloudinit/helpers.py | 9 ++++++- config/cloud.cfg | 1 + 3 files changed, 62 insertions(+), 1 deletion(-) create mode 100644 cloudinit/config/cc_migrator.py diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py new file mode 100644 index 00000000..b71d1d17 --- /dev/null +++ b/cloudinit/config/cc_migrator.py @@ -0,0 +1,53 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2012 Yahoo! Inc. +# +# Author: Joshua Harlow +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation.
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os +import shutil + +from cloudinit import helpers +from cloudinit import util + +from cloudinit.settings import PER_ALWAYS + +frequency = PER_ALWAYS + + +def _migrate_canon_sems(cloud): + sem_path = cloud.paths.get_ipath('sem') + if not sem_path or not os.path.exists(sem_path): + return 0 + am_adjusted = 0 + for p in os.listdir(sem_path): + full_path = os.path.join(sem_path, p) + if os.path.isfile(full_path): + canon_p = helpers.canon_sem_name(p) + if canon_p != p: + new_path = os.path.join(sem_path, p) + shutil.move(full_path, new_path) + am_adjusted += 1 + return am_adjusted + + +def handle(name, cfg, cloud, log, _args): + do_migrate = util.get_cfg_option_str(cfg, "migrate", True) + if not util.translate_bool(do_migrate): + log.debug("Skipping module named %s, migration disabled", name) + return + sems_moved = _migrate_canon_sems(cloud) + log.debug("Migrated %s semaphore files to there canonicalized names", + sems_moved) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 985ce3e5..d26625a0 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -71,12 +71,17 @@ class FileLock(object): return "<%s using file %r>" % (util.obj_name(self), self.fn) +def canon_sem_name(name): + return name.replace("-", "_") + + class FileSemaphores(object): - def __init__(self, sem_path): + def __init__(self, sem_path): self.sem_path = sem_path @contextlib.contextmanager def lock(self, name, freq, clear_on_fail=False): + name = canon_sem_name(name) try: yield self._acquire(name, freq) except: @@ -85,6 +90,7 @@ class FileSemaphores(object): raise def clear(self, name, freq): + name = canon_sem_name(name) sem_file = self._get_path(name, freq) try: util.del_file(sem_file) @@ -119,6 +125,7 @@ class FileSemaphores(object): def has_run(self, name, freq): if not freq or freq == PER_ALWAYS: return False + name = canon_sem_name(name) sem_file = self._get_path(name, freq) # This isn't really a good atomic check # but it suffices for where and when cloudinit runs diff --git a/config/cloud.cfg b/config/cloud.cfg index 05bb4eef..ad100fff 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -23,6 +23,7 @@ preserve_hostname: false # The modules that run in the 'init' stage cloud_init_modules: + - migrator - bootcmd - write-files - resizefs -- cgit v1.2.3 From 3de3c535f37e40a79b36997a93fa218534117397 Mon Sep 17 00:00:00 2001 From: harlowja Date: Wed, 7 Nov 2012 23:16:21 -0800 Subject: 1. Check the name and not the full path when applying the canon routine. 2. Add in a function to migrate legacy semaphores to new semaphores. 
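A small sketch of the renaming rule involved (illustrative only; canon_sem_name() is copied from the cloudinit.helpers change above, and the module names are the ones handled by _migrate_legacy_sems()):

    # Illustrative only: semaphore names are canonicalized by replacing
    # dashes with underscores.
    def canon_sem_name(name):
        return name.replace("-", "_")

    print canon_sem_name("apt-update-upgrade")   # -> apt_update_upgrade
    # The legacy 'apt-update-upgrade' marker is then re-created as markers for
    # 'apt-configure' and 'package-update-upgrade-install' at the same
    # frequency, so those modules are treated as already run on an existing
    # instance.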
--- cloudinit/config/cc_migrator.py | 34 +++++++++++++++++++++++++++++++--- 1 file changed, 31 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index b71d1d17..56f33d42 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -35,14 +35,41 @@ def _migrate_canon_sems(cloud): for p in os.listdir(sem_path): full_path = os.path.join(sem_path, p) if os.path.isfile(full_path): - canon_p = helpers.canon_sem_name(p) - if canon_p != p: - new_path = os.path.join(sem_path, p) + (name, ext) = os.path.splitext(p) + canon_name = helpers.canon_sem_name(name) + if canon_name != name: + new_path = os.path.join(sem_path, canon_name + ext) shutil.move(full_path, new_path) am_adjusted += 1 return am_adjusted +def _migrate_legacy_sems(cloud, log): + sem_path = cloud.paths.get_ipath('sem') + touch_there = { + 'apt-update-upgrade': [ + 'apt-configure', + 'package-update-upgrade-install', + ], + } + sem_helper = helpers.FileSemaphores(sem_path) + for (mod_name, migrate_to) in touch_there.items(): + possibles = [mod_name, helpers.canon_sem_name(mod_name)] + old_exists = [] + for p in os.listdir(sem_path): + (name, _ext) = os.path.splitext(p) + if name in possibles and os.path.isfile(p): + old_exists.append(p) + for p in old_exists: + util.del_file(os.path.join(sem_path, p)) + (_name, freq) = os.path.splitext(p) + for m in migrate_to: + log.debug("Migrating %s => %s with the same frequency", + p, m) + with sem_helper.lock(m, freq): + pass + + def handle(name, cfg, cloud, log, _args): do_migrate = util.get_cfg_option_str(cfg, "migrate", True) if not util.translate_bool(do_migrate): @@ -51,3 +78,4 @@ def handle(name, cfg, cloud, log, _args): sems_moved = _migrate_canon_sems(cloud) log.debug("Migrated %s semaphore files to there canonicalized names", sems_moved) + _migrate_legacy_sems(cloud, log) -- cgit v1.2.3 From 196badd4cfa5f9f76be1138fb2d073649af3e031 Mon Sep 17 00:00:00 2001 From: harlowja Date: Wed, 7 Nov 2012 23:20:40 -0800 Subject: 1. Ensure that the sem_path exists and is actually a valid value returned. 2. 
Adjust variable naming --- cloudinit/config/cc_migrator.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 56f33d42..58232fc9 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -46,14 +46,16 @@ def _migrate_canon_sems(cloud): def _migrate_legacy_sems(cloud, log): sem_path = cloud.paths.get_ipath('sem') - touch_there = { + if not sem_path or not os.path.exists(sem_path): + return + legacy_adjust = { 'apt-update-upgrade': [ 'apt-configure', 'package-update-upgrade-install', ], } sem_helper = helpers.FileSemaphores(sem_path) - for (mod_name, migrate_to) in touch_there.items(): + for (mod_name, migrate_to) in legacy_adjust.items(): possibles = [mod_name, helpers.canon_sem_name(mod_name)] old_exists = [] for p in os.listdir(sem_path): -- cgit v1.2.3 From ce14139c0f94b99c8c47192620b0a9faf66a96a2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Nov 2012 09:30:51 -0500 Subject: revert old zsh fix (revno 697) --- tools/Z99-cloud-locale-test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh index 4012beeb..8ad485e8 100755 --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -22,7 +22,7 @@ locale_warn() { case "$w1" in locale:) bad_names="${bad_names} ${w4}";; *) - key=${w1%%\=*} + key=${w1%%=*} val=${w1#*=} val=${val#\"} val=${val%\"} @@ -31,7 +31,7 @@ locale_warn() { done for bad in $bad_names; do for var in ${vars}; do - [ "${bad}" = "${var%\=*}" ] || continue + [ "${bad}" = "${var%=*}" ] || continue value=${var#*=} [ "${bad_lcs#* ${value}}" = "${bad_lcs}" ] && bad_lcs="${bad_lcs} ${value}" -- cgit v1.2.3 From 380bd3f3417c640191bdf90f6622f8bbbe12b5c7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Nov 2012 09:31:11 -0500 Subject: remove unused variable 'cr'. fix usage of 'value' to local 'val' --- tools/Z99-cloud-locale-test.sh | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh index 8ad485e8..97c92166 100755 --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -10,9 +10,7 @@ # locale_warn() { - local cr=" -" - local bad_names="" bad_lcs="" key="" value="" var="" + local bad_names="" bad_lcs="" key="" val="" var="" vars="" local w1 w2 w3 w4 remain # locale is expected to output either: # VARIABLE= @@ -32,9 +30,9 @@ locale_warn() { for bad in $bad_names; do for var in ${vars}; do [ "${bad}" = "${var%=*}" ] || continue - value=${var#*=} - [ "${bad_lcs#* ${value}}" = "${bad_lcs}" ] && - bad_lcs="${bad_lcs} ${value}" + val=${var#*=} + [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] && + bad_lcs="${bad_lcs} ${val}" break done done -- cgit v1.2.3 From 956e94d16ab9c94471d3829424e8bd5183f3a735 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 8 Nov 2012 09:31:26 -0500 Subject: work with zsh by using 'emulate -L sh'. This makes zsh act like 'sh', but only for the function local function. This way, we do not affect the user's shell, but get the behavior we want. 
--- tools/Z99-cloud-locale-test.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh index 97c92166..3c51f22d 100755 --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -12,6 +12,11 @@ locale_warn() { local bad_names="" bad_lcs="" key="" val="" var="" vars="" local w1 w2 w3 w4 remain + + # if shell is zsh, act like sh only for this function (-L). + # The behavior change will not permanently affect user's shell. + [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh + # locale is expected to output either: # VARIABLE= # VARIABLE="value" -- cgit v1.2.3 From d8e82fb7bc9da6259b158804ae3d8343050c67aa Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 08:55:04 -0800 Subject: Add non-zero exit code when bad yamls are found instead of returning zero in that case. --- tools/validate-yaml.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py index d3218e40..eda59cb8 100755 --- a/tools/validate-yaml.py +++ b/tools/validate-yaml.py @@ -9,6 +9,7 @@ import yaml if __name__ == "__main__": + bads = 0 for fn in sys.argv[1:]: sys.stdout.write("%s" % (fn)) try: @@ -18,3 +19,8 @@ if __name__ == "__main__": sys.stdout.write(" - ok\n") except Exception, e: sys.stdout.write(" - bad (%s)\n" % (e)) + bads += 1 + if bads > 0: + sys.exit(1) + else: + sys.exit(0) -- cgit v1.2.3 From 154ad87b29344ea4d29d92f8559f61bb6efe6530 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 16:30:57 -0800 Subject: Ensure that at the needed stages the local variables of the init class are reset, so that when they are regenerated they use the updated data instead of the previous data (since they weren't reset). LP: #1076811 --- cloudinit/stages.py | 31 +++++++++++++-- tests/data/user_data.1.txt | 15 +++++++ tests/unittests/test_merging.py | 62 +++++++++++++++++++++++++++++ tests/unittests/test_runs/test_merge_run.py | 52 ++++++++++++++++++++++++ tests/unittests/test_util.py | 59 --------------------------- tools/run-pep8 | 13 +++++- 6 files changed, 169 insertions(+), 63 deletions(-) create mode 100644 tests/data/user_data.1.txt create mode 100644 tests/unittests/test_merging.py create mode 100644 tests/unittests/test_runs/test_merge_run.py diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 4ed1a750..e0cf1cbe 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -36,6 +36,8 @@ from cloudinit.handlers import cloud_config as cc_part from cloudinit.handlers import shell_script as ss_part from cloudinit.handlers import upstart_job as up_part +from cloudinit.sources import DataSourceNone + from cloudinit import cloud from cloudinit import config from cloudinit import distros @@ -58,8 +60,16 @@ class Init(object): self._cfg = None self._paths = None self._distro = None - # Created only when a fetch occurs - self.datasource = None + # Changed only when a fetch occurs + self.datasource = DataSourceNone.DataSourceNone({}, None, None) + + def _reset(self, ds=False): + # Recreated on access + self._cfg = None + self._paths = None + self._distro = None + if ds: + self.datasource = DataSourceNone.DataSourceNone({}, None, None) @property def distro(self): @@ -236,7 +246,7 @@ class Init(object): self.datasource = ds # Ensure we adjust our path members datasource # now that we have one (thus allowing ipath to be used) - self.paths.datasource = ds + self._reset() return ds def _get_instance_subdirs(self): @@ -296,6 +306,10 @@ class Init(object):
util.write_file(iid_fn, "%s\n" % iid) util.write_file(os.path.join(dp, 'previous-instance-id'), "%s\n" % (previous_iid)) + # Ensure needed components are regenerated + # after change of instance which may cause + # change of configuration + self._reset() return iid def fetch(self): @@ -409,6 +423,17 @@ class Init(object): handlers.call_end(mod, data, frequency) called.append(mod) + # Perform post-consumption adjustments so that + # modules that run during the init stage reflect + # this consumed set. + # + # They will be recreated on future access... + self._reset() + # Note(harlowja): the 'active' datasource will have + # references to the previous config, distro, paths + # objects before the load of the userdata happened, + # this is expected. + class Modules(object): def __init__(self, init, cfg_files=None): diff --git a/tests/data/user_data.1.txt b/tests/data/user_data.1.txt new file mode 100644 index 00000000..4c4543de --- /dev/null +++ b/tests/data/user_data.1.txt @@ -0,0 +1,15 @@ +#cloud-config +write_files: +- content: blah + path: /etc/blah.ini + permissions: 493 + +system_info: + package_mirrors: + - arches: [i386, amd64, blah] + failsafe: + primary: http://my.archive.mydomain.com/ubuntu + security: http://my.security.mydomain.com/ubuntu + search: + primary: [] + security: [] diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py new file mode 100644 index 00000000..0037b966 --- /dev/null +++ b/tests/unittests/test_merging.py @@ -0,0 +1,62 @@ +from mocker import MockerTestCase + +from cloudinit import util + + +class TestMergeDict(MockerTestCase): + def test_simple_merge(self): + """Test simple non-conflict merge.""" + source = {"key1": "value1"} + candidate = {"key2": "value2"} + result = util.mergedict(source, candidate) + self.assertEqual({"key1": "value1", "key2": "value2"}, result) + + def test_nested_merge(self): + """Test nested merge.""" + source = {"key1": {"key1.1": "value1.1"}} + candidate = {"key1": {"key1.2": "value1.2"}} + result = util.mergedict(source, candidate) + self.assertEqual( + {"key1": {"key1.1": "value1.1", "key1.2": "value1.2"}}, result) + + def test_merge_does_not_override(self): + """Test that candidate doesn't override source.""" + source = {"key1": "value1", "key2": "value2"} + candidate = {"key1": "value2", "key2": "NEW VALUE"} + result = util.mergedict(source, candidate) + self.assertEqual(source, result) + + def test_empty_candidate(self): + """Test empty candidate doesn't change source.""" + source = {"key": "value"} + candidate = {} + result = util.mergedict(source, candidate) + self.assertEqual(source, result) + + def test_empty_source(self): + """Test empty source is replaced by candidate.""" + source = {} + candidate = {"key": "value"} + result = util.mergedict(source, candidate) + self.assertEqual(candidate, result) + + def test_non_dict_candidate(self): + """Test non-dict candidate is discarded.""" + source = {"key": "value"} + candidate = "not a dict" + result = util.mergedict(source, candidate) + self.assertEqual(source, result) + + def test_non_dict_source(self): + """Test non-dict source is not modified with a dict candidate.""" + source = "not a dict" + candidate = {"key": "value"} + result = util.mergedict(source, candidate) + self.assertEqual(source, result) + + def test_neither_dict(self): + """Test if neither candidate or source is dict source wins.""" + source = "source" + candidate = "candidate" + result = util.mergedict(source, candidate) + self.assertEqual(source, result) diff --git 
a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py new file mode 100644 index 00000000..04c03730 --- /dev/null +++ b/tests/unittests/test_runs/test_merge_run.py @@ -0,0 +1,52 @@ +import os + +from tests.unittests import helpers + +from cloudinit.settings import (PER_INSTANCE) +from cloudinit import stages +from cloudinit import util + + +class TestSimpleRun(helpers.FilesystemMockingTestCase): + def _patchIn(self, root): + self.restore() + self.patchOS(root) + self.patchUtils(root) + + def test_none_ds(self): + new_root = self.makeDir() + self.replicateTestRoot('simple_ubuntu', new_root) + cfg = { + 'datasource_list': ['None'], + 'cloud_init_modules': ['write-files'], + } + ud = self.readResource('user_data.1.txt') + cloud_cfg = util.yaml_dumps(cfg) + util.ensure_dir(os.path.join(new_root, 'etc', 'cloud')) + util.write_file(os.path.join(new_root, 'etc', + 'cloud', 'cloud.cfg'), cloud_cfg) + self._patchIn(new_root) + + # Now start verifying whats created + initer = stages.Init() + initer.read_cfg() + initer.initialize() + initer.fetch() + initer.datasource.userdata_raw = ud + iid = initer.instancify() + initer.update() + initer.cloudify().run('consume_userdata', + initer.consume_userdata, + args=[PER_INSTANCE], + freq=PER_INSTANCE) + mirrors = initer.distro.get_option('package_mirrors') + self.assertEquals(1, len(mirrors)) + mirror = mirrors[0] + self.assertEquals(mirror['arches'], ['i386', 'amd64', 'blah']) + mods = stages.Modules(initer) + (which_ran, failures) = mods.run_section('cloud_init_modules') + self.assertTrue(len(failures) == 0) + self.assertTrue(os.path.exists('/etc/blah.ini')) + self.assertIn('write-files', which_ran) + contents = util.load_file('/etc/blah.ini') + self.assertEquals(contents, 'blah') diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 96962b91..02611581 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -28,65 +28,6 @@ class FakeSelinux(object): self.restored.append(path) -class TestMergeDict(MockerTestCase): - def test_simple_merge(self): - """Test simple non-conflict merge.""" - source = {"key1": "value1"} - candidate = {"key2": "value2"} - result = util.mergedict(source, candidate) - self.assertEqual({"key1": "value1", "key2": "value2"}, result) - - def test_nested_merge(self): - """Test nested merge.""" - source = {"key1": {"key1.1": "value1.1"}} - candidate = {"key1": {"key1.2": "value1.2"}} - result = util.mergedict(source, candidate) - self.assertEqual( - {"key1": {"key1.1": "value1.1", "key1.2": "value1.2"}}, result) - - def test_merge_does_not_override(self): - """Test that candidate doesn't override source.""" - source = {"key1": "value1", "key2": "value2"} - candidate = {"key1": "value2", "key2": "NEW VALUE"} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_empty_candidate(self): - """Test empty candidate doesn't change source.""" - source = {"key": "value"} - candidate = {} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_empty_source(self): - """Test empty source is replaced by candidate.""" - source = {} - candidate = {"key": "value"} - result = util.mergedict(source, candidate) - self.assertEqual(candidate, result) - - def test_non_dict_candidate(self): - """Test non-dict candidate is discarded.""" - source = {"key": "value"} - candidate = "not a dict" - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_non_dict_source(self): 
- """Test non-dict source is not modified with a dict candidate.""" - source = "not a dict" - candidate = {"key": "value"} - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - def test_neither_dict(self): - """Test if neither candidate or source is dict source wins.""" - source = "source" - candidate = "candidate" - result = util.mergedict(source, candidate) - self.assertEqual(source, result) - - class TestGetCfgOptionListOrStr(TestCase): def test_not_found_no_default(self): """None is returned if key is not found and no default given.""" diff --git a/tools/run-pep8 b/tools/run-pep8 index ad55d420..1dfa92c3 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -21,10 +21,21 @@ else base=`pwd`/tools/ fi +IGNORE="E501" # Line too long (these are caught by pylint) + +# King Arthur: Be quiet! ... Be Quiet! I Order You to Be Quiet. +IGNORE="$IGNORE,E121" # Continuation line indentation is not a multiple of four +IGNORE="$IGNORE,E123" # Closing bracket does not match indentation of opening bracket's line +IGNORE="$IGNORE,E124" # Closing bracket missing visual indentation +IGNORE="$IGNORE,E125" # Continuation line does not distinguish itself from next logical line +IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent +IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent +IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent + cmd=( ${base}/hacking.py - --ignore=E501 # Line too long (these are caught by pylint) + --ignore="$IGNORE" "${files[@]}" ) -- cgit v1.2.3 From b80c2401123e16b9038ff3fb6f6d660717ee68e1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 17:37:16 -0800 Subject: Fix the case where on a redhat based system the fully qualified domain name should end up in /etc/sysconfig/network by passing the fqdn to the update and set hostname methods and using it accordingly. 
LP: #1076759 --- cloudinit/config/cc_set_hostname.py | 10 ++++++---- cloudinit/config/cc_update_hostname.py | 8 +++++--- cloudinit/distros/__init__.py | 4 ++-- cloudinit/distros/debian.py | 4 ++-- cloudinit/distros/rhel.py | 25 +++++++++++++++++-------- 5 files changed, 32 insertions(+), 19 deletions(-) diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index b0f27ebf..2b32fc94 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -27,9 +27,11 @@ def handle(name, cfg, cloud, log, _args): " not setting the hostname in module %s"), name) return - (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) try: - log.debug("Setting hostname to %s", hostname) - cloud.distro.set_hostname(hostname) + log.debug("Setting the hostname to %s (%s)", fqdn, hostname) + cloud.distro.set_hostname(hostname, fqdn) except Exception: - util.logexc(log, "Failed to set hostname to %s", hostname) + util.logexc(log, "Failed to set the hostname to %s (%s)", + fqdn, hostname) + raise diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index 1d6679ea..52225cd8 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -32,10 +32,12 @@ def handle(name, cfg, cloud, log, _args): " not updating the hostname in module %s"), name) return - (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud) + (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) try: prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname") - cloud.distro.update_hostname(hostname, prev_fn) + log.debug("Updating hostname to %s (%s)", fqdn, hostname) + cloud.distro.update_hostname(hostname, fqdn, prev_fn) except Exception: - util.logexc(log, "Failed to set the hostname to %s", hostname) + util.logexc(log, "Failed to update the hostname to %s (%s)", + fqdn, hostname) raise diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 869540d2..bd04ba79 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -58,11 +58,11 @@ class Distro(object): return self._cfg.get(opt_name, default) @abc.abstractmethod - def set_hostname(self, hostname): + def set_hostname(self, hostname, fqdn=None): raise NotImplementedError() @abc.abstractmethod - def update_hostname(self, hostname, prev_hostname_fn): + def update_hostname(self, hostname, fqdn, prev_hostname_fn): raise NotImplementedError() @abc.abstractmethod diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index cc7e53a0..ed4070b4 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -67,7 +67,7 @@ class Distro(distros.Distro): else: return distros.Distro._bring_up_interfaces(self, device_names) - def set_hostname(self, hostname): + def set_hostname(self, hostname, fqdn=None): self._write_hostname(hostname, "/etc/hostname") LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -76,7 +76,7 @@ class Distro(distros.Distro): # "" gives trailing newline. 
util.write_file(out_fn, "%s\n" % str(hostname), 0644) - def update_hostname(self, hostname, prev_fn): + def update_hostname(self, hostname, fqdn, prev_fn): hostname_prev = self._read_hostname(prev_fn) hostname_in_etc = self._read_hostname("/etc/hostname") update_files = [] diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index bf3c18d2..e4c27216 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -146,8 +146,13 @@ class Distro(distros.Distro): lines.insert(0, _make_header()) util.write_file(fn, "\n".join(lines), 0644) - def set_hostname(self, hostname): - self._write_hostname(hostname, '/etc/sysconfig/network') + def set_hostname(self, hostname, fqdn=None): + # See: http://bit.ly/TwitgL + # Should be fqdn if we can use it + sysconfig_hostname = fqdn + if not sysconfig_hostname: + sysconfig_hostname = hostname + self._write_hostname(sysconfig_hostname, '/etc/sysconfig/network') LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) @@ -165,28 +170,32 @@ class Distro(distros.Distro): } self._update_sysconfig_file(out_fn, host_cfg) - def update_hostname(self, hostname, prev_file): + def update_hostname(self, hostname, fqdn, prev_file): + # See: http://bit.ly/TwitgL + # Should be fqdn if we can use it + sysconfig_hostname = fqdn + if not sysconfig_hostname: + sysconfig_hostname = hostname hostname_prev = self._read_hostname(prev_file) hostname_in_sys = self._read_hostname("/etc/sysconfig/network") update_files = [] - if not hostname_prev or hostname_prev != hostname: + if not hostname_prev or hostname_prev != sysconfig_hostname: update_files.append(prev_file) if (not hostname_in_sys or (hostname_in_sys == hostname_prev - and hostname_in_sys != hostname)): + and hostname_in_sys != sysconfig_hostname)): update_files.append("/etc/sysconfig/network") for fn in update_files: try: - self._write_hostname(hostname, fn) + self._write_hostname(sysconfig_hostname, fn) except: util.logexc(LOG, "Failed to write hostname %s to %s", - hostname, fn) + sysconfig_hostname, fn) if (hostname_in_sys and hostname_prev and hostname_in_sys != hostname_prev): LOG.debug(("%s differs from /etc/sysconfig/network." " Assuming user maintained hostname."), prev_file) if "/etc/sysconfig/network" in update_files: - # Only do this if we are running in non-adjusted root mode LOG.debug("Setting hostname to %s", hostname) util.subp(['hostname', hostname]) -- cgit v1.2.3 From bd01f3466e10ca515a8e8aec42d00201f40cbd53 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 8 Nov 2012 17:41:36 -0800 Subject: Forgot the test! 
--- .../test_handler/test_handler_set_hostname.py | 58 ++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 tests/unittests/test_handler/test_handler_set_hostname.py diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py new file mode 100644 index 00000000..a1aba62f --- /dev/null +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -0,0 +1,58 @@ +from cloudinit.config import cc_set_hostname + +from cloudinit import cloud +from cloudinit import distros +from cloudinit import helpers +from cloudinit import util + +from tests.unittests import helpers as t_help + +import logging + +from StringIO import StringIO + +from configobj import ConfigObj + +LOG = logging.getLogger(__name__) + + +class TestHostname(t_help.FilesystemMockingTestCase): + def setUp(self): + super(TestHostname, self).setUp() + self.tmp = self.makeDir(prefix="unittest_") + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def test_write_hostname_rhel(self): + cfg = { + 'hostname': 'blah.blah.blah.yahoo.com', + } + distro = self._fetch_distro('rhel') + paths = helpers.Paths({}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + self.patchOS(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/sysconfig/network") + n_cfg = ConfigObj(StringIO(contents)) + self.assertEquals({'HOSTNAME': 'blah.blah.blah.yahoo.com'}, + dict(n_cfg)) + + def test_write_hostname_debian(self): + cfg = { + 'hostname': 'blah.blah.blah.yahoo.com', + } + distro = self._fetch_distro('debian') + paths = helpers.Paths({}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle('cc_set_hostname', + cfg, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEquals('blah', contents.strip()) -- cgit v1.2.3 From b0f6c7bfa94a5ba302debdc16a175cb0017f9634 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 Nov 2012 11:16:03 -0800 Subject: Fix the none return problem. --- cloudinit/ec2_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 06b302f2..eb5b3884 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -49,6 +49,7 @@ def _unlazy_dict(mp): return mp for (k, v) in mp.items(): _unlazy_dict(v) + return mp def get_instance_userdata(api_version, metadata_address): -- cgit v1.2.3 From 8c006684034c13719171672836edfc65bf02ebe9 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 Nov 2012 14:40:41 -0800 Subject: Fix pep8 warnings. 
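The ec2_utils "none return" fix above adds the missing return so callers of the walked metadata actually receive it. A minimal standalone sketch of that pattern, using plain dicts in place of boto's lazily loaded metadata and a hypothetical function name:

    def force_walk(mp):
        # Recursively touch every nested value so any lazy loading is
        # triggered now, then hand the mapping back; without the final
        # return the caller would receive None.
        if not isinstance(mp, dict):
            return mp
        for v in mp.values():
            force_walk(v)
        return mp

    md = force_walk({'meta-data': {'instance-id': 'i-abc123'}})
    assert md is not None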
--- cloudinit/distros/__init__.py | 2 +- tools/run-pep8 | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index bd04ba79..3392a065 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -81,7 +81,7 @@ class Distro(object): def _get_arch_package_mirror_info(self, arch=None): mirror_info = self.get_option("package_mirrors", []) - if arch == None: + if not arch: arch = self.get_primary_arch() return _get_arch_package_mirror_info(mirror_info, arch) diff --git a/tools/run-pep8 b/tools/run-pep8 index 1dfa92c3..20e594bc 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -31,6 +31,7 @@ IGNORE="$IGNORE,E125" # Continuation line does not distinguish itself from next IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent +IGNORE="$IGNORE,E502" # The backslash is redundant between brackets cmd=( ${base}/hacking.py -- cgit v1.2.3 From 1169dcc5f18fd9a5adbf353bec87e48d563550a5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 9 Nov 2012 15:28:35 -0800 Subject: Fix the merging of group configuration when that group configuration is a dict => members. LP: #1077245 --- cloudinit/distros/__init__.py | 32 +++++++++++++++++++--- .../test_distros/test_user_data_normalize.py | 22 +++++++++++++++ 2 files changed, 50 insertions(+), 4 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3392a065..2d01efc3 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -425,12 +425,36 @@ def _get_arch_package_mirror_info(package_mirrors, arch): # is the standard form used in the rest # of cloud-init def _normalize_groups(grp_cfg): - if isinstance(grp_cfg, (str, basestring, list)): + if isinstance(grp_cfg, (str, basestring)): + grp_cfg = grp_cfg.strip().split(",") + if isinstance(grp_cfg, (list)): c_grp_cfg = {} - for i in util.uniq_merge(grp_cfg): - c_grp_cfg[i] = [] + for i in grp_cfg: + if isinstance(i, (dict)): + for k, v in i.items(): + if k not in c_grp_cfg: + if isinstance(v, (list)): + c_grp_cfg[k] = list(v) + elif isinstance(v, (basestring, str)): + c_grp_cfg[k] = [v] + else: + raise TypeError("Bad group member type %s" % + util.obj_name(v)) + else: + if isinstance(v, (list)): + c_grp_cfg[k].extend(v) + elif isinstance(v, (basestring, str)): + c_grp_cfg[k].append(v) + else: + raise TypeError("Bad group member type %s" % + util.obj_name(v)) + elif isinstance(i, (str, basestring)): + if i not in c_grp_cfg: + c_grp_cfg[i] = [] + else: + raise TypeError("Unknown group name type %s" % + util.obj_name(i)) grp_cfg = c_grp_cfg - groups = {} if isinstance(grp_cfg, (dict)): for (grp_name, grp_members) in grp_cfg.items(): diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index 8f0d8896..50400c8a 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -30,6 +30,28 @@ class TestUGNormalize(MockerTestCase): def _norm(self, cfg, distro): return distros.normalize_users_groups(cfg, distro) + def test_group_dict(self): + distro = self._make_distro('ubuntu') + g = {'groups': [ + { + 'ubuntu': ['foo', 'bar'], + 'bob': 'users', + }, + 'cloud-users', + { + 'bob': 'users2', + }, + ] + } + (users, groups) = self._norm(g, distro) + 
self.assertIn('ubuntu', groups) + ub_members = groups['ubuntu'] + self.assertEquals(sorted(['foo', 'bar']), sorted(ub_members)) + self.assertIn('bob', groups) + b_members = groups['bob'] + self.assertEquals(sorted(['users', 'users2']), + sorted(b_members)) + def test_basic_groups(self): distro = self._make_distro('ubuntu') ug_cfg = { -- cgit v1.2.3 From a17a69c35c1de0a6bd6f054f76d3da9e4a9c5364 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 10 Nov 2012 10:15:16 -0800 Subject: Sudoers.d creation cleanups + tests. --- cloudinit/distros/__init__.py | 20 +++++++++++------ tests/unittests/helpers.py | 1 + tests/unittests/test_distros/test_generic.py | 32 +++++++++++++++++++++++++--- tests/unittests/test_runs/test_merge_run.py | 2 +- 4 files changed, 45 insertions(+), 10 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 2d01efc3..d2cb0a8b 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -283,8 +283,10 @@ class Distro(object): # Ensure the dir is included and that # it actually exists as a directory sudoers_contents = '' + base_exists = False if os.path.exists(sudo_base): sudoers_contents = util.load_file(sudo_base) + base_exists = True found_include = False for line in sudoers_contents.splitlines(): line = line.strip() @@ -299,18 +301,24 @@ class Distro(object): found_include = True break if not found_include: - sudoers_contents += "\n#includedir %s\n" % (path) try: - if not os.path.exists(sudo_base): + if not base_exists: + lines = [('# See sudoers(5) for more information' + ' on "#include" directives:'), '', + '# Added by cloud-init', + "#includedir %s" % (path), ''] + sudoers_contents = "\n".join(lines) util.write_file(sudo_base, sudoers_contents, 0440) else: - with open(sudo_base, 'a') as f: - f.write(sudoers_contents) - LOG.debug("added '#includedir %s' to %s" % (path, sudo_base)) + lines = ['', '# Added by cloud-init', + "#includedir %s" % (path), ''] + sudoers_contents = "\n".join(lines) + util.append_file(sudo_base, sudoers_contents) + LOG.debug("Added '#includedir %s' to %s" % (path, sudo_base)) except IOError as e: util.logexc(LOG, "Failed to write %s" % sudo_base, e) raise e - util.ensure_dir(path, 0755) + util.ensure_dir(path, 0750) def write_sudo_rules(self, user, diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 2c5dcad2..e8080668 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -103,6 +103,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): def patchUtils(self, new_root): patch_funcs = { util: [('write_file', 1), + ('append_file', 1), ('load_file', 1), ('ensure_dir', 1), ('chmod', 1), diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 2df4c2f0..704699b5 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -1,6 +1,9 @@ -from mocker import MockerTestCase - from cloudinit import distros +from cloudinit import util + +from tests.unittests import helpers + +import os unknown_arch_info = { 'arches': ['default'], @@ -27,7 +30,7 @@ gpmi = distros._get_package_mirror_info # pylint: disable=W0212 gapmi = distros._get_arch_package_mirror_info # pylint: disable=W0212 -class TestGenericDistro(MockerTestCase): +class TestGenericDistro(helpers.FilesystemMockingTestCase): def return_first(self, mlist): if not mlist: @@ -52,6 +55,29 @@ class TestGenericDistro(MockerTestCase): # Make a temp directoy for tests to use. 
self.tmp = self.makeDir() + def test_sudoers_ensure_new(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + d.ensure_sudo_dir("/b") + contents = util.load_file("/etc/sudoers") + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + + def test_sudoers_ensure_append(self): + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + util.write_file("/etc/sudoers", "josh, josh\n") + d.ensure_sudo_dir("/b") + contents = util.load_file("/etc/sudoers") + self.assertIn("includedir /b", contents) + self.assertTrue(os.path.isdir("/b")) + self.assertIn("josh", contents) + self.assertEquals(2, contents.count("josh")) + def test_arch_package_mirror_info_unknown(self): """for an unknown arch, we should get back that with arch 'default'.""" arch_mirrors = gapmi(package_mirrors, arch="unknown") diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index 04c03730..36de97ae 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -7,7 +7,7 @@ from cloudinit import stages from cloudinit import util -class TestSimpleRun(helpers.FilesystemMockingTestCase): +class TestMergeRun(helpers.FilesystemMockingTestCase): def _patchIn(self, root): self.restore() self.patchOS(root) -- cgit v1.2.3 From efa2dfa699bc9222105850641a2820ceda9bfe67 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 21:57:06 -0500 Subject: update ChangeLog LP: #1076811 --- ChangeLog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ChangeLog b/ChangeLog index 8f304db3..52f47e3c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -16,6 +16,12 @@ - Remove usage of paths.join, as all code should run through util helpers - Fix pylint complaining about tests folder 'helpers.py' not being found - Add a pylintrc file that is used instead options hidden in 'run_pylint' + - fix bug where cloud-config from user-data could not affect system_info + settings [revno 703] (LP: #1076811) + - for write fqdn to system config for rh/fedora [revno 704] + - add yaml/cloud config examples checking tool [revno 706] + - Fix the merging of group configuration when that group configuration is a + dict => members. [revno 707] 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. -- cgit v1.2.3 From 9cb3130b0b87eabe458df7dc113f00431ff3648e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 22:24:01 -0500 Subject: revert unrelated whitespace changes that creeped in. --- cloudinit/distros/__init__.py | 38 +++++++++++++++------------- cloudinit/sources/DataSourceAltCloud.py | 2 +- cloudinit/util.py | 3 ++- tests/unittests/test_runs/test_simple_run.py | 6 ++--- tools/hacking.py | 4 +-- 5 files changed, 27 insertions(+), 26 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 8a98e334..d2cb0a8b 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -187,23 +187,23 @@ class Distro(object): # inputs. If something goes wrong, we can end up with a system # that nobody can login to. 
adduser_opts = { - "gecos": '--comment', - "homedir": '--home', - "primary_group": '--gid', - "groups": '--groups', - "passwd": '--password', - "shell": '--shell', - "expiredate": '--expiredate', - "inactive": '--inactive', - "selinux_user": '--selinux-user', - } + "gecos": '--comment', + "homedir": '--home', + "primary_group": '--gid', + "groups": '--groups', + "passwd": '--password', + "shell": '--shell', + "expiredate": '--expiredate', + "inactive": '--inactive', + "selinux_user": '--selinux-user', + } adduser_opts_flags = { - "no_user_group": '--no-user-group', - "system": '--system', - "no_log_init": '--no-log-init', - "no_create_home": "-M", - } + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', + "no_create_home": "-M", + } # Now check the value and create the command for option in kwargs: @@ -320,9 +320,11 @@ class Distro(object): raise e util.ensure_dir(path, 0750) - def write_sudo_rules(self, user, rules, sudo_file=None): - if not sudo_file: - sudo_file = "/etc/sudoers.d/90-cloud-init-users" + def write_sudo_rules(self, + user, + rules, + sudo_file="/etc/sudoers.d/90-cloud-init-users", + ): content_header = "# user rules for %s" % user content = "%s\n%s %s\n\n" % (content_header, user, rules) diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 9812bdcb..d7e1204f 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -47,7 +47,7 @@ META_DATA_NOT_SUPPORTED = { 'instance-id': 455, 'local-hostname': 'localhost', 'placement': {}, -} + } def read_user_data_callback(mount_dir): diff --git a/cloudinit/util.py b/cloudinit/util.py index 4f5b15ee..7890a3d6 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1193,7 +1193,8 @@ def yaml_dumps(obj): indent=4, explicit_start=True, explicit_end=True, - default_flow_style=False) + default_flow_style=False, + ) return formatted diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 60ef812a..22d6cf2c 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -37,13 +37,11 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): self.replicateTestRoot('simple_ubuntu', new_root) cfg = { 'datasource_list': ['None'], - 'write_files': [ - { + 'write_files': [{ 'path': '/etc/blah.ini', 'content': 'blah', 'permissions': 0755, - }, - ], + }], 'cloud_init_modules': ['write-files'], } cloud_cfg = util.yaml_dumps(cfg) diff --git a/tools/hacking.py b/tools/hacking.py index 26a07c53..11163df3 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -66,8 +66,8 @@ def cloud_import_alphabetical(physical_line, line_number, lines): # handle import x # use .lower since capitalization shouldn't dictate order split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2]) - split_previous = split_previous.strip().lower().split() + split_previous = import_normalize(lines[line_number - 2] + ).strip().lower().split() # with or without "as y" length = [2, 4] if (len(split_line) in length and len(split_previous) in length and -- cgit v1.2.3 From d7a42777c83668111d9bb496f581605b33a0b4f2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 22:27:49 -0500 Subject: 1 pep8 and 1 pylint fix --- cloudinit/helpers.py | 2 +- tests/unittests/test_runs/test_merge_run.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/cloudinit/helpers.py b/cloudinit/helpers.py index d26625a0..794f00ec 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -76,7 +76,7 @@ def canon_sem_name(name): class FileSemaphores(object): - def __init__(self, sem_path): + def __init__(self, sem_path): self.sem_path = sem_path @contextlib.contextmanager diff --git a/tests/unittests/test_runs/test_merge_run.py b/tests/unittests/test_runs/test_merge_run.py index 36de97ae..d9c3a455 100644 --- a/tests/unittests/test_runs/test_merge_run.py +++ b/tests/unittests/test_runs/test_merge_run.py @@ -33,7 +33,7 @@ class TestMergeRun(helpers.FilesystemMockingTestCase): initer.initialize() initer.fetch() initer.datasource.userdata_raw = ud - iid = initer.instancify() + _iid = initer.instancify() initer.update() initer.cloudify().run('consume_userdata', initer.consume_userdata, -- cgit v1.2.3 From 3248ac9bbb2008e88a3bd9c030ba0fcbc14b7fce Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 10 Nov 2012 22:32:49 -0500 Subject: whitespace / indentation cleanups These changes were pulled out of the previous merge (cc_yum_add_repo) as they were unrelated there. Re-applying them here. --- cloudinit/distros/__init__.py | 38 +++++++++++++--------------- cloudinit/sources/DataSourceAltCloud.py | 2 +- cloudinit/util.py | 3 +-- tests/unittests/test_runs/test_simple_run.py | 6 +++-- tools/hacking.py | 4 +-- 5 files changed, 26 insertions(+), 27 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index d2cb0a8b..8a98e334 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -187,23 +187,23 @@ class Distro(object): # inputs. If something goes wrong, we can end up with a system # that nobody can login to. adduser_opts = { - "gecos": '--comment', - "homedir": '--home', - "primary_group": '--gid', - "groups": '--groups', - "passwd": '--password', - "shell": '--shell', - "expiredate": '--expiredate', - "inactive": '--inactive', - "selinux_user": '--selinux-user', - } + "gecos": '--comment', + "homedir": '--home', + "primary_group": '--gid', + "groups": '--groups', + "passwd": '--password', + "shell": '--shell', + "expiredate": '--expiredate', + "inactive": '--inactive', + "selinux_user": '--selinux-user', + } adduser_opts_flags = { - "no_user_group": '--no-user-group', - "system": '--system', - "no_log_init": '--no-log-init', - "no_create_home": "-M", - } + "no_user_group": '--no-user-group', + "system": '--system', + "no_log_init": '--no-log-init', + "no_create_home": "-M", + } # Now check the value and create the command for option in kwargs: @@ -320,11 +320,9 @@ class Distro(object): raise e util.ensure_dir(path, 0750) - def write_sudo_rules(self, - user, - rules, - sudo_file="/etc/sudoers.d/90-cloud-init-users", - ): + def write_sudo_rules(self, user, rules, sudo_file=None): + if not sudo_file: + sudo_file = "/etc/sudoers.d/90-cloud-init-users" content_header = "# user rules for %s" % user content = "%s\n%s %s\n\n" % (content_header, user, rules) diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index d7e1204f..9812bdcb 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -47,7 +47,7 @@ META_DATA_NOT_SUPPORTED = { 'instance-id': 455, 'local-hostname': 'localhost', 'placement': {}, - } +} def read_user_data_callback(mount_dir): diff --git a/cloudinit/util.py b/cloudinit/util.py index 7890a3d6..4f5b15ee 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1193,8 +1193,7 @@ def 
yaml_dumps(obj): indent=4, explicit_start=True, explicit_end=True, - default_flow_style=False, - ) + default_flow_style=False) return formatted diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py index 22d6cf2c..60ef812a 100644 --- a/tests/unittests/test_runs/test_simple_run.py +++ b/tests/unittests/test_runs/test_simple_run.py @@ -37,11 +37,13 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase): self.replicateTestRoot('simple_ubuntu', new_root) cfg = { 'datasource_list': ['None'], - 'write_files': [{ + 'write_files': [ + { 'path': '/etc/blah.ini', 'content': 'blah', 'permissions': 0755, - }], + }, + ], 'cloud_init_modules': ['write-files'], } cloud_cfg = util.yaml_dumps(cfg) diff --git a/tools/hacking.py b/tools/hacking.py index 11163df3..26a07c53 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -66,8 +66,8 @@ def cloud_import_alphabetical(physical_line, line_number, lines): # handle import x # use .lower since capitalization shouldn't dictate order split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2] - ).strip().lower().split() + split_previous = import_normalize(lines[line_number - 2]) + split_previous = split_previous.strip().lower().split() # with or without "as y" length = [2, 4] if (len(split_line) in length and len(split_previous) in length and -- cgit v1.2.3 From 71ba36704132ff8597dfc0e45b34e0c4424e239f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 11 Nov 2012 21:49:10 -0500 Subject: config-drive-v2: populate metadata['public-keys'] from 'public_keys' other datasources populate 'public-keys' rather than 'public_keys' and there is a more complete handler in the base DataSource. So, to take advantage of that, have DataSourceConfigDrive copy public_keys to public-keys, and remove the 'get_public_ssh_keys' from the DataSourcEConfigDrive. LP: #1077700 --- cloudinit/sources/DataSourceConfigDrive.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 9729cfb9..dbbedce1 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -218,11 +218,6 @@ class DataSourceConfigDrive(sources.DataSource): return True - def get_public_ssh_keys(self): - if not 'public-keys' in self.metadata: - return [] - return self.metadata['public-keys'] - class DataSourceConfigDriveNet(DataSourceConfigDrive): def __init__(self, sys_cfg, distro, paths): @@ -331,6 +326,13 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): except KeyError: raise BrokenConfigDriveDir("No uuid entry in metadata") + # other datasources (and config-drive-v1) populate metadata['public-keys'] + # where as with config-drive-v2, that would be 'public_keys'. 
So, just + # copy the field if it is present + if ('public_keys' in results['metadata'] and not + 'public-keys' in results['metadata']): + results['public-keys'] = results['public_keys'] + def read_content_path(item): # do not use os.path.join here, as content_path starts with / cpath = os.path.sep.join((source_dir, "openstack", -- cgit v1.2.3 From 54828f025dede5d5bc1d26419083e6014f69212e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 09:55:39 -0500 Subject: add comment to get_instance_userdata reguarding empty/un-provided userdata --- cloudinit/ec2_utils.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index eb5b3884..a278ef04 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -53,6 +53,10 @@ def _unlazy_dict(mp): def get_instance_userdata(api_version, metadata_address): + # Note: boto.utils.get_instance_metadata returns '' for empty string + # so the change from non-true to '' is not specifically necessary, but + # this way cloud-init will get consistent behavior even if boto changed + # in the future to return a None on "no user-data provided". ud = boto_utils.get_instance_userdata(api_version, None, metadata_address) if not ud: ud = '' -- cgit v1.2.3 From 7fd838c187ad004d124c9293d91fdb4fca083f66 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 09:55:48 -0500 Subject: add ChangeLog entry --- ChangeLog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ChangeLog b/ChangeLog index 5408a941..a208ab0d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -23,6 +23,8 @@ - Fix the merging of group configuration when that group configuration is a dict => members. [revno 707] - add yum_add_repo configuration module for adding additional yum repos + - work around the lazy loading of get_instance_metadata in boto >= 2.6.0 + by fully walking the dictionary. (LP: #1068801) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. -- cgit v1.2.3 From 2fabf3951d79ba67455a00895b5357fccf28f4f3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 11:57:14 -0500 Subject: REVERT revno 714: config-drive-v2: populate metadata['public-keys'] from 'public_keys' --- cloudinit/sources/DataSourceConfigDrive.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index dbbedce1..9729cfb9 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -218,6 +218,11 @@ class DataSourceConfigDrive(sources.DataSource): return True + def get_public_ssh_keys(self): + if not 'public-keys' in self.metadata: + return [] + return self.metadata['public-keys'] + class DataSourceConfigDriveNet(DataSourceConfigDrive): def __init__(self, sys_cfg, distro, paths): @@ -326,13 +331,6 @@ def read_config_drive_dir_v2(source_dir, version="2012-08-10"): except KeyError: raise BrokenConfigDriveDir("No uuid entry in metadata") - # other datasources (and config-drive-v1) populate metadata['public-keys'] - # where as with config-drive-v2, that would be 'public_keys'. 
So, just - # copy the field if it is present - if ('public_keys' in results['metadata'] and not - 'public-keys' in results['metadata']): - results['public-keys'] = results['public_keys'] - def read_content_path(item): # do not use os.path.join here, as content_path starts with / cpath = os.path.sep.join((source_dir, "openstack", -- cgit v1.2.3 From 8730e143ec07372107d794abe9f4857ead6d4718 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 12:23:44 -0500 Subject: pep8 and pylint fixups --- cloudinit/ec2_utils.py | 9 +++------ cloudinit/sources/DataSourceCloudStack.py | 2 +- cloudinit/sources/DataSourceEc2.py | 2 +- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index a278ef04..32bf3968 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -17,10 +17,7 @@ # along with this program. If not, see . import pkg_resources -from pkg_resources import parse_version - -import cloudinit.util as util -import cloudinit.url_helper as uh +from pkg_resources import parse_version as pver import boto.utils as boto_utils @@ -36,7 +33,7 @@ import boto.utils as boto_utils BOTO_LAZY = False try: _boto_lib = pkg_resources.get_distribution('boto') - if _boto_lib.parsed_version > parse_version("2.5.2"): + if _boto_lib.parsed_version > pver("2.5.2"): # pylint: disable=E1103 BOTO_LAZY = True except pkg_resources.DistributionNotFound: pass @@ -47,7 +44,7 @@ def _unlazy_dict(mp): return mp if not BOTO_LAZY: return mp - for (k, v) in mp.items(): + for (_k, v) in mp.items(): _unlazy_dict(v) return mp diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 78cf24d7..076dba5a 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -116,7 +116,7 @@ class DataSourceCloudStack(sources.DataSource): return False start_time = time.time() self.userdata_raw = ec2.get_instance_userdata(self.api_ver, - self.metadata_address) + self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 6f51dfae..2db53446 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -65,7 +65,7 @@ class DataSourceEc2(sources.DataSource): return False start_time = time.time() self.userdata_raw = ec2.get_instance_userdata(self.api_ver, - self.metadata_address) + self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", -- cgit v1.2.3 From 7ba753720cd95bfca61c82445cf9c7882fe5d6f1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 12:26:49 -0500 Subject: config-drive-v2: support public keys This does a couple things: * separates out the 'normalize_public_keys' from the DataSource's get_public_ssh_keys * uses that from config-drive datasource * supports config drive v1 or v2 public-keys * adds a test. 
LP: #1077700 --- ChangeLog | 1 + cloudinit/sources/DataSourceConfigDrive.py | 7 +-- cloudinit/sources/__init__.py | 56 ++++++++++++---------- .../unittests/test_datasource/test_configdrive.py | 26 ++++++++++ 4 files changed, 61 insertions(+), 29 deletions(-) diff --git a/ChangeLog b/ChangeLog index de1bcbff..a68e196e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -26,6 +26,7 @@ - work around the lazy loading of get_instance_metadata in boto >= 2.6.0 by fully walking the dictionary. (LP: #1068801) Added dependency on distribute's python-pkg-resources + - fix public key importing with config-drive-v2 datasource (LP: #1077700) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 9729cfb9..c7826851 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -219,9 +219,10 @@ class DataSourceConfigDrive(sources.DataSource): return True def get_public_ssh_keys(self): - if not 'public-keys' in self.metadata: - return [] - return self.metadata['public-keys'] + name = "public_keys" + if self.version == 1: + name = "public-keys" + return sources.normalize_pubkey_data(self.metadata.get(name)) class DataSourceConfigDriveNet(DataSourceConfigDrive): diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 745627d0..96baff90 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -100,32 +100,7 @@ class DataSource(object): return {} def get_public_ssh_keys(self): - keys = [] - - if not self.metadata or 'public-keys' not in self.metadata: - return keys - - if isinstance(self.metadata['public-keys'], (basestring, str)): - return str(self.metadata['public-keys']).splitlines() - - if isinstance(self.metadata['public-keys'], (list, set)): - return list(self.metadata['public-keys']) - - if isinstance(self.metadata['public-keys'], (dict)): - for (_keyname, klist) in self.metadata['public-keys'].iteritems(): - # lp:506332 uec metadata service responds with - # data that makes boto populate a string for 'klist' rather - # than a list. - if isinstance(klist, (str, basestring)): - klist = [klist] - if isinstance(klist, (list, set)): - for pkey in klist: - # There is an empty string at - # the end of the keylist, trim it - if pkey: - keys.append(pkey) - - return keys + return normalize_pubkey_data(self.metadata.get('public-keys')) def _remap_device(self, short_name): # LP: #611137 @@ -208,6 +183,35 @@ class DataSource(object): availability_zone=self.availability_zone) +def normalize_pubkey_data(pubkey_data): + keys = [] + + if not pubkey_data: + return keys + + if isinstance(pubkey_data, (basestring, str)): + return str(pubkey_data).splitlines() + + if isinstance(pubkey_data, (list, set)): + return list(pubkey_data) + + if isinstance(pubkey_data, (dict)): + for (_keyname, klist) in pubkey_data.iteritems(): + # lp:506332 uec metadata service responds with + # data that makes boto populate a string for 'klist' rather + # than a list. 
+ if isinstance(klist, (str, basestring)): + klist = [klist] + if isinstance(klist, (list, set)): + for pkey in klist: + # There is an empty string at + # the end of the keylist, trim it + if pkey: + keys.append(pkey) + + return keys + + def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): ds_list = list_sources(cfg_list, ds_deps, pkg_list) ds_names = [util.obj_name(f) for f in ds_list] diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 00379e03..aa5b98ed 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -288,6 +288,32 @@ class TestConfigDriveDataSource(MockerTestCase): finally: util.find_devs_with = orig_find_devs_with + def test_pubkeys_v2(self): + """Verify that public-keys work in config-drive-v2.""" + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + myds = cfg_ds_from_dir(self.tmp) + self.assertEqual(myds.get_public_ssh_keys(), + [OSTACK_META['public_keys']['mykey']]) + + +def cfg_ds_from_dir(seed_d): + found = ds.read_config_drive_dir(seed_d) + cfg_ds = ds.DataSourceConfigDrive(settings.CFG_BUILTIN, None, + helpers.Paths({})) + populate_ds_from_read_config(cfg_ds, seed_d, found) + return cfg_ds + + +def populate_ds_from_read_config(cfg_ds, source, results): + """Patch the DataSourceConfigDrive from the results of + read_config_drive_dir hopefully in line with what it would have + if cfg_ds.get_data had been successfully called""" + cfg_ds.source = source + cfg_ds.metadata = results.get('metadata') + cfg_ds.ec2_metadata = results.get('ec2-metadata') + cfg_ds.userdata_raw = results.get('userdata') + cfg_ds.version = results.get('cfgdrive_ver') + def populate_dir(seed_dir, files): for (name, content) in files.iteritems(): -- cgit v1.2.3 From 5a084e641dea06b440f4a539147254bc775640a9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 13:07:12 -0500 Subject: mark changelog entry for lp:1075980 --- ChangeLog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ChangeLog b/ChangeLog index a68e196e..6d80671a 100644 --- a/ChangeLog +++ b/ChangeLog @@ -27,6 +27,8 @@ by fully walking the dictionary. (LP: #1068801) Added dependency on distribute's python-pkg-resources - fix public key importing with config-drive-v2 datasource (LP: #1077700) + - handle renaming and fixing up of marker names (LP: 1075980) [revno 710] + this relieves that burden from the distro/packaging. 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. -- cgit v1.2.3 From fe68049611e12591a632d5264d9ba1b67b0cfa0d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 13:40:57 -0500 Subject: stages.py: fix issue that resulted in broken data source searching This just replaces the portions of stages.py that were checking or setting self.datasource to a DataSourceNone to use a static/global datasource. And then, check for None is done against that. 
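A compact sketch of the sentinel pattern this change uses, with hypothetical names: the placeholder is compared by identity, so a datasource that merely evaluates as falsy cannot be mistaken for "not fetched yet".

    _UNSET = object()   # module-level sentinel, created exactly once

    class DataSourceHolder(object):
        def __init__(self):
            self.datasource = _UNSET

        def have_datasource(self):
            # Compare by identity: only the sentinel itself means
            # "nothing fetched yet"; a falsy datasource still counts.
            return self.datasource is not _UNSET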
--- cloudinit/stages.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index e0cf1cbe..9c231994 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -49,6 +49,7 @@ from cloudinit import util LOG = logging.getLogger(__name__) +NULL_DATA_SOURCE = DataSourceNone.DataSourceNone({}, None, None) class Init(object): def __init__(self, ds_deps=None): @@ -61,7 +62,7 @@ class Init(object): self._paths = None self._distro = None # Changed only when a fetch occurs - self.datasource = DataSourceNone.DataSourceNone({}, None, None) + self.datasource = NULL_DATA_SOURCE def _reset(self, ds=False): # Recreated on access @@ -69,7 +70,7 @@ class Init(object): self._paths = None self._distro = None if ds: - self.datasource = DataSourceNone.DataSourceNone({}, None, None) + self.datasource = NULL_DATA_SOURCE @property def distro(self): @@ -201,7 +202,7 @@ class Init(object): return None def _write_to_cache(self): - if not self.datasource: + if self.datasource is NULL_DATA_SOURCE: return False pickled_fn = self.paths.get_ipath_cur("obj_pkl") try: @@ -227,7 +228,7 @@ class Init(object): return (cfg_list, pkg_list) def _get_data_source(self): - if self.datasource: + if self.datasource is not NULL_DATA_SOURCE: return self.datasource ds = self._restore_from_cache() if ds: -- cgit v1.2.3 From 4944bc286970da1ca59b7d57509e51b6817c60c2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 14:29:14 -0500 Subject: test_handler_yum_add_repo: fix broken test --- tests/unittests/test_handler/test_handler_yum_add_repo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py index c7fcddc4..8df592f9 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py @@ -35,8 +35,8 @@ class TestConfig(helpers.FilesystemMockingTestCase): } self.patchUtils(self.tmp) cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - with self.assertRaises(IOError): - util.load_file("/etc/yum.repos.d/epel_testing.repo") + self.assertRaises(IOError, util.load_file, + "/etc/yum.repos.d/epel_testing.repo") def test_write_config(self): cfg = { -- cgit v1.2.3 From 90af12849c373d3b729c4d7d84dbb69c2ecd885a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 15:26:19 -0500 Subject: change the "null datasource" for stages.py to None Previous commit set up an object for checking "None" in the stages class. Here, change that to be None. The benefit of the DataSourceNone was so that the stages was generally functional even with that "None" object. The negative affect of that is that you could go down the error path much longer when using it. If we use None type, then self.datasource. will fail immediately and loudly. --- cloudinit/stages.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 9c231994..e83d72da 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -49,7 +49,7 @@ from cloudinit import util LOG = logging.getLogger(__name__) -NULL_DATA_SOURCE = DataSourceNone.DataSourceNone({}, None, None) +NULL_DATA_SOURCE = None class Init(object): def __init__(self, ds_deps=None): -- cgit v1.2.3 From 9267bfbed5129060ea3365d5f91b57fee2639647 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 12 Nov 2012 13:53:46 -0800 Subject: Update changelog for previous commits. 
Add in entry for LP: 1068801 and LP: 1075756 as well as LP: 1077245 and add in reason why we are switching back to using 'None' as the initial datasource value. --- ChangeLog | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/ChangeLog b/ChangeLog index 6d80671a..4cae8b32 100644 --- a/ChangeLog +++ b/ChangeLog @@ -29,6 +29,23 @@ - fix public key importing with config-drive-v2 datasource (LP: #1077700) - handle renaming and fixing up of marker names (LP: 1075980) [revno 710] this relieves that burden from the distro/packaging. + - datasource: move back to using the value 'None' for the initial datasource + since it causes early failure if used, instead of transient and + not easily-detectable later failure + - group config: fix how group members weren't being translated correctly + when the group: [member, member...] format was used (LP: #1077245) + - sysconfig: fix how the /etc/sysconfig/network should be using the fully + qualified domain name instead of the partially qualified domain name + which is used in the ubuntu/debian case (LP: #1076759) + - fix how string escaping was not working when the string was a unicode + string which was causing the warning message not to be written + out (LP: #1075756) + - for boto > 0.6.0 there was a lazy load of the metadata added, when cloud-init + runs the usage of this lazy loading is hidden and since that lazy loading + will be performed on future attribute access we must traverse the lazy loaded + dictionary and force it to full expand so that if cloud-init blocks the ec2 + metadata port the lazy loaded dictionary will continue working properly + instead of trying to make additional url calls which will fail (LP: #1068801) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. -- cgit v1.2.3 From 82e90789f11b5371b352a477b75cad0c5d1457ec Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 12 Nov 2012 14:30:08 -0800 Subject: Cleanup of /etc/hosts ordering and pep8/pylint adjustments. Fix how the comparison of a fqdn and its aliases was done via sorting instead of existence checking which is the better way to check if a alias already exists as well as cleanup the new files pep8/pylint issues. LP: #1078097 --- cloudinit/distros/__init__.py | 19 ++++++++++++------- cloudinit/distros/parsers/__init__.py | 1 + cloudinit/distros/parsers/hostname.py | 2 -- cloudinit/distros/parsers/resolv_conf.py | 4 +--- cloudinit/distros/parsers/sys_conf.py | 2 +- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index fa7cc1ca..4bde2393 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -189,15 +189,20 @@ class Distro(object): else: need_change = True for entry in prev_info: - if sorted(entry) == sorted([fqdn, hostname]): - # Exists already, leave it be - need_change = False - break + entry_fqdn = None + entry_aliases = [] + if len(entry) >= 1: + entry_fqdn = entry[0] + if len(entry) >= 2: + entry_aliases = entry[1:] + if entry_fqdn is not None and entry_fqdn == fqdn: + if hostname in entry_aliases: + # Exists already, leave it be + need_change = False if need_change: - # Doesn't exist, change the first - # entry to be this entry + # Doesn't exist, add that entry in... 
new_entries = list(prev_info) - new_entries[0] = [fqdn, hostname] + new_entries.append([fqdn, hostname]) eh.del_entries(local_ip) for entry in new_entries: if len(entry) == 1: diff --git a/cloudinit/distros/parsers/__init__.py b/cloudinit/distros/parsers/__init__.py index 8334a5e6..1c413eaa 100644 --- a/cloudinit/distros/parsers/__init__.py +++ b/cloudinit/distros/parsers/__init__.py @@ -16,6 +16,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . + def chop_comment(text, comment_chars): comment_locations = [text.find(c) for c in comment_chars] comment_locations = [c for c in comment_locations if c != -1] diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index 7e19f017..617b3c36 100644 --- a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -86,5 +86,3 @@ class HostnameConf(object): raise IOError("Multiple hostnames (%s) found!" % (hostnames_found)) return entries - - diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 377ada6b..5733c25a 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -127,7 +127,7 @@ class ResolvConf(object): # Hard restriction on only 6 search domains raise ValueError(("Adding %r would go beyond the " "'6' maximum search domains") % (search_domain)) - s_list = " ".join(new_sds) + s_list = " ".join(new_sds) if len(s_list) > 256: # Some hard limit on 256 chars total raise ValueError(("Adding %r would go beyond the " @@ -167,5 +167,3 @@ class ResolvConf(object): raise IOError("Unexpected resolv.conf option %s" % (cfg_opt)) entries.append(("option", [cfg_opt, cfg_values, tail])) return entries - - diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index 5cd765fc..20ca1871 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -40,7 +40,7 @@ SHELL_VAR_REGEXES = [ # Things like $?, $0, $-, $@ re.compile(r"\$[0-9#\?\-@\*]"), # Things like ${blah:1} - but this one - # gets very complex so just try the + # gets very complex so just try the # simple path re.compile(r"\$\{.+\}"), ] -- cgit v1.2.3 From 29b4adef881852169c6a0c5c95168c768cb6ffc5 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 12 Nov 2012 14:34:01 -0800 Subject: Append changelog additions. LP: #1078097 --- ChangeLog | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ChangeLog b/ChangeLog index 672482ba..1582dfb5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,15 @@ 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) + - use a set of helper/parsing classes to perform system configuration + file modification in a manner that provides a nice object oriented + interface to those objects as well as makes it possible to test + those parsing entities without having to invoke distro class code. + - Created parsers for: + - /etc/sysconfig + - /etc/hostname + - resolv.conf + - /etc/hosts 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. 
-- cgit v1.2.3 From 9c64c5d0e01e48612fe37d3304b1f6eb70181cae Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 22:11:34 -0500 Subject: pylint and pep8 fixes --- cloudinit/stages.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index e83d72da..94a267df 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -36,8 +36,6 @@ from cloudinit.handlers import cloud_config as cc_part from cloudinit.handlers import shell_script as ss_part from cloudinit.handlers import upstart_job as up_part -from cloudinit.sources import DataSourceNone - from cloudinit import cloud from cloudinit import config from cloudinit import distros @@ -51,6 +49,7 @@ LOG = logging.getLogger(__name__) NULL_DATA_SOURCE = None + class Init(object): def __init__(self, ds_deps=None): if ds_deps is not None: -- cgit v1.2.3 From 949e1759342b1e60c100855aaf250165bcb9997e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 22:15:29 -0500 Subject: add 'finalcmd' module for running code after cloud-init-final This allows the user to easily run stuff even after cloud-init-final has finished. The initial reason for it is to be able to run /sbin/poweroff and not have cloud-init complain loudly that it is being killed. LP: #1064665 --- ChangeLog | 2 + cloudinit/config/cc_finalcmd.py | 139 ++++++++++++++++++++++++++++++++++++++++ config/cloud.cfg | 1 + doc/examples/cloud-config.txt | 18 ++++++ 4 files changed, 160 insertions(+) create mode 100644 cloudinit/config/cc_finalcmd.py diff --git a/ChangeLog b/ChangeLog index 4cae8b32..93c3af04 100644 --- a/ChangeLog +++ b/ChangeLog @@ -46,6 +46,8 @@ dictionary and force it to full expand so that if cloud-init blocks the ec2 metadata port the lazy loaded dictionary will continue working properly instead of trying to make additional url calls which will fail (LP: #1068801) + - add 'finalcmd' config module to execute 'finalcmd' entries like + 'runcmd' but detached from cloud-init (LP: #1064665) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. diff --git a/cloudinit/config/cc_finalcmd.py b/cloudinit/config/cc_finalcmd.py new file mode 100644 index 00000000..442ad12b --- /dev/null +++ b/cloudinit/config/cc_finalcmd.py @@ -0,0 +1,139 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011 Canonical Ltd. +# +# Author: Scott Moser +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +from cloudinit.settings import PER_INSTANCE +from cloudinit import util + +import errno +import os +import subprocess +import sys +import time + +frequency = PER_INSTANCE + + +def handle(_name, cfg, _cloud, log, _args): + + finalcmds = cfg.get("finalcmd") + + if not finalcmds: + log.debug("No final commands") + return + + mypid = os.getpid() + cmdline = util.load_file("/proc/%s/cmdline") + + if not cmdline: + log.warn("Failed to get cmdline of current process") + return + + try: + timeout = float(cfg.get("finalcmd_timeout", 30.0)) + except ValueError: + log.warn("failed to convert finalcmd_timeout '%s' to float" % + cfg.get("finalcmd_timeout", 30.0)) + return + + devnull_fp = open("/dev/null", "w") + + shellcode = util.shellify(finalcmds) + + # note, after the fork, we do not use any of cloud-init's functions + # that would attempt to log. The primary reason for that is + # to allow the 'finalcmd' the ability to do just about anything + # and not depend on syslog services. + # Basically, it should "just work" to have finalcmd of: + # - sleep 30 + # - /sbin/poweroff + finalcmd_d = os.path.join(cloud.get_ipath_cur(), "finalcmds") + + util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, + runfinal, (shellcode, finalcmd_d, devnull_fp)) + + +def execmd(exe_args, data_in=None, output=None): + try: + proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, + stdout=output, stderr=subprocess.STDERR) + proc.communicate(data_in) + except Exception as e: + return 254 + return proc.returncode() + + +def runfinal(shellcode, finalcmd_d, output=None): + ret = execmd(("/bin/sh",), data_in=shellcode, output=output) + if not (finalcmd_d and os.path.isdir(finalcmd_d)): + sys.exit(ret) + + fails = 0 + if ret != 0: + fails = 1 + + # now runparts the final command dir + for exe_name in sorted(os.listdir(finalcmd_d)): + exe_path = os.path.join(finalcmd_d, exe_name) + if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): + ret = execmd(exe_path, data_in=None, output=output) + if ret != 0: + fails += 1 + sys.exit(fails) + + +def run_after_pid_gone(pid, pidcmdline, timeout, func, args): + # wait until pid, with /proc/pid/cmdline contents of pidcmdline + # is no longer alive. 
After it is gone, or timeout has passed + # execute func(args) + msg = "ERROR: Uncaught error" + end_time = time.time() + timeout + + cmdline_f = "/proc/%s/cmdline" % pid + + while True: + if time.time() > end_time: + msg = "timeout reached before %s ended" % pid + break + + try: + cmdline = "" + with open(cmdline_f) as fp: + cmdline = fp.read() + if cmdline != pidcmdline: + msg = "cmdline changed for %s [now: %s]" % (pid, cmdline) + break + + except IOError as ioerr: + if ioerr.errno == errno.ENOENT: + msg = "pidfile '%s' gone" % cmdline_f + else: + msg = "ERROR: IOError: %s" % ioerr + raise + break + + except Exception as e: + msg = "ERROR: Exception: %s" % e + raise + + if msg.startswith("ERROR:"): + sys.stderr.write(msg) + sys.stderr.write("Not executing finalcmd") + sys.exit(1) + + sys.stderr.write("calling %s with %s\n" % (func, args)) + sys.exit(func(*args)) diff --git a/config/cloud.cfg b/config/cloud.cfg index ad100fff..249a593d 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -69,6 +69,7 @@ cloud_final_modules: - keys-to-console - phone-home - final-message + - finalcmd # System and/or distro specific settings # (not accessible to handlers/transforms) diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 12bf2c91..4fc5f351 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -256,6 +256,24 @@ bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] +# final commands +# default: none +# This can be used to execute commands after and fully detached from +# a cloud-init stage. The initial purpose of it was to allow 'poweroff' +# detached from cloud-init. If poweroff was run from 'runcmd' or userdata +# then messages may be spewed from cloud-init about logging failing or other +# issues as a result of the system being turned off. +# +# You probably are better off using 'runcmd' for this. +# +# The output of finalcmd will redirected redirected to /dev/null +# If you want output to be seen, take care to do so in your commands +# themselves. See example. +finalcmd: + - sleep 30 + - "echo $(date -R): powering off > /dev/console" + - /sbin/poweroff + # cloud_config_modules: # default: # cloud_config_modules: -- cgit v1.2.3 From 8d5e1e108b0cdd3af872383da7654bec91355b5f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 12 Nov 2012 19:17:48 -0800 Subject: Remove dup and un-needed entries. --- ChangeLog | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/ChangeLog b/ChangeLog index 4cae8b32..e577b618 100644 --- a/ChangeLog +++ b/ChangeLog @@ -23,15 +23,9 @@ - Fix the merging of group configuration when that group configuration is a dict => members. [revno 707] - add yum_add_repo configuration module for adding additional yum repos - - work around the lazy loading of get_instance_metadata in boto >= 2.6.0 - by fully walking the dictionary. (LP: #1068801) - Added dependency on distribute's python-pkg-resources - fix public key importing with config-drive-v2 datasource (LP: #1077700) - handle renaming and fixing up of marker names (LP: 1075980) [revno 710] this relieves that burden from the distro/packaging. - - datasource: move back to using the value 'None' for the initial datasource - since it causes early failure if used, instead of transient and - not easily-detectable later failure - group config: fix how group members weren't being translated correctly when the group: [member, member...] 
format was used (LP: #1077245) - sysconfig: fix how the /etc/sysconfig/network should be using the fully @@ -46,6 +40,7 @@ dictionary and force it to full expand so that if cloud-init blocks the ec2 metadata port the lazy loaded dictionary will continue working properly instead of trying to make additional url calls which will fail (LP: #1068801) + - Added dependency on distribute's python-pkg-resources 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. -- cgit v1.2.3 From 546b9444158d00875100fdd523fccae76226f346 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Nov 2012 22:21:20 -0500 Subject: pass execmd a list, not tuple or string --- cloudinit/config/cc_finalcmd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_finalcmd.py b/cloudinit/config/cc_finalcmd.py index 442ad12b..d921a444 100644 --- a/cloudinit/config/cc_finalcmd.py +++ b/cloudinit/config/cc_finalcmd.py @@ -78,7 +78,7 @@ def execmd(exe_args, data_in=None, output=None): def runfinal(shellcode, finalcmd_d, output=None): - ret = execmd(("/bin/sh",), data_in=shellcode, output=output) + ret = execmd(["/bin/sh",], data_in=shellcode, output=output) if not (finalcmd_d and os.path.isdir(finalcmd_d)): sys.exit(ret) @@ -90,7 +90,7 @@ def runfinal(shellcode, finalcmd_d, output=None): for exe_name in sorted(os.listdir(finalcmd_d)): exe_path = os.path.join(finalcmd_d, exe_name) if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): - ret = execmd(exe_path, data_in=None, output=output) + ret = execmd([exe_path], data_in=None, output=output) if ret != 0: fails += 1 sys.exit(fails) -- cgit v1.2.3 From d5aeda9535ab530fd2c09e6ad37443c9013c3b4d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 12 Nov 2012 22:08:26 -0800 Subject: Fix variable. --- cloudinit/distros/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 464ae550..3fc4483b 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -146,8 +146,7 @@ class Distro(object): def _select_hostname(self, hostname, fqdn): raise NotImplementedError() - def update_hostname(self, hostname, fqdn, - previous_hostname_filename): + def update_hostname(self, hostname, fqdn, prev_hostname_fn): applying_hostname = hostname hostname = self._select_hostname(hostname, fqdn) prev_hostname = self._read_hostname(prev_hostname_fn) -- cgit v1.2.3 From c8c2ff830f917a7ddecab21eeecb99a20c2e9805 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 12 Nov 2012 22:14:31 -0800 Subject: Pylint and pep8 cleanups. 
--- cloudinit/distros/parsers/hosts.py | 1 - cloudinit/distros/rhel.py | 10 +++++----- tests/unittests/test_distros/test_sysconfig.py | 1 - 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py index 958a7c31..94c97051 100644 --- a/cloudinit/distros/parsers/hosts.py +++ b/cloudinit/distros/parsers/hosts.py @@ -90,4 +90,3 @@ class HostsConf(object): pieces = "\t".join(pieces) contents.write("%s%s\n" % (pieces, tail)) return contents.getvalue() - diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 7df01c62..bc0877d5 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -48,7 +48,7 @@ class Distro(distros.Distro): clock_conf_fn = "/etc/sysconfig/clock" locale_conf_fn = '/etc/sysconfig/i18n' network_conf_fn = "/etc/sysconfig/network" - hostname_conf_fn = "/etc/sysconfig/network" + hostname_conf_fn = "/etc/sysconfig/network" network_script_tpl = '/etc/sysconfig/network-scripts/ifcfg-%s' resolve_conf_fn = "/etc/resolv.conf" tz_local_fn = "/etc/localtime" @@ -65,11 +65,11 @@ class Distro(distros.Distro): self.package_command('install', pkglist) def _adjust_resolve(self, dns_servers, search_servers): - r_conf = ResolvConf(util.load_file(self.resolve_conf_fn)) try: + r_conf = ResolvConf(util.load_file(self.resolve_conf_fn)) r_conf.parse() except IOError: - util.logexc(LOG, + util.logexc(LOG, "Failed at parsing %s reverting to an empty instance", self.resolve_conf_fn) r_conf = ResolvConf('') @@ -178,10 +178,10 @@ class Distro(distros.Distro): def _read_conf(self, fn): exists = False - if os.path.isfile(fn): + try: contents = util.load_file(fn).splitlines() exists = True - else: + except IOError: contents = [] return (exists, SysConf(contents)) diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py index 1e34909d..0c651407 100644 --- a/tests/unittests/test_distros/test_sysconfig.py +++ b/tests/unittests/test_distros/test_sysconfig.py @@ -80,4 +80,3 @@ USEMD5=no''' contents = str(conf) self.assertIn("Z=d", contents) self.assertIn("BLAH=b", contents) - -- cgit v1.2.3 From 76005c391a5e59e794db42a9f9a4d46fec679854 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 12 Nov 2012 22:18:21 -0800 Subject: Update the changelog with functions moved. --- ChangeLog | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ChangeLog b/ChangeLog index 13b5a8ed..20b81aa2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -50,6 +50,10 @@ - /etc/hostname - resolv.conf - /etc/hosts + - Moved duplicated functionality into the root level distro class including: +  - apply_hostname +  - set_hostname +  - *various shared configuration file names/paths* 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. -- cgit v1.2.3 From 8e86b79e8a5ab299ca77ec5e69facb807ede322f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 12 Nov 2012 22:19:40 -0800 Subject: Remove these lines which are not needed. 
--- cloudinit/distros/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 3fc4483b..10e07e82 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -41,8 +41,6 @@ LOG = logging.getLogger(__name__) class Distro(object): __metaclass__ = abc.ABCMeta - default_user = None - default_user_groups = None hosts_fn = "/etc/hosts" ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users" hostname_conf_fn = "/etc/hostname" -- cgit v1.2.3 From cd8fbfffdf7ba9ba79d39ee7b4a223d1bd7863b4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 08:48:19 -0500 Subject: replace 'with self.assertRaises(Exception):' with different form I'm guessing that with self.assertRaises(Exception): something here is an acceptable form in a newer or older python. But my python (2.7.3-0ubuntu7) doesn't like it. Interestingly, python unittest doc says: | Changed in version 2.7: Added the ability to use assertRaises() as | a context manager. --- tests/unittests/test_distros/test_resolv.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py index d947dda0..6b6ff6aa 100644 --- a/tests/unittests/test_distros/test_resolv.py +++ b/tests/unittests/test_distros/test_resolv.py @@ -38,8 +38,7 @@ class TestResolvHelper(MockerTestCase): self.assertNotIn('10.3', rp.nameservers) self.assertEquals(len(rp.nameservers), 3) rp.add_nameserver('10.2') - with self.assertRaises(ValueError): - rp.add_nameserver('10.3') + self.assertRaises(ValueError, rp.add_nameserver, '10.3') self.assertNotIn('10.3', rp.nameservers) def test_search_domains(self): @@ -58,6 +57,5 @@ class TestResolvHelper(MockerTestCase): self.assertEquals(len(rp.search_domains), 5) rp.add_search_domain('bbb4.y.com') self.assertEquals(len(rp.search_domains), 6) - with self.assertRaises(ValueError): - rp.add_search_domain('bbb5.y.com') + self.assertRaises(ValueError, rp.add_search_domain, 'bbb5.y.com') self.assertEquals(len(rp.search_domains), 6) -- cgit v1.2.3 From 71928e8c70900843dce4aa3ae84fcd278e4b887a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 08:57:57 -0500 Subject: rename module 'finalcmd' to power-state-change --- cloudinit/config/cc_finalcmd.py | 139 ------------------------------ cloudinit/config/cc_power_state_change.py | 139 ++++++++++++++++++++++++++++++ config/cloud.cfg | 2 +- 3 files changed, 140 insertions(+), 140 deletions(-) delete mode 100644 cloudinit/config/cc_finalcmd.py create mode 100644 cloudinit/config/cc_power_state_change.py diff --git a/cloudinit/config/cc_finalcmd.py b/cloudinit/config/cc_finalcmd.py deleted file mode 100644 index d921a444..00000000 --- a/cloudinit/config/cc_finalcmd.py +++ /dev/null @@ -1,139 +0,0 @@ -# vi: ts=4 expandtab -# -# Copyright (C) 2011 Canonical Ltd. -# -# Author: Scott Moser -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 3, as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -from cloudinit.settings import PER_INSTANCE -from cloudinit import util - -import errno -import os -import subprocess -import sys -import time - -frequency = PER_INSTANCE - - -def handle(_name, cfg, _cloud, log, _args): - - finalcmds = cfg.get("finalcmd") - - if not finalcmds: - log.debug("No final commands") - return - - mypid = os.getpid() - cmdline = util.load_file("/proc/%s/cmdline") - - if not cmdline: - log.warn("Failed to get cmdline of current process") - return - - try: - timeout = float(cfg.get("finalcmd_timeout", 30.0)) - except ValueError: - log.warn("failed to convert finalcmd_timeout '%s' to float" % - cfg.get("finalcmd_timeout", 30.0)) - return - - devnull_fp = open("/dev/null", "w") - - shellcode = util.shellify(finalcmds) - - # note, after the fork, we do not use any of cloud-init's functions - # that would attempt to log. The primary reason for that is - # to allow the 'finalcmd' the ability to do just about anything - # and not depend on syslog services. - # Basically, it should "just work" to have finalcmd of: - # - sleep 30 - # - /sbin/poweroff - finalcmd_d = os.path.join(cloud.get_ipath_cur(), "finalcmds") - - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, - runfinal, (shellcode, finalcmd_d, devnull_fp)) - - -def execmd(exe_args, data_in=None, output=None): - try: - proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, - stdout=output, stderr=subprocess.STDERR) - proc.communicate(data_in) - except Exception as e: - return 254 - return proc.returncode() - - -def runfinal(shellcode, finalcmd_d, output=None): - ret = execmd(["/bin/sh",], data_in=shellcode, output=output) - if not (finalcmd_d and os.path.isdir(finalcmd_d)): - sys.exit(ret) - - fails = 0 - if ret != 0: - fails = 1 - - # now runparts the final command dir - for exe_name in sorted(os.listdir(finalcmd_d)): - exe_path = os.path.join(finalcmd_d, exe_name) - if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): - ret = execmd([exe_path], data_in=None, output=output) - if ret != 0: - fails += 1 - sys.exit(fails) - - -def run_after_pid_gone(pid, pidcmdline, timeout, func, args): - # wait until pid, with /proc/pid/cmdline contents of pidcmdline - # is no longer alive. After it is gone, or timeout has passed - # execute func(args) - msg = "ERROR: Uncaught error" - end_time = time.time() + timeout - - cmdline_f = "/proc/%s/cmdline" % pid - - while True: - if time.time() > end_time: - msg = "timeout reached before %s ended" % pid - break - - try: - cmdline = "" - with open(cmdline_f) as fp: - cmdline = fp.read() - if cmdline != pidcmdline: - msg = "cmdline changed for %s [now: %s]" % (pid, cmdline) - break - - except IOError as ioerr: - if ioerr.errno == errno.ENOENT: - msg = "pidfile '%s' gone" % cmdline_f - else: - msg = "ERROR: IOError: %s" % ioerr - raise - break - - except Exception as e: - msg = "ERROR: Exception: %s" % e - raise - - if msg.startswith("ERROR:"): - sys.stderr.write(msg) - sys.stderr.write("Not executing finalcmd") - sys.exit(1) - - sys.stderr.write("calling %s with %s\n" % (func, args)) - sys.exit(func(*args)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py new file mode 100644 index 00000000..d921a444 --- /dev/null +++ b/cloudinit/config/cc_power_state_change.py @@ -0,0 +1,139 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011 Canonical Ltd. 
+# +# Author: Scott Moser +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from cloudinit.settings import PER_INSTANCE +from cloudinit import util + +import errno +import os +import subprocess +import sys +import time + +frequency = PER_INSTANCE + + +def handle(_name, cfg, _cloud, log, _args): + + finalcmds = cfg.get("finalcmd") + + if not finalcmds: + log.debug("No final commands") + return + + mypid = os.getpid() + cmdline = util.load_file("/proc/%s/cmdline") + + if not cmdline: + log.warn("Failed to get cmdline of current process") + return + + try: + timeout = float(cfg.get("finalcmd_timeout", 30.0)) + except ValueError: + log.warn("failed to convert finalcmd_timeout '%s' to float" % + cfg.get("finalcmd_timeout", 30.0)) + return + + devnull_fp = open("/dev/null", "w") + + shellcode = util.shellify(finalcmds) + + # note, after the fork, we do not use any of cloud-init's functions + # that would attempt to log. The primary reason for that is + # to allow the 'finalcmd' the ability to do just about anything + # and not depend on syslog services. + # Basically, it should "just work" to have finalcmd of: + # - sleep 30 + # - /sbin/poweroff + finalcmd_d = os.path.join(cloud.get_ipath_cur(), "finalcmds") + + util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, + runfinal, (shellcode, finalcmd_d, devnull_fp)) + + +def execmd(exe_args, data_in=None, output=None): + try: + proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, + stdout=output, stderr=subprocess.STDERR) + proc.communicate(data_in) + except Exception as e: + return 254 + return proc.returncode() + + +def runfinal(shellcode, finalcmd_d, output=None): + ret = execmd(["/bin/sh",], data_in=shellcode, output=output) + if not (finalcmd_d and os.path.isdir(finalcmd_d)): + sys.exit(ret) + + fails = 0 + if ret != 0: + fails = 1 + + # now runparts the final command dir + for exe_name in sorted(os.listdir(finalcmd_d)): + exe_path = os.path.join(finalcmd_d, exe_name) + if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): + ret = execmd([exe_path], data_in=None, output=output) + if ret != 0: + fails += 1 + sys.exit(fails) + + +def run_after_pid_gone(pid, pidcmdline, timeout, func, args): + # wait until pid, with /proc/pid/cmdline contents of pidcmdline + # is no longer alive. 
After it is gone, or timeout has passed + # execute func(args) + msg = "ERROR: Uncaught error" + end_time = time.time() + timeout + + cmdline_f = "/proc/%s/cmdline" % pid + + while True: + if time.time() > end_time: + msg = "timeout reached before %s ended" % pid + break + + try: + cmdline = "" + with open(cmdline_f) as fp: + cmdline = fp.read() + if cmdline != pidcmdline: + msg = "cmdline changed for %s [now: %s]" % (pid, cmdline) + break + + except IOError as ioerr: + if ioerr.errno == errno.ENOENT: + msg = "pidfile '%s' gone" % cmdline_f + else: + msg = "ERROR: IOError: %s" % ioerr + raise + break + + except Exception as e: + msg = "ERROR: Exception: %s" % e + raise + + if msg.startswith("ERROR:"): + sys.stderr.write(msg) + sys.stderr.write("Not executing finalcmd") + sys.exit(1) + + sys.stderr.write("calling %s with %s\n" % (func, args)) + sys.exit(func(*args)) diff --git a/config/cloud.cfg b/config/cloud.cfg index 249a593d..c1d8ea0d 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -69,7 +69,7 @@ cloud_final_modules: - keys-to-console - phone-home - final-message - - finalcmd + - power-state-change # System and/or distro specific settings # (not accessible to handlers/transforms) -- cgit v1.2.3 From 58886d757ebd832e8c0de45981a51242370d40fc Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 08:59:21 -0500 Subject: pep8 and pylint --- cloudinit/config/cc_power_state_change.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index d921a444..67e0316b 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -28,7 +28,7 @@ import time frequency = PER_INSTANCE -def handle(_name, cfg, _cloud, log, _args): +def handle(_name, cfg, cloud, log, _args): finalcmds = cfg.get("finalcmd") @@ -70,15 +70,15 @@ def handle(_name, cfg, _cloud, log, _args): def execmd(exe_args, data_in=None, output=None): try: proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, - stdout=output, stderr=subprocess.STDERR) + stdout=output, stderr=subprocess.STDOUT) proc.communicate(data_in) - except Exception as e: + except Exception: return 254 return proc.returncode() def runfinal(shellcode, finalcmd_d, output=None): - ret = execmd(["/bin/sh",], data_in=shellcode, output=output) + ret = execmd(["/bin/sh"], data_in=shellcode, output=output) if not (finalcmd_d and os.path.isdir(finalcmd_d)): sys.exit(ret) -- cgit v1.2.3 From 2113e89b6816d2c9d442103698414cd189ca3412 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 11:18:22 -0500 Subject: implement power_state with tests. --- ChangeLog | 4 +- cloudinit/config/cc_power_state_change.py | 122 +++++++++++---------- doc/examples/cloud-config.txt | 39 ++++--- .../test_handler/test_handler_power_state.py | 88 +++++++++++++++ 4 files changed, 175 insertions(+), 78 deletions(-) create mode 100644 tests/unittests/test_handler/test_handler_power_state.py diff --git a/ChangeLog b/ChangeLog index 57bfc1a4..23612228 100644 --- a/ChangeLog +++ b/ChangeLog @@ -43,8 +43,8 @@ - Added dependency on distribute's python-pkg-resources - use a set of helper/parsing classes to perform system configuration for easier test. (/etc/sysconfig, /etc/hostname, resolv.conf, /etc/hosts) - - add 'finalcmd' config module to execute 'finalcmd' entries like - 'runcmd' but detached from cloud-init (LP: #1064665) + - add power_state_change config module for shutting down stystem after + cloud-init finishes. 
(LP: #1064665) 0.7.0: - add a 'exception_cb' argument to 'wait_for_url'. If provided, this method will be called back with the exception received and the message. diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 67e0316b..07de548c 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -21,6 +21,7 @@ from cloudinit import util import errno import os +import re import subprocess import sys import time @@ -28,83 +29,91 @@ import time frequency = PER_INSTANCE -def handle(_name, cfg, cloud, log, _args): +def handle(_name, cfg, _cloud, log, _args): - finalcmds = cfg.get("finalcmd") - - if not finalcmds: - log.debug("No final commands") + try: + (args, timeout) = load_power_state(cfg) + if args is None: + log.debug("no power_state provided. doing nothing") + return + except Exception as e: + log.warn("%s Not performing power state change!" % str(e)) return mypid = os.getpid() cmdline = util.load_file("/proc/%s/cmdline") if not cmdline: - log.warn("Failed to get cmdline of current process") - return - - try: - timeout = float(cfg.get("finalcmd_timeout", 30.0)) - except ValueError: - log.warn("failed to convert finalcmd_timeout '%s' to float" % - cfg.get("finalcmd_timeout", 30.0)) + log.warn("power_state: failed to get cmdline of current process") return devnull_fp = open("/dev/null", "w") - shellcode = util.shellify(finalcmds) + log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) + + util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, execmd, + [args, devnull_fp]) + + +def load_power_state(cfg): + # returns a tuple of shutdown_command, timeout + # shutdown_command is None if no config found + pstate = cfg.get('power_state') + + if pstate is None: + return (None, None) + + if not isinstance(pstate, dict): + raise TypeError("power_state is not a dict.") - # note, after the fork, we do not use any of cloud-init's functions - # that would attempt to log. The primary reason for that is - # to allow the 'finalcmd' the ability to do just about anything - # and not depend on syslog services. - # Basically, it should "just work" to have finalcmd of: - # - sleep 30 - # - /sbin/poweroff - finalcmd_d = os.path.join(cloud.get_ipath_cur(), "finalcmds") + opt_map = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'} - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, - runfinal, (shellcode, finalcmd_d, devnull_fp)) + mode = pstate.get("mode") + if mode not in opt_map: + raise TypeError("power_state[mode] required, must be one of: %s." % + ','.join(opt_map.keys())) + delay = pstate.get("delay", "now") + if delay != "now" and not re.match("\+[0-9]+", delay): + raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).") -def execmd(exe_args, data_in=None, output=None): + args = ["shutdown", opt_map[mode], delay] + if pstate.get("message"): + args.append(pstate.get("message")) + + try: + timeout = float(pstate.get('timeout', 30.0)) + except ValueError: + raise ValueError("failed to convert timeout '%s' to float." 
% + pstate['timeout']) + + return (args, timeout) + + +def execmd(exe_args, output=None, data_in=None): try: proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, stdout=output, stderr=subprocess.STDOUT) proc.communicate(data_in) except Exception: - return 254 - return proc.returncode() + sys.exit(254) + sys.exit(proc.returncode()) -def runfinal(shellcode, finalcmd_d, output=None): - ret = execmd(["/bin/sh"], data_in=shellcode, output=output) - if not (finalcmd_d and os.path.isdir(finalcmd_d)): - sys.exit(ret) - - fails = 0 - if ret != 0: - fails = 1 - - # now runparts the final command dir - for exe_name in sorted(os.listdir(finalcmd_d)): - exe_path = os.path.join(finalcmd_d, exe_name) - if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): - ret = execmd([exe_path], data_in=None, output=output) - if ret != 0: - fails += 1 - sys.exit(fails) - - -def run_after_pid_gone(pid, pidcmdline, timeout, func, args): +def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): # wait until pid, with /proc/pid/cmdline contents of pidcmdline # is no longer alive. After it is gone, or timeout has passed # execute func(args) - msg = "ERROR: Uncaught error" + msg = None end_time = time.time() + timeout cmdline_f = "/proc/%s/cmdline" % pid + def fatal(msg): + if log: + log.warn(msg) + sys.exit(254) + while True: if time.time() > end_time: msg = "timeout reached before %s ended" % pid @@ -122,18 +131,15 @@ def run_after_pid_gone(pid, pidcmdline, timeout, func, args): if ioerr.errno == errno.ENOENT: msg = "pidfile '%s' gone" % cmdline_f else: - msg = "ERROR: IOError: %s" % ioerr - raise + fatal("IOError during wait: %s" % ioerr) break except Exception as e: - msg = "ERROR: Exception: %s" % e - raise + fatal("Unexpected Exception: %s" % e) - if msg.startswith("ERROR:"): - sys.stderr.write(msg) - sys.stderr.write("Not executing finalcmd") - sys.exit(1) + if not msg: + fatal("Unexpected error in run_after_pid_gone") - sys.stderr.write("calling %s with %s\n" % (func, args)) - sys.exit(func(*args)) + if log: + log.debug(msg) + func(*args) diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt index 4fc5f351..09298655 100644 --- a/doc/examples/cloud-config.txt +++ b/doc/examples/cloud-config.txt @@ -256,24 +256,6 @@ bootcmd: - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] -# final commands -# default: none -# This can be used to execute commands after and fully detached from -# a cloud-init stage. The initial purpose of it was to allow 'poweroff' -# detached from cloud-init. If poweroff was run from 'runcmd' or userdata -# then messages may be spewed from cloud-init about logging failing or other -# issues as a result of the system being turned off. -# -# You probably are better off using 'runcmd' for this. -# -# The output of finalcmd will redirected redirected to /dev/null -# If you want output to be seen, take care to do so in your commands -# themselves. See example. -finalcmd: - - sleep 30 - - "echo $(date -R): powering off > /dev/console" - - /sbin/poweroff - # cloud_config_modules: # default: # cloud_config_modules: @@ -596,3 +578,24 @@ manual_cache_clean: False # A list of key types (first token of a /etc/ssh/ssh_key_*.pub file) # that should be skipped when outputting key fingerprints and keys # to the console respectively. + +## poweroff or reboot system after finished +# default: none +# +# power_state can be used to make the system shutdown, reboot or +# halt after boot is finished. 
This same thing can be acheived by +# user-data scripts or by runcmd by simply invoking 'shutdown'. +# +# Doing it this way ensures that cloud-init is entirely finished with +# modules that would be executed, and avoids any error/log messages +# that may go to the console as a result of system services like +# syslog being taken down while cloud-init is running. +# +# delay: form accepted by shutdown. default is 'now'. other format +# accepted is +m (m in minutes) +# mode: required. must be one of 'poweroff', 'halt', 'reboot' +# message: provided as the message argument to 'shutdown'. default is none. +power_state: + delay: 30 + mode: poweroff + message: Bye Bye diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py new file mode 100644 index 00000000..1149fedc --- /dev/null +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -0,0 +1,88 @@ +from unittest import TestCase + +from cloudinit.config import cc_power_state_change as psc + + +class TestLoadPowerState(TestCase): + def setUp(self): + super(self.__class__, self).setUp() + + def test_no_config(self): + # completely empty config should mean do nothing + (cmd, _timeout) = psc.load_power_state({}) + self.assertEqual(cmd, None) + + def test_irrelevant_config(self): + # no power_state field in config should return None for cmd + (cmd, _timeout) = psc.load_power_state({'foo': 'bar'}) + self.assertEqual(cmd, None) + + def test_invalid_mode(self): + cfg = {'power_state': {'mode': 'gibberish'}} + self.assertRaises(TypeError, psc.load_power_state, cfg) + + cfg = {'power_state': {'mode': ''}} + self.assertRaises(TypeError, psc.load_power_state, cfg) + + def test_empty_mode(self): + cfg = {'power_state': {'message': 'goodbye'}} + self.assertRaises(TypeError, psc.load_power_state, cfg) + + def test_valid_modes(self): + cfg = {'power_state': {}} + for mode in ('halt', 'poweroff', 'reboot'): + cfg['power_state']['mode'] = mode + check_lps_ret(psc.load_power_state(cfg), mode=mode) + + def test_invalid_delay(self): + cfg = {'power_state': {'mode': 'poweroff', 'delay': 'goodbye'}} + self.assertRaises(TypeError, psc.load_power_state, cfg) + + def test_valid_delay(self): + cfg = {'power_state': {'mode': 'poweroff', 'delay': ''}} + for delay in ("now", "+1", "+30"): + cfg['power_state']['delay'] = delay + check_lps_ret(psc.load_power_state(cfg)) + + def test_message_present(self): + cfg = {'power_state': {'mode': 'poweroff', 'message': 'GOODBYE'}} + ret = psc.load_power_state(cfg) + check_lps_ret(psc.load_power_state(cfg)) + self.assertIn(cfg['power_state']['message'], ret[0]) + + def test_no_message(self): + # if message is not present, then no argument should be passed for it + cfg = {'power_state': {'mode': 'poweroff'}} + (cmd, _timeout) = psc.load_power_state(cfg) + self.assertNotIn("", cmd) + check_lps_ret(psc.load_power_state(cfg)) + self.assertTrue(len(cmd) == 3) + + +def check_lps_ret(psc_return, mode=None): + if len(psc_return) != 2: + raise TypeError("length returned = %d" % len(psc_return)) + + errs = [] + cmd = psc_return[0] + timeout = psc_return[1] + + if not 'shutdown' in psc_return[0][0]: + errs.append("string 'shutdown' not in cmd") + + if mode is not None: + opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode] + if opt not in psc_return[0]: + errs.append("opt '%s' not in cmd: %s" % (opt, cmd)) + + if len(cmd) != 3 and len(cmd) != 4: + errs.append("Invalid command length: %s" % len(cmd)) + + try: + float(timeout) + except: + 
errs.append("timeout failed convert to float") + + if len(errs): + lines = ["Errors in result: %s" % str(psc_return)] + errs + raise Exception('\n'.join(lines)) -- cgit v1.2.3 From f3fb7f7d7c83d277634cfe43b82912801ef67311 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 12:07:03 -0500 Subject: sort PKG_MP entries --- packages/bddeb | 6 +++--- packages/brpm | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/packages/bddeb b/packages/bddeb index 2cfddb99..bda3170d 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -28,13 +28,13 @@ import argparse # use in our debian 'control' file, this is a translation of the 'requires' # file pypi package name to a debian/ubuntu package name. PKG_MP = { + 'argparse': 'python-argparse', 'boto': 'python-boto', + 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'oauth': 'python-oauth', - 'pyyaml': 'python-yaml', 'prettytable': 'python-prettytable', - 'argparse': 'python-argparse', - 'cheetah': 'python-cheetah', + 'pyyaml': 'python-yaml', } DEBUILD_ARGS = ["-us", "-S", "-uc"] diff --git a/packages/brpm b/packages/brpm index e6b03609..eea2a046 100755 --- a/packages/brpm +++ b/packages/brpm @@ -34,13 +34,13 @@ from cloudinit import util # this is a translation of the 'requires' # file pypi package name to a redhat/fedora package name. PKG_MP = { + 'argparse': 'python-argparse', 'boto': 'python-boto', 'cheetah': 'python-cheetah', - 'prettytable': 'python-prettytable', - 'oauth': 'python-oauth', 'configobj': 'python-configobj', + 'oauth': 'python-oauth', + 'prettytable': 'python-prettytable', 'pyyaml': 'PyYAML', - 'argparse': 'python-argparse', } # Subdirectories of the ~/rpmbuild dir -- cgit v1.2.3 From 517f52af3c7fa39b0303f337681f77e2d2a4ff6f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 12:08:45 -0500 Subject: add dependency for 'pkg_resources' --- ChangeLog | 2 +- packages/bddeb | 1 + packages/brpm | 1 + packages/debian/control.in | 1 - 4 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 21584bb7..14efc4b1 100644 --- a/ChangeLog +++ b/ChangeLog @@ -40,7 +40,7 @@ dictionary and force it to full expand so that if cloud-init blocks the ec2 metadata port the lazy loaded dictionary will continue working properly instead of trying to make additional url calls which will fail (LP: #1068801) - - Added dependency on distribute's python-pkg-resources + - Added dependency: python-pkg-resources (deb), python-setuptools (rpm) - use a set of helper/parsing classes to perform system configuration for easier test. 
(/etc/sysconfig, /etc/hostname, resolv.conf, /etc/hosts) 0.7.0: diff --git a/packages/bddeb b/packages/bddeb index bda3170d..a92dad70 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -33,6 +33,7 @@ PKG_MP = { 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'oauth': 'python-oauth', + 'pkg_resources': 'python-pkg-resources', 'prettytable': 'python-prettytable', 'pyyaml': 'python-yaml', } diff --git a/packages/brpm b/packages/brpm index eea2a046..0b1e33cf 100755 --- a/packages/brpm +++ b/packages/brpm @@ -39,6 +39,7 @@ PKG_MP = { 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'oauth': 'python-oauth', + 'pkg_resources': 'python-setuptools', 'prettytable': 'python-prettytable', 'pyyaml': 'PyYAML', } diff --git a/packages/debian/control.in b/packages/debian/control.in index dace0356..edb5aff5 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -19,7 +19,6 @@ Standards-Version: 3.9.3 Package: cloud-init Architecture: all Depends: cloud-utils, - distribute, procps, python, #for $r in $requires -- cgit v1.2.3 From 7ea93af0599fc10d69c059e86e3f6e027b8ba7b4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 13:00:35 -0500 Subject: 'name' for default user must be all lower case. if 'name' for the user had something other than all lower case, then pwd.getpwname would fail. --- config/cloud.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/cloud.cfg b/config/cloud.cfg index ad100fff..15cdb473 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -77,7 +77,7 @@ system_info: distro: ubuntu # Default user name + that default users groups (if added/used) default_user: - name: Ubuntu + name: ubuntu lock_passwd: True gecos: Ubuntu groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev] -- cgit v1.2.3 From d8b4f9245331cb8b6f04d63a3a62e45563486512 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 13:09:45 -0500 Subject: fix read of /proc/cmdline --- cloudinit/config/cc_power_state_change.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 07de548c..7f6e780d 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -41,7 +41,7 @@ def handle(_name, cfg, _cloud, log, _args): return mypid = os.getpid() - cmdline = util.load_file("/proc/%s/cmdline") + cmdline = util.load_file("/proc/%s/cmdline" % mypid) if not cmdline: log.warn("power_state: failed to get cmdline of current process") -- cgit v1.2.3 From efd02682c7cfb054490308443f7f9facf83363b5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 13:24:37 -0500 Subject: clean up exit to single function call --- cloudinit/config/cc_power_state_change.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 7f6e780d..22f1aade 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -28,6 +28,8 @@ import time frequency = PER_INSTANCE +EXIT_FAIL = 254 + def handle(_name, cfg, _cloud, log, _args): @@ -90,14 +92,19 @@ def load_power_state(cfg): return (args, timeout) +def doexit(sysexit): + os._exit(sysexit) # pylint: disable=W0212 + + def execmd(exe_args, output=None, data_in=None): try: proc = subprocess.Popen(exe_args, stdin=subprocess.PIPE, stdout=output, stderr=subprocess.STDOUT) proc.communicate(data_in) 
+ ret = proc.returncode except Exception: - sys.exit(254) - sys.exit(proc.returncode()) + doexit(EXIT_FAIL) + doexit(ret) def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): @@ -112,7 +119,7 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): def fatal(msg): if log: log.warn(msg) - sys.exit(254) + doexit(EXIT_FAIL) while True: if time.time() > end_time: -- cgit v1.2.3 From 10147364638595d6c1a7077cc47c205413ef13a6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 13 Nov 2012 11:02:03 -0800 Subject: Even when using boto < 2.6 force the unlazying to occur It seems like its possible that boto 2.5.2 and below have the lazy loading metadata dictionary so as a precaution we will always take the hit of unlazying the metadata dictionary by traversing it which in the non-lazy dictionary case has no effect (its marginal). This also removes the need to check the boto version and the dependency on setup tools just for this case. --- cloudinit/ec2_utils.py | 34 +++++++++++++--------------------- 1 file changed, 13 insertions(+), 21 deletions(-) diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index 32bf3968..46b93f39 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -16,34 +16,26 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import pkg_resources -from pkg_resources import parse_version as pver - import boto.utils as boto_utils -# Versions of boto >= 2.6.0 try to lazily load -# the metadata backing, which doesn't work so well -# in cloud-init especially since the metadata is -# serialized and actions are performed where the -# metadata server may be blocked (thus the datasource -# will start failing) resulting in url exceptions -# when fields that do exist (or would have existed) -# do not exist due to the blocking that occurred. - -BOTO_LAZY = False -try: - _boto_lib = pkg_resources.get_distribution('boto') - if _boto_lib.parsed_version > pver("2.5.2"): # pylint: disable=E1103 - BOTO_LAZY = True -except pkg_resources.DistributionNotFound: - pass +# Versions of boto >= 2.6.0 (and possibly 2.5.2) +# try to lazily load the metadata backing, which +# doesn't work so well in cloud-init especially +# since the metadata is serialized and actions are +# performed where the metadata server may be blocked +# (thus the datasource will start failing) resulting +# in url exceptions when fields that do exist (or +# would have existed) do not exist due to the blocking +# that occurred. def _unlazy_dict(mp): if not isinstance(mp, (dict)): return mp - if not BOTO_LAZY: - return mp + # Walk over the keys/values which + # forces boto to unlazy itself and + # has no effect on dictionaries that + # already have there items. for (_k, v) in mp.items(): _unlazy_dict(v) return mp -- cgit v1.2.3 From 8239df4493d81db1d245eaa51fdfe5458e2d1e4f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 14:02:14 -0500 Subject: add sleep, allow errno '3' (ESRCH) on the open of /proc/pid/cmdline The sleep is added here simply to not completely spin cpu on waiting for the parent pid to die. the allowing of errno 3 is because I was getting this. I dont have a perfect explanation, but I suspect that the 'open' was actually getting this back from the /proc filesystem after the pid had died. Possibly in the window between when the 'open' was done and the 'read()' was done. 
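
The race described above can surface either as ENOENT (the /proc entry is already gone at open() time) or, apparently, as ESRCH (errno 3) when the process exits between open() and read(). A minimal standalone sketch of a read that tolerates both, using only the standard library; the function name and structure are illustrative and not taken from the patch:

    import errno

    def read_watched_cmdline(pid):
        # Return the cmdline contents, or None if the watched process is gone.
        path = "/proc/%s/cmdline" % pid
        try:
            with open(path) as fp:
                return fp.read()
        except IOError as err:
            # ENOENT: the /proc entry vanished before open().
            # ESRCH: seen when the process dies between open() and read().
            if err.errno in (errno.ENOENT, errno.ESRCH):
                return None
            raise
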
--- cloudinit/config/cc_power_state_change.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 22f1aade..02434322 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -121,6 +121,8 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): log.warn(msg) doexit(EXIT_FAIL) + known_errnos = (errno.ENOENT, errno.ESRCH) + while True: if time.time() > end_time: msg = "timeout reached before %s ended" % pid @@ -135,8 +137,8 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): break except IOError as ioerr: - if ioerr.errno == errno.ENOENT: - msg = "pidfile '%s' gone" % cmdline_f + if ioerr.errno in known_errnos: + msg = "pidfile '%s' gone [%d]" % (cmdline_f, ioerr.errno) else: fatal("IOError during wait: %s" % ioerr) break @@ -144,6 +146,8 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args): except Exception as e: fatal("Unexpected Exception: %s" % e) + time.sleep(.25) + if not msg: fatal("Unexpected error in run_after_pid_gone") -- cgit v1.2.3 From df38cb7a9f99fbb5fdaddeb08e43d74af30372c8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 14:15:07 -0500 Subject: use os.devnull [trivial] --- cloudinit/config/cc_power_state_change.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 02434322..a2e6920d 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -49,7 +49,7 @@ def handle(_name, cfg, _cloud, log, _args): log.warn("power_state: failed to get cmdline of current process") return - devnull_fp = open("/dev/null", "w") + devnull_fp = open(os.devnull, "w") log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) -- cgit v1.2.3 From bf3c3bb2f5f443aa44a0ff85037861b73c3aad65 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 15:25:57 -0500 Subject: make migrator walk the "cloud" path also the migrator was not renaming items in the "cloud" semaphore path. Those were items that would run once only. 
Now we just check both ipath('sem') and cpath('sem') --- cloudinit/config/cc_migrator.py | 64 +++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 31 deletions(-) diff --git a/cloudinit/config/cc_migrator.py b/cloudinit/config/cc_migrator.py index 58232fc9..facaa538 100644 --- a/cloudinit/config/cc_migrator.py +++ b/cloudinit/config/cc_migrator.py @@ -28,48 +28,50 @@ frequency = PER_ALWAYS def _migrate_canon_sems(cloud): - sem_path = cloud.paths.get_ipath('sem') - if not sem_path or not os.path.exists(sem_path): - return 0 + paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) am_adjusted = 0 - for p in os.listdir(sem_path): - full_path = os.path.join(sem_path, p) - if os.path.isfile(full_path): - (name, ext) = os.path.splitext(p) - canon_name = helpers.canon_sem_name(name) - if canon_name != name: - new_path = os.path.join(sem_path, canon_name + ext) - shutil.move(full_path, new_path) - am_adjusted += 1 + for sem_path in paths: + if not sem_path or not os.path.exists(sem_path): + continue + for p in os.listdir(sem_path): + full_path = os.path.join(sem_path, p) + if os.path.isfile(full_path): + (name, ext) = os.path.splitext(p) + canon_name = helpers.canon_sem_name(name) + if canon_name != name: + new_path = os.path.join(sem_path, canon_name + ext) + shutil.move(full_path, new_path) + am_adjusted += 1 return am_adjusted def _migrate_legacy_sems(cloud, log): - sem_path = cloud.paths.get_ipath('sem') - if not sem_path or not os.path.exists(sem_path): - return legacy_adjust = { 'apt-update-upgrade': [ 'apt-configure', 'package-update-upgrade-install', ], } - sem_helper = helpers.FileSemaphores(sem_path) - for (mod_name, migrate_to) in legacy_adjust.items(): - possibles = [mod_name, helpers.canon_sem_name(mod_name)] - old_exists = [] - for p in os.listdir(sem_path): - (name, _ext) = os.path.splitext(p) - if name in possibles and os.path.isfile(p): - old_exists.append(p) - for p in old_exists: - util.del_file(os.path.join(sem_path, p)) - (_name, freq) = os.path.splitext(p) - for m in migrate_to: - log.debug("Migrating %s => %s with the same frequency", - p, m) - with sem_helper.lock(m, freq): - pass + paths = (cloud.paths.get_ipath('sem'), cloud.paths.get_cpath('sem')) + for sem_path in paths: + if not sem_path or not os.path.exists(sem_path): + continue + sem_helper = helpers.FileSemaphores(sem_path) + for (mod_name, migrate_to) in legacy_adjust.items(): + possibles = [mod_name, helpers.canon_sem_name(mod_name)] + old_exists = [] + for p in os.listdir(sem_path): + (name, _ext) = os.path.splitext(p) + if name in possibles and os.path.isfile(p): + old_exists.append(p) + for p in old_exists: + util.del_file(os.path.join(sem_path, p)) + (_name, freq) = os.path.splitext(p) + for m in migrate_to: + log.debug("Migrating %s => %s with the same frequency", + p, m) + with sem_helper.lock(m, freq): + pass def handle(name, cfg, cloud, log, _args): -- cgit v1.2.3 From e016e7c7837c70f5fce0c3b3d9bd944a8d43f9f0 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 13 Nov 2012 15:27:00 -0500 Subject: check for a marker file by the normal name also This check is a waste of a stat any time after the migrator module had run. As it would take care of moving markers. However, if the user runs: sudo cloud-init modules --mode final after an upgrade, they'd otherwise run any module that had a '-' in its name again. To avoid that, we just return true in that case, and inform the user how to run the migrator themselves. 
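
The check added below in cloudinit/helpers.py relies on canon_sem_name() producing a different spelling for module names that contain '-'. That helper is not shown in this excerpt, so the following is only an assumed sketch of its behaviour, for illustration:

    def canon_sem_name(name):
        # Assumed canonicalization: dashes become underscores, so a marker
        # recorded as "power-state-change" is looked up as "power_state_change".
        return name.replace("-", "_")

    # Both spellings of an old marker therefore have to be considered, e.g.:
    #   canon_sem_name("apt-update-upgrade") -> "apt_update_upgrade"
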
--- cloudinit/helpers.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 794f00ec..2077401c 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -125,12 +125,23 @@ class FileSemaphores(object): def has_run(self, name, freq): if not freq or freq == PER_ALWAYS: return False - name = canon_sem_name(name) - sem_file = self._get_path(name, freq) + + cname = canon_sem_name(name) + sem_file = self._get_path(cname, freq) # This isn't really a good atomic check # but it suffices for where and when cloudinit runs if os.path.exists(sem_file): return True + + # this case could happen if the migrator module hadn't run yet + # but the item had run before we did canon_sem_name. + if cname != name and os.path.exists(self._get_path(name, freq)): + LOG.warn("%s has run without canonicalized name [%s].\n" + "likely the migrator has not yet run. It will run next boot.\n" + "run manually with: cloud-init single --name=migrator" + % (name, cname)) + return True + return False def _get_path(self, name, freq): -- cgit v1.2.3 From c7c6ac0aa83192ffc267c27878712652dade35d1 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 13 Nov 2012 13:47:30 -0800 Subject: Only attempt to read the previous hostname file if it exists. Instead of always reading the previous hostname file even if it did not exist lets only read it if it is a valid variable and is actually a existent file instead of just attempting to read it always. LP: #1078452 --- cloudinit/distros/__init__.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 10e07e82..ea0bac23 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -146,17 +146,37 @@ class Distro(object): def update_hostname(self, hostname, fqdn, prev_hostname_fn): applying_hostname = hostname + + # Determine what the actual written hostname should be hostname = self._select_hostname(hostname, fqdn) - prev_hostname = self._read_hostname(prev_hostname_fn) + + # If the previous hostname file exists lets see if we + # can get a hostname from it + if prev_hostname_fn and os.path.exists(prev_hostname_fn): + prev_hostname = self._read_hostname(prev_hostname_fn) + else: + prev_hostname = None + + # Lets get where we should write the system hostname + # and what the system hostname is (sys_fn, sys_hostname) = self._read_system_hostname() update_files = [] + + # If there is no previous hostname or it differs + # from what we want, lets update it or create the + # file in the first place if not prev_hostname or prev_hostname != hostname: update_files.append(prev_hostname_fn) + # If the system hostname is different than the previous + # one or the desired one lets update it as well if (not sys_hostname) or (sys_hostname == prev_hostname and sys_hostname != hostname): update_files.append(sys_fn) + # Remove duplicates (incase the previous config filename) + # is the same as the system config filename, don't bother + # doing it twice update_files = set([f for f in update_files if f]) LOG.debug("Attempting to update hostname to %s in %s files", hostname, len(update_files)) @@ -173,6 +193,8 @@ class Distro(object): LOG.debug("%s differs from %s, assuming user maintained hostname.", prev_hostname_fn, sys_fn) + # If the system hostname file name was provided set the + # non-fqdn as the transient hostname. 
if sys_fn in update_files: self._apply_hostname(applying_hostname) -- cgit v1.2.3 From e91fbc058cdd709a561863202231076788323782 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 13 Nov 2012 15:24:53 -0800 Subject: Update how errors are handled when writing and reading hostnames. --- cloudinit/distros/debian.py | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index b6e7654f..7422f4f0 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -88,37 +88,37 @@ class Distro(distros.Distro): return hostname def _write_hostname(self, your_hostname, out_fn): - conf = self._read_hostname_conf(out_fn) + conf = None + try: + # Try to update the previous one + # so lets see if we can read it first. + conf = self._read_hostname_conf(out_fn) + except IOError: + pass if not conf: conf = HostnameConf('') - conf.parse() conf.set_hostname(your_hostname) util.write_file(out_fn, str(conf), 0644) def _read_system_hostname(self): - conf = self._read_hostname_conf(self.hostname_conf_fn) - if conf: - sys_hostname = conf.hostname - else: - sys_hostname = None + sys_hostname = self._read_hostname(self.hostname_conf_fn) return (self.hostname_conf_fn, sys_hostname) def _read_hostname_conf(self, filename): - try: - conf = HostnameConf(util.load_file(filename)) - conf.parse() - return conf - except IOError: - util.logexc(LOG, "Error reading hostname from %s", filename) - return None + conf = HostnameConf(util.load_file(filename)) + conf.parse() + return conf def _read_hostname(self, filename, default=None): - conf = self._read_hostname_conf(filename) - if not conf: - return default - if not conf.hostname: + hostname = None + try: + conf = self._read_hostname_conf(filename) + hostname = conf.hostname + except IOError: + pass + if not hostname: return default - return conf.hostname + return hostname def _get_localhost_ip(self): # Note: http://www.leonardoborda.com/blog/127-0-1-1-ubuntu-debian/ -- cgit v1.2.3 From 5c63171d2ac45a1931130d14e33579beb6902fdb Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 13 Nov 2012 15:49:46 -0800 Subject: Create a utility testcase class that fixes some of the 2.6 missing pieces - Add a helper testcase class that can add additional features into the unit test class as we need for features that are useful to have which starts with features that are missing including assertIn and assertNotIn LP: #1078473 --- tests/unittests/helpers.py | 23 ++++++++++++++++++++++ .../test_handler/test_handler_power_state.py | 6 +++--- 2 files changed, 26 insertions(+), 3 deletions(-) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index e8080668..92540b0c 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -1,4 +1,6 @@ import os +import sys +import unittest from mocker import MockerTestCase @@ -7,6 +9,27 @@ from cloudinit import util import shutil +# Handle how 2.6 doesn't have the assertIn or assertNotIn +_PY_VER = sys.version_info +_PY_MAJOR, _PY_MINOR = _PY_VER[0:2] +if (_PY_MAJOR, _PY_MINOR) <= (2, 6): + # For now add these on, taken from python 2.7 + slightly adjusted + class TestCase(unittest.TestCase): + def assertIn(self, member, container, msg=None): + if member not in container: + standardMsg = '%r not found in %r' % (member, container) + self.fail(self._formatMessage(msg, standardMsg)) + + def assertNotIn(self, member, container, msg=None): + if member in container: + standardMsg = '%r unexpectedly found in 
%r' + standardMsg = standardMsg % (member, container) + self.fail(self._formatMessage(msg, standardMsg)) + +else: + class TestCase(unittest.TestCase): + pass + # Makes the old path start # with new base instead of whatever diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 1149fedc..f6e37fa5 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -1,9 +1,9 @@ -from unittest import TestCase - from cloudinit.config import cc_power_state_change as psc +from tests.unittests import helpers as t_help + -class TestLoadPowerState(TestCase): +class TestLoadPowerState(t_help.TestCase): def setUp(self): super(self.__class__, self).setUp() -- cgit v1.2.3 From c94c94bca6bb51ec6b0e4df43aeb9d87e978d0fe Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Nov 2012 14:28:18 -0500 Subject: set shell for default ubuntu user to /bin/bash --- config/cloud.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/config/cloud.cfg b/config/cloud.cfg index d49ef7b8..e0306a49 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -82,6 +82,7 @@ system_info: lock_passwd: True gecos: Ubuntu groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev] + shell: /bin/bash # Other config here will be given to the distro class and/or path classes paths: cloud_dir: /var/lib/cloud/ -- cgit v1.2.3 From 5ce32eb77572d93d37ec01af452955151c913e9c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Nov 2012 15:27:47 -0500 Subject: open 0.7.2 --- ChangeLog | 1 + 1 file changed, 1 insertion(+) diff --git a/ChangeLog b/ChangeLog index cc0dc607..427aadc5 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,4 @@ +0.7.2: 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) -- cgit v1.2.3 From f97befb58244a4b3e72e73fe6d90772a4b3584c9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 14 Nov 2012 15:28:16 -0500 Subject: add debian watch --- ChangeLog | 1 + packages/debian/watch | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 packages/debian/watch diff --git a/ChangeLog b/ChangeLog index 427aadc5..cac92bce 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,4 +1,5 @@ 0.7.2: + - add a debian watch file 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/packages/debian/watch b/packages/debian/watch new file mode 100644 index 00000000..0f7a600b --- /dev/null +++ b/packages/debian/watch @@ -0,0 +1,2 @@ +version=3 +https://launchpad.net/cloud-init/+download .*/\+download/cloud-init-(.+)\.tar.gz -- cgit v1.2.3 From e0b30aa70356b0d2c29ca83419a847be2918afe8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 14 Nov 2012 13:32:14 -0800 Subject: Bump the version to 0.7.2 --- cloudinit/version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/version.py b/cloudinit/version.py index 12ff620a..024d5118 100644 --- a/cloudinit/version.py +++ b/cloudinit/version.py @@ -20,7 +20,7 @@ from distutils import version as vr def version(): - return vr.StrictVersion("0.7.1") + return vr.StrictVersion("0.7.2") def version_string(): -- cgit v1.2.3 From 3be1f230048268253889d4d365d06bcd0cbcef77 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 14 Nov 2012 14:00:48 -0800 Subject: Add a check on the changelog version comparing to the code version. 
--- Makefile | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 2a6be961..b659836f 100644 --- a/Makefile +++ b/Makefile @@ -5,8 +5,10 @@ PY_FILES+="bin/cloud-init" YAML_FILES=$(shell find cloudinit bin tests tools -name "*.yaml" -type f ) YAML_FILES+=$(shell find doc/examples -name "cloud-config*.txt" -type f ) +CHANGELOG_VERSION=$(shell $(CWD)/tools/read-version) +CODE_VERSION=$(shell python -c "from cloudinit import version; print version.version_string()") -all: test +all: test check_version pep8: @$(CWD)/tools/run-pep8 $(PY_FILES) @@ -20,6 +22,12 @@ pyflakes: test: @nosetests $(noseopts) tests/ +check_version: + @if [ "$(CHANGELOG_VERSION)" != "$(CODE_VERSION)" ]; then \ + echo "Error: ChangeLog version $(CHANGELOG_VERSION)" \ + "not equal to code version $(CODE_VERSION)"; exit 2; \ + else true; fi + 2to3: 2to3 $(PY_FILES) @@ -36,5 +44,5 @@ rpm: deb: ./packages/bddeb -.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml +.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb yaml check_version -- cgit v1.2.3 From 75c3482a8685151407c186ce5b1f3b8af3db49d4 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 14 Nov 2012 19:22:38 -0800 Subject: Fix sudoers being written multiple times when strings are used. LP: #1079002 --- cloudinit/distros/__init__.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index ea0bac23..24e6f637 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -24,7 +24,6 @@ from StringIO import StringIO import abc -import collections import itertools import os import re @@ -421,7 +420,7 @@ class Distro(object): '', "# User rules for %s" % user, ] - if isinstance(rules, collections.Iterable): + if isinstance(rules, (list, tuple)): for rule in rules: lines.append("%s %s" % (user, rule)) else: -- cgit v1.2.3 From 7b9540b0a17cfcdb94bb133f1413b3a3f68433ef Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 14 Nov 2012 22:40:01 -0800 Subject: Add a test to make sure this doesn't happen again. --- tests/unittests/test_distros/test_generic.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 704699b5..63a4af29 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -55,6 +55,29 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): # Make a temp directoy for tests to use. 
self.tmp = self.makeDir() + def test_sudoers_ensure_rules(self): + rules = ['ALL=(ALL:ALL) ALL'] + rules.append('B-ALL=(ALL:ALL) ALL') + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + os.makedirs(os.path.join(self.tmp, "etc")) + os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d')) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + d.write_sudo_rules("harlowja", rules) + contents = util.load_file(d.ci_sudoers_fn) + self.restore() + lines = contents.splitlines() + found_amount = 0 + expected = ['harlowja ALL=(ALL:ALL) ALL'] + expected.append('harlowja B-ALL=(ALL:ALL) ALL') + for e in expected: + for line in lines: + line = line.strip() + if line == e: + found_amount += 1 + self.assertEquals(2, found_amount) + def test_sudoers_ensure_new(self): cls = distros.fetch("ubuntu") d = cls("ubuntu", {}, None) -- cgit v1.2.3 From ae6c2665d51fffe5fdc2a15b7f97af0d8c1484af Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 14 Nov 2012 23:11:20 -0800 Subject: Add the string sudoers rule test case as well. --- tests/unittests/test_distros/test_generic.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 63a4af29..8ceb141a 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -56,6 +56,27 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): self.tmp = self.makeDir() def test_sudoers_ensure_rules(self): + rules = 'ALL=(ALL:ALL) ALL' + cls = distros.fetch("ubuntu") + d = cls("ubuntu", {}, None) + os.makedirs(os.path.join(self.tmp, "etc")) + os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d')) + self.patchOS(self.tmp) + self.patchUtils(self.tmp) + d.write_sudo_rules("harlowja", rules) + contents = util.load_file(d.ci_sudoers_fn) + self.restore() + lines = contents.splitlines() + found_amount = 0 + expected = ['harlowja ALL=(ALL:ALL) ALL'] + for e in expected: + for line in lines: + line = line.strip() + if line == e: + found_amount += 1 + self.assertEquals(1, found_amount) + + def test_sudoers_ensure_rules_list(self): rules = ['ALL=(ALL:ALL) ALL'] rules.append('B-ALL=(ALL:ALL) ALL') cls = distros.fetch("ubuntu") -- cgit v1.2.3 From 6aaa0482f44f66f9bb3c89e133c25b3bde755a5e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 14 Nov 2012 23:24:09 -0800 Subject: Cleanup the tests slightly. --- tests/unittests/test_distros/test_generic.py | 65 ++++++++++++++++------------ 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 8ceb141a..3ca769b4 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -55,8 +55,7 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): # Make a temp directoy for tests to use. 
self.tmp = self.makeDir() - def test_sudoers_ensure_rules(self): - rules = 'ALL=(ALL:ALL) ALL' + def _write_load_sudoers(self, user, rules): cls = distros.fetch("ubuntu") d = cls("ubuntu", {}, None) os.makedirs(os.path.join(self.tmp, "etc")) @@ -66,38 +65,48 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): d.write_sudo_rules("harlowja", rules) contents = util.load_file(d.ci_sudoers_fn) self.restore() - lines = contents.splitlines() + return contents + + def _count_in(self, lines_look_for, text_content): found_amount = 0 - expected = ['harlowja ALL=(ALL:ALL) ALL'] - for e in expected: - for line in lines: + for e in lines_look_for: + for line in text_content.splitlines(): line = line.strip() if line == e: found_amount += 1 - self.assertEquals(1, found_amount) + return found_amount - def test_sudoers_ensure_rules_list(self): - rules = ['ALL=(ALL:ALL) ALL'] - rules.append('B-ALL=(ALL:ALL) ALL') - cls = distros.fetch("ubuntu") - d = cls("ubuntu", {}, None) - os.makedirs(os.path.join(self.tmp, "etc")) - os.makedirs(os.path.join(self.tmp, "etc", 'sudoers.d')) - self.patchOS(self.tmp) - self.patchUtils(self.tmp) - d.write_sudo_rules("harlowja", rules) - contents = util.load_file(d.ci_sudoers_fn) - self.restore() - lines = contents.splitlines() - found_amount = 0 + def test_sudoers_ensure_rules(self): + rules = 'ALL=(ALL:ALL) ALL' + contents = self._write_load_sudoers('harlowja', rules) expected = ['harlowja ALL=(ALL:ALL) ALL'] - expected.append('harlowja B-ALL=(ALL:ALL) ALL') - for e in expected: - for line in lines: - line = line.strip() - if line == e: - found_amount += 1 - self.assertEquals(2, found_amount) + self.assertEquals(len(expected), self._count_in(expected, contents)) + not_expected = [ + 'harlowja A', + 'harlowja L', + 'harlowja L', + ] + self.assertEquals(0, self._count_in(not_expected, contents)) + + def test_sudoers_ensure_rules_list(self): + rules = [ + 'ALL=(ALL:ALL) ALL', + 'B-ALL=(ALL:ALL) ALL', + 'C-ALL=(ALL:ALL) ALL', + ] + contents = self._write_load_sudoers('harlowja', rules) + expected = [ + 'harlowja ALL=(ALL:ALL) ALL', + 'harlowja B-ALL=(ALL:ALL) ALL', + 'harlowja C-ALL=(ALL:ALL) ALL', + ] + self.assertEquals(len(expected), self._count_in(expected, contents)) + not_expected = [ + 'harlowja A', + 'harlowja L', + 'harlowja L', + ] + self.assertEquals(0, self._count_in(not_expected, contents)) def test_sudoers_ensure_new(self): cls = distros.fetch("ubuntu") -- cgit v1.2.3 From ef915a6ec712d89b9e0b3672947571976a49b68f Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 15 Nov 2012 12:32:05 -0800 Subject: Raise a type error when a sudoers rule is not an accepted type. 
--- cloudinit/distros/__init__.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 24e6f637..e724a418 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -423,8 +423,11 @@ class Distro(object): if isinstance(rules, (list, tuple)): for rule in rules: lines.append("%s %s" % (user, rule)) - else: + elif isinstance(rules, (basestring, str)): lines.append("%s %s" % (user, rules)) + else: + msg = "Can not create sudoers rule addition with type %r" + raise TypeError(msg % (util.obj_name(rules))) content = "\n".join(lines) self.ensure_sudo_dir(os.path.dirname(sudo_file)) -- cgit v1.2.3 From 7a7cf335453146a775c8566e5a4ed4716a03a874 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 19 Nov 2012 09:25:09 -0500 Subject: add 'sudo' entry for default user in default config/cloud.cfg LP: #1080717 --- ChangeLog | 1 + config/cloud.cfg | 1 + 2 files changed, 2 insertions(+) diff --git a/ChangeLog b/ChangeLog index cac92bce..767b13d6 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,5 +1,6 @@ 0.7.2: - add a debian watch file + - add 'sudo' entry to ubuntu's default user (LP: #1080717) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/config/cloud.cfg b/config/cloud.cfg index e0306a49..a8c74486 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -82,6 +82,7 @@ system_info: lock_passwd: True gecos: Ubuntu groups: [adm, audio, cdrom, dialout, floppy, video, plugdev, dip, netdev] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] shell: /bin/bash # Other config here will be given to the distro class and/or path classes paths: -- cgit v1.2.3 From 3cb9a6ed620ab9200a18bf69cdac5ac518ca214c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 20 Nov 2012 01:04:31 -0500 Subject: pep8 and pylint --- cloudinit/distros/__init__.py | 1 + tests/unittests/test_distros/test_generic.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index e724a418..6a684b89 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -429,6 +429,7 @@ class Distro(object): msg = "Can not create sudoers rule addition with type %r" raise TypeError(msg % (util.obj_name(rules))) content = "\n".join(lines) + content += "\n" # trailing newline self.ensure_sudo_dir(os.path.dirname(sudo_file)) if not os.path.exists(sudo_file): diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 3ca769b4..7befb8c8 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -55,7 +55,7 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): # Make a temp directoy for tests to use. self.tmp = self.makeDir() - def _write_load_sudoers(self, user, rules): + def _write_load_sudoers(self, _user, rules): cls = distros.fetch("ubuntu") d = cls("ubuntu", {}, None) os.makedirs(os.path.join(self.tmp, "etc")) -- cgit v1.2.3 From d324a2cb0b10a4cd1b1b05dd23d0040ab3e9621c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 20 Nov 2012 01:05:36 -0500 Subject: fix "resize_root: noblock" resize_root: noblock has been broken in the 0.7.0 series. Using it would disable resizing. 
LP: #1080985 --- ChangeLog | 1 + cloudinit/config/cc_resizefs.py | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 767b13d6..bd52f182 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ 0.7.2: - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) + - fix resizefs module when 'noblock' was provided (LP: #1080985) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index b958f332..70294eda 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -32,6 +32,8 @@ RESIZE_FS_PREFIXES_CMDS = [ ('xfs', 'xfs_growfs'), ] +NOBLOCK = "noblock" + def nodeify_path(devpth, where, log): try: @@ -68,7 +70,7 @@ def handle(name, cfg, _cloud, log, args): else: resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True) - if not util.translate_bool(resize_root): + if not util.translate_bool(resize_root, addons=[NOBLOCK]): log.debug("Skipping module named %s, resizing disabled", name) return @@ -110,7 +112,7 @@ def handle(name, cfg, _cloud, log, args): log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer) resize_cmd = [resizer, devpth] - if resize_root == "noblock": + if resize_root == NOBLOCK: # Fork to a child that will run # the resize command util.fork_cb(do_resize, resize_cmd, log) @@ -120,7 +122,7 @@ def handle(name, cfg, _cloud, log, args): do_resize(resize_cmd, log) action = 'Resized' - if resize_root == "noblock": + if resize_root == NOBLOCK: action = 'Resizing (via forking)' log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)", action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root) -- cgit v1.2.3 From 52a1884822ecb9474e12e6c16b62dbd0728a4a0e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 28 Nov 2012 10:41:42 -0800 Subject: Check for running inside RHEL and adjust the logging options. It seems like at least RHEL does not have the "--stderr" option but instead only supports the short version "-s" so add a check that will switch from the long version to the short version when RHEL is detected. 
LP: #1083715 --- tools/write-ssh-key-fingerprints | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints index aa1f3c38..d69c7fcc 100755 --- a/tools/write-ssh-key-fingerprints +++ b/tools/write-ssh-key-fingerprints @@ -1,5 +1,18 @@ #!/bin/sh + +logger_opts="-p user.info -t ec2" + +if [ -f "/etc/redhat-release" ] +then + # Seems like rhel only supports the short version + logger_opts="$logger_opts -s" +else + logger_opts="$logger_opts --stderr" +fi + +# Redirect stderr to stdout exec 2>&1 + fp_blist=",${1}," key_blist=",${2}," { @@ -16,9 +29,9 @@ done echo "-----END SSH HOST KEY FINGERPRINTS-----" echo "#############################################################" -} | logger -p user.info --stderr -t "ec2" +} | logger $logger_opts -echo -----BEGIN SSH HOST KEY KEYS----- +echo "-----BEGIN SSH HOST KEY KEYS-----" for f in /etc/ssh/ssh_host_*key.pub; do [ -f "$f" ] || continue read ktype line < "$f" @@ -26,4 +39,4 @@ for f in /etc/ssh/ssh_host_*key.pub; do [ "${key_blist#*,$ktype,}" = "${key_blist}" ] || continue cat $f done -echo -----END SSH HOST KEY KEYS----- +echo "-----END SSH HOST KEY KEYS-----" -- cgit v1.2.3 From 974e76eab2e43718802c8ef845e6696637e46930 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 1 Dec 2012 21:46:27 -0500 Subject: make sure no blank lines before cloud-init entry in ca-certificates.conf when /etc/ca-certificates.conf is read by update-ca-certificates lines after a blank line get ignored. Here, ensure that there are no blank lines, and no duplicate entries for cloud-init are added. LP: #1077020 --- ChangeLog | 2 + cloudinit/config/cc_ca_certs.py | 9 +++- .../test_handler/test_handler_ca_certs.py | 50 +++++++++++++++++++--- 3 files changed, 55 insertions(+), 6 deletions(-) diff --git a/ChangeLog b/ChangeLog index bd52f182..13afb2c2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -2,6 +2,8 @@ - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) - fix resizefs module when 'noblock' was provided (LP: #1080985) + - make sure there is no blank line before cloud-init entry in + there are no blank lines in /etc/ca-certificates.conf (LP: #1077020) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 20f24357..4f2a46a1 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -45,8 +45,15 @@ def add_ca_certs(certs): # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644) + # Append cert filename to CA_CERT_CONFIG file. - util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab") + # We have to strip the content because blank lines in the file + # causes subsequent entries to be ignored. 
(LP: #1077020) + orig = util.load_file(CA_CERT_CONFIG) + cur_cont = '\n'.join([l for l in orig.splitlines() + if l != CA_CERT_FILENAME]) + out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME) + util.write_file(CA_CERT_CONFIG, out, omode="wb") def remove_default_ca_certs(): diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index d73c9fa9..0558023a 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -138,15 +138,47 @@ class TestAddCaCerts(MockerTestCase): self.mocker.replay() cc_ca_certs.add_ca_certs([]) - def test_single_cert(self): - """Test adding a single certificate to the trusted CAs.""" + def test_single_cert_trailing_cr(self): + """Test adding a single certificate to the trusted CAs + when existing ca-certificates has trailing newline""" cert = "CERT1\nLINE2\nLINE3" + ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n" + expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n" + + mock_write = self.mocker.replace(util.write_file, passthrough=False) + mock_load = self.mocker.replace(util.load_file, passthrough=False) + + mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt", + cert, mode=0644) + + mock_load("/etc/ca-certificates.conf") + self.mocker.result(ca_certs_content) + + mock_write("/etc/ca-certificates.conf", expected, omode="wb") + self.mocker.replay() + + cc_ca_certs.add_ca_certs([cert]) + + def test_single_cert_no_trailing_cr(self): + """Test adding a single certificate to the trusted CAs + when existing ca-certificates has no trailing newline""" + cert = "CERT1\nLINE2\nLINE3" + + ca_certs_content = "line1\nline2\nline3" + mock_write = self.mocker.replace(util.write_file, passthrough=False) + mock_load = self.mocker.replace(util.load_file, passthrough=False) + mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt", cert, mode=0644) + + mock_load("/etc/ca-certificates.conf") + self.mocker.result(ca_certs_content) + mock_write("/etc/ca-certificates.conf", - "\ncloud-init-ca-certs.crt", omode="ab") + "%s\n%s\n" % (ca_certs_content, "cloud-init-ca-certs.crt"), + omode="wb") self.mocker.replay() cc_ca_certs.add_ca_certs([cert]) @@ -157,10 +189,18 @@ class TestAddCaCerts(MockerTestCase): expected_cert_file = "\n".join(certs) mock_write = self.mocker.replace(util.write_file, passthrough=False) + mock_load = self.mocker.replace(util.load_file, passthrough=False) + mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt", expected_cert_file, mode=0644) - mock_write("/etc/ca-certificates.conf", - "\ncloud-init-ca-certs.crt", omode="ab") + + ca_certs_content = "line1\nline2\nline3" + mock_load("/etc/ca-certificates.conf") + self.mocker.result(ca_certs_content) + + out = "%s\n%s\n" % (ca_certs_content, "cloud-init-ca-certs.crt") + mock_write("/etc/ca-certificates.conf", out, omode="wb") + self.mocker.replay() cc_ca_certs.add_ca_certs(certs) -- cgit v1.2.3 From 1e7b96743314f566814848ad05c5bc7271a5de91 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 3 Dec 2012 20:56:36 -0500 Subject: ChangeLog: mention fix of lp:1079002 --- ChangeLog | 1 + 1 file changed, 1 insertion(+) diff --git a/ChangeLog b/ChangeLog index 13afb2c2..c02334a9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,7 @@ - fix resizefs module when 'noblock' was provided (LP: #1080985) - make sure there is no blank line before cloud-init entry in there are no blank lines in /etc/ca-certificates.conf (LP: #1077020) + - fix 
sudoers writing when entry is a string (LP: #1079002) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) -- cgit v1.2.3 From 75d991b2e807d8bf26a2b94791870b86c43a1c96 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 4 Dec 2012 10:04:14 -0500 Subject: replace if..else based on presense of /etc/redhat-release with use of -s instead of using '--stderr' on non-rhel based on the presense of /etc/redhat-release, just use the short form '-s' everywhere. --- tools/write-ssh-key-fingerprints | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints index d69c7fcc..6c3451fd 100755 --- a/tools/write-ssh-key-fingerprints +++ b/tools/write-ssh-key-fingerprints @@ -2,13 +2,9 @@ logger_opts="-p user.info -t ec2" -if [ -f "/etc/redhat-release" ] -then - # Seems like rhel only supports the short version - logger_opts="$logger_opts -s" -else - logger_opts="$logger_opts --stderr" -fi +# rhels' version of logger_opts does not support long +# for of -s (--stderr), so use short form. +logger_opts="$logger_opts -s" # Redirect stderr to stdout exec 2>&1 -- cgit v1.2.3 From 5c5041367a8630543d84a9edf9dd4321f0c69718 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 6 Dec 2012 09:36:57 -0500 Subject: cloudinit/stages.py: separate _read_base_cfg() into static function The Init._read_base_cfg() was really a static function, this just moves it to its own static function. Its not used anywhere else at the moment. --- cloudinit/stages.py | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 94a267df..d9391f39 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -157,27 +157,12 @@ class Init(object): self._cfg = self._read_cfg(extra_fns) # LOG.debug("Loaded 'init' config %s", self._cfg) - def _read_base_cfg(self): - base_cfgs = [] - default_cfg = util.get_builtin_cfg() - kern_contents = util.read_cc_from_cmdline() - # Kernel/cmdline parameters override system config - if kern_contents: - base_cfgs.append(util.load_yaml(kern_contents, default={})) - # Anything in your conf.d location?? - # or the 'default' cloud.cfg location??? - base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG)) - # And finally the default gets to play - if default_cfg: - base_cfgs.append(default_cfg) - return util.mergemanydict(base_cfgs) - def _read_cfg(self, extra_fns): no_cfg_paths = helpers.Paths({}, self.datasource) merger = helpers.ConfigMerger(paths=no_cfg_paths, datasource=self.datasource, additional_fns=extra_fns, - base_cfg=self._read_base_cfg()) + base_cfg=fetch_base_config()) return merger.cfg def _restore_from_cache(self): @@ -575,3 +560,23 @@ class Modules(object): raw_mods = self._read_modules(section_name) mostly_mods = self._fixup_modules(raw_mods) return self._run_modules(mostly_mods) + + +def fetch_base_config(): + base_cfgs = [] + default_cfg = util.get_builtin_cfg() + kern_contents = util.read_cc_from_cmdline() + + # Kernel/cmdline parameters override system config + if kern_contents: + base_cfgs.append(util.load_yaml(kern_contents, default={})) + + # Anything in your conf.d location?? + # or the 'default' cloud.cfg location??? 
+ base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG)) + + # And finally the default gets to play + if default_cfg: + base_cfgs.append(default_cfg) + + return util.mergemanydict(base_cfgs) -- cgit v1.2.3 From cbd1ca764ed265460c3a79729a27ca8e3841390c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 12 Dec 2012 10:39:43 -0500 Subject: add 'omnibus' as an install type for chef. Thanks to Anatoliy Dobrosynets --- ChangeLog | 1 + cloudinit/config/cc_chef.py | 15 ++++++++++++++- doc/examples/cloud-config-chef.txt | 9 ++++++++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index fbfd3385..af1e024d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -7,6 +7,7 @@ - fix sudoers writing when entry is a string (LP: #1079002) - tools/write-ssh-key-fingerprints: use '-s' rather than '--stderr' option (LP: #1083715) + - support omnibus installer for chef [Anatoliy Dobrosynets] 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 7a3d6a31..607f789e 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -22,6 +22,7 @@ import json import os from cloudinit import templater +from cloudinit import url_helper from cloudinit import util RUBY_VERSION_DEFAULT = "1.8" @@ -35,6 +36,8 @@ CHEF_DIRS = [ '/var/run/chef', ] +OMNIBUS_URL = "https://www.opscode.com/chef/install.sh" + def handle(name, cfg, cloud, log, _args): @@ -83,7 +86,9 @@ def handle(name, cfg, cloud, log, _args): util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json)) # If chef is not installed, we install chef based on 'install_type' - if not os.path.isfile('/usr/bin/chef-client'): + if (not os.path.isfile('/usr/bin/chef-client') or + util.get_cfg_option_bool(chef_cfg, 'force_install', default=False)): + install_type = util.get_cfg_option_str(chef_cfg, 'install_type', 'packages') if install_type == "gems": @@ -99,6 +104,14 @@ def handle(name, cfg, cloud, log, _args): elif install_type == 'packages': # this will install and run the chef-client from packages cloud.distro.install_packages(('chef',)) + elif install_type == 'omnibus': + url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL) + content = url_helper.readurl(url=url, retries=5) + with util.tempdir() as tmpd: + # use tmpd over tmpfile to avoid 'Text file busy' on execute + tmpf = "%s/chef-omnibus-install" % tmpd + util.write_file(tmpf, content, mode=0700) + util.subp([tmpf], capture=False) else: log.warn("Unknown chef install type %s", install_type) diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt index f87472ec..4edad653 100644 --- a/doc/examples/cloud-config-chef.txt +++ b/doc/examples/cloud-config-chef.txt @@ -47,9 +47,13 @@ apt_sources: chef: - # Valid values are 'gems' and 'packages' + # Valid values are 'gems' and 'packages' and 'omnibus' install_type: "packages" + # Boolean: run 'install_type' code even if chef-client + # appears already installed. 
+ force_install: false + # Chef settings server_url: "https://chef.yourorg.com:4000" @@ -80,6 +84,9 @@ chef: maxclients: 100 keepalive: "off" + # if install_type is 'omnibus', change the url to download + omnibus_url: "https://www.opscode.com/chef/install.sh" + # Capture all subprocess output into a logfile # Useful for troubleshooting cloud-init issues -- cgit v1.2.3 From 86301383a1f7ba99435c34a0157076aa7505599c Mon Sep 17 00:00:00 2001 From: Craig Tracey Date: Thu, 13 Dec 2012 21:06:32 -0500 Subject: Provide a mechanism for puppet to be conditionally installed. --- cloudinit/config/cc_puppet.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 8fe3af57..e9a0a0f4 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -57,8 +57,10 @@ def handle(name, cfg, cloud, log, _args): puppet_cfg = cfg['puppet'] - # Start by installing the puppet package ... - cloud.distro.install_packages(["puppet"]) + # Start by installing the puppet package if necessary... + install = util.get_cfg_option_bool(puppet_cfg, 'install', True) + if install: + cloud.distro.install_packages(["puppet"]) # ... and then update the puppet configuration if 'conf' in puppet_cfg: -- cgit v1.2.3 From c196afccda0293613e3586922347749c33dfddbf Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 17 Dec 2012 08:41:11 -0500 Subject: ensure a datasource's 'distro' and sys_cfg are updated After parsing and merging datasource's config, the changes in were not making it into the datasource's 'distro. The end result was that the when a config module was called, it's 'cloud' argument would be updated in 'cloud.distro', but not in 'cloud.datasource.distro'. This path was required for getting mirror settings to take affect, because they include information from the datasource. Ie: cc_apt_configure had mirror_info = cloud.datasource.get_package_mirror_info() the datasource then used *its* copy of sys_cfg to call self.distro.get_package_mirror_info and *that* distro's sys_cfg had not been updated. 
LP: #1090482 --- ChangeLog | 2 ++ cloudinit/stages.py | 20 +++++++++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/ChangeLog b/ChangeLog index e4bcdd76..64a6e618 100644 --- a/ChangeLog +++ b/ChangeLog @@ -9,6 +9,8 @@ option (LP: #1083715) - make install of puppet configurable (LP: #1090205) [Craig Tracey] - support omnibus installer for chef [Anatoliy Dobrosynets] + - fix bug where cloud-config in user-data could not modify system_info + settings (LP: #1090482) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d9391f39..8d3213b4 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -63,23 +63,29 @@ class Init(object): # Changed only when a fetch occurs self.datasource = NULL_DATA_SOURCE - def _reset(self, ds=False): + def _reset(self, reset_ds=False): # Recreated on access self._cfg = None self._paths = None self._distro = None - if ds: + if reset_ds: self.datasource = NULL_DATA_SOURCE @property def distro(self): if not self._distro: # Try to find the right class to use - scfg = self._extract_cfg('system') - name = scfg.pop('distro', 'ubuntu') - cls = distros.fetch(name) - LOG.debug("Using distro class %s", cls) - self._distro = cls(name, scfg, self.paths) + system_config = self._extract_cfg('system') + distro_name = system_config.pop('distro', 'ubuntu') + distro_cls = distros.fetch(distro_name) + LOG.debug("Using distro class %s", distro_cls) + self._distro = distro_cls(distro_name, system_config, self.paths) + # If we have an active datasource we need to adjust + # said datasource and move its distro/system config + # from whatever it was to a new set... + if self.datasource is not NULL_DATA_SOURCE: + self.datasource.distro = self._distro + self.datasource.sys_cfg = system_config return self._distro @property -- cgit v1.2.3 From 5021c3fa71a6d239a8a67303cd564d383a9c6e1d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 17 Dec 2012 12:26:10 -0500 Subject: tell upstart to reload configuration after writing an upstart job Invoking 'initctl reload-configuration' is only required if inotify does not work. overlayroot does not support inotify. So, we just call initctl always, which wont hurt anything. 
LP: #1080841 --- cloudinit/handlers/upstart_job.py | 4 ++++ tests/unittests/test_builtin_handlers.py | 9 +++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 99e0afde..4e204da0 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -64,3 +64,7 @@ class UpstartJobPartHandler(handlers.Handler): payload = util.dos2unix(payload) path = os.path.join(self.upstart_dir, filename) util.write_file(path, payload, 0644) + + # if inotify support is not present in the root filesystem + # (overlayroot) then we need to tell upstart to re-read /etc + util.subp(["initctl", "reload-configuration"], capture=False) diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index ebc0bd51..5f41cb3d 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -6,6 +6,7 @@ from mocker import MockerTestCase from cloudinit import handlers from cloudinit import helpers +from cloudinit import util from cloudinit.handlers import upstart_job @@ -34,6 +35,7 @@ class TestBuiltins(MockerTestCase): self.assertEquals(0, len(os.listdir(up_root))) def test_upstart_frequency_single(self): + # files should be written out when frequency is ! per-instance c_root = self.makeDir() up_root = self.makeDir() paths = helpers.Paths({ @@ -41,9 +43,12 @@ class TestBuiltins(MockerTestCase): 'upstart_dir': up_root, }) freq = PER_INSTANCE + + mock_subp = self.mocker.replace(util.subp, passthrough=False) + mock_subp(["initctl", "reload-configuration"], capture=False) + self.mocker.replay() + h = upstart_job.UpstartJobPartHandler(paths) - # No files should be written out when - # the frequency is ! per-instance h.handle_part('', handlers.CONTENT_START, None, None, None) h.handle_part('blah', 'text/upstart-job', -- cgit v1.2.3 From 6f96b922886c8d607a6be69ae8bc0ce1caf75d04 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Dec 2012 09:21:01 -0500 Subject: cloudinit/handlers/upstart_job.py: pep8 / trailing whitespace --- cloudinit/handlers/upstart_job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 4e204da0..4684f7f2 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -66,5 +66,5 @@ class UpstartJobPartHandler(handlers.Handler): util.write_file(path, payload, 0644) # if inotify support is not present in the root filesystem - # (overlayroot) then we need to tell upstart to re-read /etc + # (overlayroot) then we need to tell upstart to re-read /etc util.subp(["initctl", "reload-configuration"], capture=False) -- cgit v1.2.3 From 8abbeae7ce15a6fb7a08adc697205d614f868a98 Mon Sep 17 00:00:00 2001 From: Gerard Dethier Date: Wed, 19 Dec 2012 09:27:33 -0500 Subject: DataSourceCloudStack: use virtual router rather than default route In CloudStack's documentation, it is stated that meta/user-data can be retrieved from CloudStack's Virtual Router [1]. However, cloud-init retrieves these information from default gateway. VR and default gateway may be the same machine (i.e. have the same address) in some cases, but that is not be always true (actually, in my case, it is not). This change searches the lease files in /var/lib/dhclient to pick out the dhcp-server-identifier. It admittedly does make this specific to dhclient. 
-- [1] http://incubator.apache.org/cloudstack/docs/en-US/Apache_CloudStack/4.0.0-incubating/html/Admin_Guide/user-data-and-meta-data.html). LP: #1089989 --- cloudinit/sources/DataSourceCloudStack.py | 51 ++++++++++++++++++------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 076dba5a..82e1e130 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -3,10 +3,12 @@ # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012 Cosmin Luta # Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2012 Gerard Dethier # # Author: Cosmin Luta # Author: Scott Moser # Author: Joshua Harlow +# Author: Gerard Dethier # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -20,9 +22,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from socket import inet_ntoa -from struct import pack - import os import time @@ -40,24 +39,12 @@ class DataSourceCloudStack(sources.DataSource): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'cs') # Cloudstack has its metadata/userdata URLs located at - # http:///latest/ + # http:///latest/ self.api_ver = 'latest' - gw_addr = self.get_default_gateway() - if not gw_addr: - raise RuntimeError("No default gateway found!") - self.metadata_address = "http://%s/" % (gw_addr) - - def get_default_gateway(self): - """Returns the default gateway ip address in the dotted format.""" - lines = util.load_file("/proc/net/route").splitlines() - for line in lines: - items = line.split("\t") - if items[1] == "00000000": - # Found the default route, get the gateway - gw = inet_ntoa(pack(" 2: + dhcp = words[2] + LOG.debug("Found DHCP identifier %s", dhcp) + addresses.add(dhcp) + if len(addresses) != 1: + # No unique virtual router found + return None + return addresses.pop() + + # Used to match classes to dependencies datasources = [ (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -- cgit v1.2.3 From 3569e71a1579b97f4e33fb46ab3fcef08a4ddad4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Dec 2012 10:09:39 -0500 Subject: add ChangeLog entry for previous commit --- ChangeLog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ChangeLog b/ChangeLog index 64a6e618..31a19996 100644 --- a/ChangeLog +++ b/ChangeLog @@ -11,6 +11,8 @@ - support omnibus installer for chef [Anatoliy Dobrosynets] - fix bug where cloud-config in user-data could not modify system_info settings (LP: #1090482) + - fix CloudStack DataSource to use Virtual Router as found in + /var/lib/dhcpclient rather than default gateway (LP: #1089989) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) -- cgit v1.2.3
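The CloudStack change above locates the metadata endpoint by reading the dhcp-server-identifier option out of the dhclient lease files in /var/lib/dhclient and treating the DHCP server recorded there as the virtual router, instead of assuming the default gateway. A rough, self-contained sketch of that lease-scanning idea follows; it is illustrative only, not the code from the patch, and the find_virtual_router name and the .lease/.leases filename filter are assumptions made for the example.

    import os

    def find_virtual_router(lease_dir="/var/lib/dhclient"):
        # Collect every DHCP server identifier mentioned in the lease files.
        addresses = set()
        try:
            names = os.listdir(lease_dir)
        except OSError:
            return None
        for name in names:
            if not (name.endswith(".lease") or name.endswith(".leases")):
                continue
            with open(os.path.join(lease_dir, name)) as fp:
                for line in fp:
                    # dhclient records lines such as:
                    #   option dhcp-server-identifier 10.1.1.1;
                    if "dhcp-server-identifier" in line:
                        words = line.strip(" ;\r\n").split(" ")
                        if len(words) > 2:
                            addresses.add(words[2])
        # Only trust the result when exactly one identifier was found;
        # anything else means there is no unambiguous virtual router.
        if len(addresses) != 1:
            return None
        return addresses.pop()

Requiring a single unique identifier mirrors the check visible in the patch: when the leases disagree, the datasource returns None rather than picking an arbitrary address.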