From 52a1884822ecb9474e12e6c16b62dbd0728a4a0e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Wed, 28 Nov 2012 10:41:42 -0800 Subject: Check for running inside RHEL and adjust the logging options. It seems like at least RHEL does not have the "--stderr" option but instead only supports the short version "-s" so add a check that will switch from the long version to the short version when RHEL is detected. LP: #1083715 --- tools/write-ssh-key-fingerprints | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints index aa1f3c38..d69c7fcc 100755 --- a/tools/write-ssh-key-fingerprints +++ b/tools/write-ssh-key-fingerprints @@ -1,5 +1,18 @@ #!/bin/sh + +logger_opts="-p user.info -t ec2" + +if [ -f "/etc/redhat-release" ] +then + # Seems like rhel only supports the short version + logger_opts="$logger_opts -s" +else + logger_opts="$logger_opts --stderr" +fi + +# Redirect stderr to stdout exec 2>&1 + fp_blist=",${1}," key_blist=",${2}," { @@ -16,9 +29,9 @@ done echo "-----END SSH HOST KEY FINGERPRINTS-----" echo "#############################################################" -} | logger -p user.info --stderr -t "ec2" +} | logger $logger_opts -echo -----BEGIN SSH HOST KEY KEYS----- +echo "-----BEGIN SSH HOST KEY KEYS-----" for f in /etc/ssh/ssh_host_*key.pub; do [ -f "$f" ] || continue read ktype line < "$f" @@ -26,4 +39,4 @@ for f in /etc/ssh/ssh_host_*key.pub; do [ "${key_blist#*,$ktype,}" = "${key_blist}" ] || continue cat $f done -echo -----END SSH HOST KEY KEYS----- +echo "-----END SSH HOST KEY KEYS-----" -- cgit v1.2.3 From 974e76eab2e43718802c8ef845e6696637e46930 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 1 Dec 2012 21:46:27 -0500 Subject: make sure no blank lines before cloud-init entry in ca-certificates.conf when /etc/ca-certificates.conf is read by update-ca-certificates lines after a blank line get ignored. Here, ensure that there are no blank lines, and no duplicate entries for cloud-init are added. LP: #1077020 --- ChangeLog | 2 + cloudinit/config/cc_ca_certs.py | 9 +++- .../test_handler/test_handler_ca_certs.py | 50 +++++++++++++++++++--- 3 files changed, 55 insertions(+), 6 deletions(-) diff --git a/ChangeLog b/ChangeLog index bd52f182..13afb2c2 100644 --- a/ChangeLog +++ b/ChangeLog @@ -2,6 +2,8 @@ - add a debian watch file - add 'sudo' entry to ubuntu's default user (LP: #1080717) - fix resizefs module when 'noblock' was provided (LP: #1080985) + - make sure there is no blank line before cloud-init entry in + there are no blank lines in /etc/ca-certificates.conf (LP: #1077020) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 20f24357..4f2a46a1 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -45,8 +45,15 @@ def add_ca_certs(certs): # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644) + # Append cert filename to CA_CERT_CONFIG file. - util.write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="ab") + # We have to strip the content because blank lines in the file + # causes subsequent entries to be ignored. 
(LP: #1077020) + orig = util.load_file(CA_CERT_CONFIG) + cur_cont = '\n'.join([l for l in orig.splitlines() + if l != CA_CERT_FILENAME]) + out = "%s\n%s\n" % (cur_cont.rstrip(), CA_CERT_FILENAME) + util.write_file(CA_CERT_CONFIG, out, omode="wb") def remove_default_ca_certs(): diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py index d73c9fa9..0558023a 100644 --- a/tests/unittests/test_handler/test_handler_ca_certs.py +++ b/tests/unittests/test_handler/test_handler_ca_certs.py @@ -138,15 +138,47 @@ class TestAddCaCerts(MockerTestCase): self.mocker.replay() cc_ca_certs.add_ca_certs([]) - def test_single_cert(self): - """Test adding a single certificate to the trusted CAs.""" + def test_single_cert_trailing_cr(self): + """Test adding a single certificate to the trusted CAs + when existing ca-certificates has trailing newline""" cert = "CERT1\nLINE2\nLINE3" + ca_certs_content = "line1\nline2\ncloud-init-ca-certs.crt\nline3\n" + expected = "line1\nline2\nline3\ncloud-init-ca-certs.crt\n" + + mock_write = self.mocker.replace(util.write_file, passthrough=False) + mock_load = self.mocker.replace(util.load_file, passthrough=False) + + mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt", + cert, mode=0644) + + mock_load("/etc/ca-certificates.conf") + self.mocker.result(ca_certs_content) + + mock_write("/etc/ca-certificates.conf", expected, omode="wb") + self.mocker.replay() + + cc_ca_certs.add_ca_certs([cert]) + + def test_single_cert_no_trailing_cr(self): + """Test adding a single certificate to the trusted CAs + when existing ca-certificates has no trailing newline""" + cert = "CERT1\nLINE2\nLINE3" + + ca_certs_content = "line1\nline2\nline3" + mock_write = self.mocker.replace(util.write_file, passthrough=False) + mock_load = self.mocker.replace(util.load_file, passthrough=False) + mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt", cert, mode=0644) + + mock_load("/etc/ca-certificates.conf") + self.mocker.result(ca_certs_content) + mock_write("/etc/ca-certificates.conf", - "\ncloud-init-ca-certs.crt", omode="ab") + "%s\n%s\n" % (ca_certs_content, "cloud-init-ca-certs.crt"), + omode="wb") self.mocker.replay() cc_ca_certs.add_ca_certs([cert]) @@ -157,10 +189,18 @@ class TestAddCaCerts(MockerTestCase): expected_cert_file = "\n".join(certs) mock_write = self.mocker.replace(util.write_file, passthrough=False) + mock_load = self.mocker.replace(util.load_file, passthrough=False) + mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt", expected_cert_file, mode=0644) - mock_write("/etc/ca-certificates.conf", - "\ncloud-init-ca-certs.crt", omode="ab") + + ca_certs_content = "line1\nline2\nline3" + mock_load("/etc/ca-certificates.conf") + self.mocker.result(ca_certs_content) + + out = "%s\n%s\n" % (ca_certs_content, "cloud-init-ca-certs.crt") + mock_write("/etc/ca-certificates.conf", out, omode="wb") + self.mocker.replay() cc_ca_certs.add_ca_certs(certs) -- cgit v1.2.3 From 1e7b96743314f566814848ad05c5bc7271a5de91 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 3 Dec 2012 20:56:36 -0500 Subject: ChangeLog: mention fix of lp:1079002 --- ChangeLog | 1 + 1 file changed, 1 insertion(+) diff --git a/ChangeLog b/ChangeLog index 13afb2c2..c02334a9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,7 @@ - fix resizefs module when 'noblock' was provided (LP: #1080985) - make sure there is no blank line before cloud-init entry in there are no blank lines in /etc/ca-certificates.conf (LP: #1077020) + - fix 
sudoers writing when entry is a string (LP: #1079002) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) -- cgit v1.2.3 From 75d991b2e807d8bf26a2b94791870b86c43a1c96 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 4 Dec 2012 10:04:14 -0500 Subject: replace if..else based on presense of /etc/redhat-release with use of -s instead of using '--stderr' on non-rhel based on the presense of /etc/redhat-release, just use the short form '-s' everywhere. --- tools/write-ssh-key-fingerprints | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/tools/write-ssh-key-fingerprints b/tools/write-ssh-key-fingerprints index d69c7fcc..6c3451fd 100755 --- a/tools/write-ssh-key-fingerprints +++ b/tools/write-ssh-key-fingerprints @@ -2,13 +2,9 @@ logger_opts="-p user.info -t ec2" -if [ -f "/etc/redhat-release" ] -then - # Seems like rhel only supports the short version - logger_opts="$logger_opts -s" -else - logger_opts="$logger_opts --stderr" -fi +# rhels' version of logger_opts does not support long +# for of -s (--stderr), so use short form. +logger_opts="$logger_opts -s" # Redirect stderr to stdout exec 2>&1 -- cgit v1.2.3 From 5c5041367a8630543d84a9edf9dd4321f0c69718 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 6 Dec 2012 09:36:57 -0500 Subject: cloudinit/stages.py: separate _read_base_cfg() into static function The Init._read_base_cfg() was really a static function, this just moves it to its own static function. Its not used anywhere else at the moment. --- cloudinit/stages.py | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 94a267df..d9391f39 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -157,27 +157,12 @@ class Init(object): self._cfg = self._read_cfg(extra_fns) # LOG.debug("Loaded 'init' config %s", self._cfg) - def _read_base_cfg(self): - base_cfgs = [] - default_cfg = util.get_builtin_cfg() - kern_contents = util.read_cc_from_cmdline() - # Kernel/cmdline parameters override system config - if kern_contents: - base_cfgs.append(util.load_yaml(kern_contents, default={})) - # Anything in your conf.d location?? - # or the 'default' cloud.cfg location??? - base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG)) - # And finally the default gets to play - if default_cfg: - base_cfgs.append(default_cfg) - return util.mergemanydict(base_cfgs) - def _read_cfg(self, extra_fns): no_cfg_paths = helpers.Paths({}, self.datasource) merger = helpers.ConfigMerger(paths=no_cfg_paths, datasource=self.datasource, additional_fns=extra_fns, - base_cfg=self._read_base_cfg()) + base_cfg=fetch_base_config()) return merger.cfg def _restore_from_cache(self): @@ -575,3 +560,23 @@ class Modules(object): raw_mods = self._read_modules(section_name) mostly_mods = self._fixup_modules(raw_mods) return self._run_modules(mostly_mods) + + +def fetch_base_config(): + base_cfgs = [] + default_cfg = util.get_builtin_cfg() + kern_contents = util.read_cc_from_cmdline() + + # Kernel/cmdline parameters override system config + if kern_contents: + base_cfgs.append(util.load_yaml(kern_contents, default={})) + + # Anything in your conf.d location?? + # or the 'default' cloud.cfg location??? 
+ base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG)) + + # And finally the default gets to play + if default_cfg: + base_cfgs.append(default_cfg) + + return util.mergemanydict(base_cfgs) -- cgit v1.2.3 From cbd1ca764ed265460c3a79729a27ca8e3841390c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 12 Dec 2012 10:39:43 -0500 Subject: add 'omnibus' as an install type for chef. Thanks to Anatoliy Dobrosynets --- ChangeLog | 1 + cloudinit/config/cc_chef.py | 15 ++++++++++++++- doc/examples/cloud-config-chef.txt | 9 ++++++++- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index fbfd3385..af1e024d 100644 --- a/ChangeLog +++ b/ChangeLog @@ -7,6 +7,7 @@ - fix sudoers writing when entry is a string (LP: #1079002) - tools/write-ssh-key-fingerprints: use '-s' rather than '--stderr' option (LP: #1083715) + - support omnibus installer for chef [Anatoliy Dobrosynets] 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index 7a3d6a31..607f789e 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -22,6 +22,7 @@ import json import os from cloudinit import templater +from cloudinit import url_helper from cloudinit import util RUBY_VERSION_DEFAULT = "1.8" @@ -35,6 +36,8 @@ CHEF_DIRS = [ '/var/run/chef', ] +OMNIBUS_URL = "https://www.opscode.com/chef/install.sh" + def handle(name, cfg, cloud, log, _args): @@ -83,7 +86,9 @@ def handle(name, cfg, cloud, log, _args): util.write_file('/etc/chef/firstboot.json', json.dumps(initial_json)) # If chef is not installed, we install chef based on 'install_type' - if not os.path.isfile('/usr/bin/chef-client'): + if (not os.path.isfile('/usr/bin/chef-client') or + util.get_cfg_option_bool(chef_cfg, 'force_install', default=False)): + install_type = util.get_cfg_option_str(chef_cfg, 'install_type', 'packages') if install_type == "gems": @@ -99,6 +104,14 @@ def handle(name, cfg, cloud, log, _args): elif install_type == 'packages': # this will install and run the chef-client from packages cloud.distro.install_packages(('chef',)) + elif install_type == 'omnibus': + url = util.get_cfg_option_str(chef_cfg, "omnibus_url", OMNIBUS_URL) + content = url_helper.readurl(url=url, retries=5) + with util.tempdir() as tmpd: + # use tmpd over tmpfile to avoid 'Text file busy' on execute + tmpf = "%s/chef-omnibus-install" % tmpd + util.write_file(tmpf, content, mode=0700) + util.subp([tmpf], capture=False) else: log.warn("Unknown chef install type %s", install_type) diff --git a/doc/examples/cloud-config-chef.txt b/doc/examples/cloud-config-chef.txt index f87472ec..4edad653 100644 --- a/doc/examples/cloud-config-chef.txt +++ b/doc/examples/cloud-config-chef.txt @@ -47,9 +47,13 @@ apt_sources: chef: - # Valid values are 'gems' and 'packages' + # Valid values are 'gems' and 'packages' and 'omnibus' install_type: "packages" + # Boolean: run 'install_type' code even if chef-client + # appears already installed. 
+ force_install: false + # Chef settings server_url: "https://chef.yourorg.com:4000" @@ -80,6 +84,9 @@ chef: maxclients: 100 keepalive: "off" + # if install_type is 'omnibus', change the url to download + omnibus_url: "https://www.opscode.com/chef/install.sh" + # Capture all subprocess output into a logfile # Useful for troubleshooting cloud-init issues -- cgit v1.2.3 From 86301383a1f7ba99435c34a0157076aa7505599c Mon Sep 17 00:00:00 2001 From: Craig Tracey Date: Thu, 13 Dec 2012 21:06:32 -0500 Subject: Provide a mechanism for puppet to be conditionally installed. --- cloudinit/config/cc_puppet.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 8fe3af57..e9a0a0f4 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -57,8 +57,10 @@ def handle(name, cfg, cloud, log, _args): puppet_cfg = cfg['puppet'] - # Start by installing the puppet package ... - cloud.distro.install_packages(["puppet"]) + # Start by installing the puppet package if necessary... + install = util.get_cfg_option_bool(puppet_cfg, 'install', True) + if install: + cloud.distro.install_packages(["puppet"]) # ... and then update the puppet configuration if 'conf' in puppet_cfg: -- cgit v1.2.3 From c196afccda0293613e3586922347749c33dfddbf Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 17 Dec 2012 08:41:11 -0500 Subject: ensure a datasource's 'distro' and sys_cfg are updated After parsing and merging datasource's config, the changes in were not making it into the datasource's 'distro. The end result was that the when a config module was called, it's 'cloud' argument would be updated in 'cloud.distro', but not in 'cloud.datasource.distro'. This path was required for getting mirror settings to take affect, because they include information from the datasource. Ie: cc_apt_configure had mirror_info = cloud.datasource.get_package_mirror_info() the datasource then used *its* copy of sys_cfg to call self.distro.get_package_mirror_info and *that* distro's sys_cfg had not been updated. 
LP: #1090482 --- ChangeLog | 2 ++ cloudinit/stages.py | 20 +++++++++++++------- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/ChangeLog b/ChangeLog index e4bcdd76..64a6e618 100644 --- a/ChangeLog +++ b/ChangeLog @@ -9,6 +9,8 @@ option (LP: #1083715) - make install of puppet configurable (LP: #1090205) [Craig Tracey] - support omnibus installer for chef [Anatoliy Dobrosynets] + - fix bug where cloud-config in user-data could not modify system_info + settings (LP: #1090482) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d9391f39..8d3213b4 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -63,23 +63,29 @@ class Init(object): # Changed only when a fetch occurs self.datasource = NULL_DATA_SOURCE - def _reset(self, ds=False): + def _reset(self, reset_ds=False): # Recreated on access self._cfg = None self._paths = None self._distro = None - if ds: + if reset_ds: self.datasource = NULL_DATA_SOURCE @property def distro(self): if not self._distro: # Try to find the right class to use - scfg = self._extract_cfg('system') - name = scfg.pop('distro', 'ubuntu') - cls = distros.fetch(name) - LOG.debug("Using distro class %s", cls) - self._distro = cls(name, scfg, self.paths) + system_config = self._extract_cfg('system') + distro_name = system_config.pop('distro', 'ubuntu') + distro_cls = distros.fetch(distro_name) + LOG.debug("Using distro class %s", distro_cls) + self._distro = distro_cls(distro_name, system_config, self.paths) + # If we have an active datasource we need to adjust + # said datasource and move its distro/system config + # from whatever it was to a new set... + if self.datasource is not NULL_DATA_SOURCE: + self.datasource.distro = self._distro + self.datasource.sys_cfg = system_config return self._distro @property -- cgit v1.2.3 From 5021c3fa71a6d239a8a67303cd564d383a9c6e1d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 17 Dec 2012 12:26:10 -0500 Subject: tell upstart to reload configuration after writing an upstart job Invoking 'initctl reload-configuration' is only required if inotify does not work. overlayroot does not support inotify. So, we just call initctl always, which wont hurt anything. 
LP: #1080841 --- cloudinit/handlers/upstart_job.py | 4 ++++ tests/unittests/test_builtin_handlers.py | 9 +++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 99e0afde..4e204da0 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -64,3 +64,7 @@ class UpstartJobPartHandler(handlers.Handler): payload = util.dos2unix(payload) path = os.path.join(self.upstart_dir, filename) util.write_file(path, payload, 0644) + + # if inotify support is not present in the root filesystem + # (overlayroot) then we need to tell upstart to re-read /etc + util.subp(["initctl", "reload-configuration"], capture=False) diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index ebc0bd51..5f41cb3d 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -6,6 +6,7 @@ from mocker import MockerTestCase from cloudinit import handlers from cloudinit import helpers +from cloudinit import util from cloudinit.handlers import upstart_job @@ -34,6 +35,7 @@ class TestBuiltins(MockerTestCase): self.assertEquals(0, len(os.listdir(up_root))) def test_upstart_frequency_single(self): + # files should be written out when frequency is ! per-instance c_root = self.makeDir() up_root = self.makeDir() paths = helpers.Paths({ @@ -41,9 +43,12 @@ class TestBuiltins(MockerTestCase): 'upstart_dir': up_root, }) freq = PER_INSTANCE + + mock_subp = self.mocker.replace(util.subp, passthrough=False) + mock_subp(["initctl", "reload-configuration"], capture=False) + self.mocker.replay() + h = upstart_job.UpstartJobPartHandler(paths) - # No files should be written out when - # the frequency is ! per-instance h.handle_part('', handlers.CONTENT_START, None, None, None) h.handle_part('blah', 'text/upstart-job', -- cgit v1.2.3 From 6f96b922886c8d607a6be69ae8bc0ce1caf75d04 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Dec 2012 09:21:01 -0500 Subject: cloudinit/handlers/upstart_job.py: pep8 / trailing whitespace --- cloudinit/handlers/upstart_job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 4e204da0..4684f7f2 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -66,5 +66,5 @@ class UpstartJobPartHandler(handlers.Handler): util.write_file(path, payload, 0644) # if inotify support is not present in the root filesystem - # (overlayroot) then we need to tell upstart to re-read /etc + # (overlayroot) then we need to tell upstart to re-read /etc util.subp(["initctl", "reload-configuration"], capture=False) -- cgit v1.2.3 From 8abbeae7ce15a6fb7a08adc697205d614f868a98 Mon Sep 17 00:00:00 2001 From: Gerard Dethier Date: Wed, 19 Dec 2012 09:27:33 -0500 Subject: DataSourceCloudStack: use virtual router rather than default route In CloudStack's documentation, it is stated that meta/user-data can be retrieved from CloudStack's Virtual Router [1]. However, cloud-init retrieves these information from default gateway. VR and default gateway may be the same machine (i.e. have the same address) in some cases, but that is not be always true (actually, in my case, it is not). This change searches the lease files in /var/lib/dhclient to pick out the dhcp-server-identifier. It admittedly does make this specific to dhclient. 
-- [1] http://incubator.apache.org/cloudstack/docs/en-US/Apache_CloudStack/4.0.0-incubating/html/Admin_Guide/user-data-and-meta-data.html). LP: #1089989 --- cloudinit/sources/DataSourceCloudStack.py | 51 ++++++++++++++++++------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 076dba5a..82e1e130 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -3,10 +3,12 @@ # Copyright (C) 2012 Canonical Ltd. # Copyright (C) 2012 Cosmin Luta # Copyright (C) 2012 Yahoo! Inc. +# Copyright (C) 2012 Gerard Dethier # # Author: Cosmin Luta # Author: Scott Moser # Author: Joshua Harlow +# Author: Gerard Dethier # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -20,9 +22,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from socket import inet_ntoa -from struct import pack - import os import time @@ -40,24 +39,12 @@ class DataSourceCloudStack(sources.DataSource): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'cs') # Cloudstack has its metadata/userdata URLs located at - # http:///latest/ + # http:///latest/ self.api_ver = 'latest' - gw_addr = self.get_default_gateway() - if not gw_addr: - raise RuntimeError("No default gateway found!") - self.metadata_address = "http://%s/" % (gw_addr) - - def get_default_gateway(self): - """Returns the default gateway ip address in the dotted format.""" - lines = util.load_file("/proc/net/route").splitlines() - for line in lines: - items = line.split("\t") - if items[1] == "00000000": - # Found the default route, get the gateway - gw = inet_ntoa(pack(" 2: + dhcp = words[2] + LOG.debug("Found DHCP identifier %s", dhcp) + addresses.add(dhcp) + if len(addresses) != 1: + # No unique virtual router found + return None + return addresses.pop() + + # Used to match classes to dependencies datasources = [ (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -- cgit v1.2.3 From 3569e71a1579b97f4e33fb46ab3fcef08a4ddad4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 19 Dec 2012 10:09:39 -0500 Subject: add ChangeLog entry for previous commit --- ChangeLog | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ChangeLog b/ChangeLog index 64a6e618..31a19996 100644 --- a/ChangeLog +++ b/ChangeLog @@ -11,6 +11,8 @@ - support omnibus installer for chef [Anatoliy Dobrosynets] - fix bug where cloud-config in user-data could not modify system_info settings (LP: #1090482) + - fix CloudStack DataSource to use Virtual Router as found in + /var/lib/dhcpclient rather than default gateway (LP: #1089989) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) -- cgit v1.2.3 From e53ab8f17d2aa8d6826581eee20202812b0620e9 Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Thu, 3 Jan 2013 13:07:07 +0100 Subject: Support for sr[0-9]+ short device names. 
--- cloudinit/config/cc_mounts.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index cb772c86..9010d97f 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -24,8 +24,8 @@ import re from cloudinit import util -# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1 -SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$" +# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0 +SHORTNAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$" SHORTNAME = re.compile(SHORTNAME_FILTER) WS = re.compile("[%s]+" % (whitespace)) FSTAB_PATH = "/etc/fstab" -- cgit v1.2.3 From 15a33d190f2a9247accf8834b005521c615cb6b3 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 5 Jan 2013 10:04:58 -0800 Subject: Make which fields are redacted come from a field array. LP: #1096417 --- cloudinit/distros/__init__.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 6a684b89..8a3e0570 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -297,22 +297,26 @@ class Distro(object): "no_create_home": "-M", } + redact_fields = ['passwd'] + # Now check the value and create the command for option in kwargs: value = kwargs[option] if option in adduser_opts and value \ and isinstance(value, str): adduser_cmd.extend([adduser_opts[option], value]) - - # Redact the password field from the logs - if option != "password": - x_adduser_cmd.extend([adduser_opts[option], value]) - else: + # Redact certain fields from the logs + if option in redact_fields: x_adduser_cmd.extend([adduser_opts[option], 'REDACTED']) - + else: + x_adduser_cmd.extend([adduser_opts[option], value]) elif option in adduser_opts_flags and value: adduser_cmd.append(adduser_opts_flags[option]) - x_adduser_cmd.append(adduser_opts_flags[option]) + # Redact certain fields from the logs + if option in redact_fields: + x_adduser_cmd.append('REDACTED') + else: + x_adduser_cmd.append(adduser_opts_flags[option]) # Default to creating home directory unless otherwise directed # Also, we do not create home directories for system users. -- cgit v1.2.3 From 6cfd12c96608eb5fd086da49c4c685635e40e6e0 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Sat, 5 Jan 2013 10:18:01 -0800 Subject: Fix the password locking logic. Instead of only not locking when system is present the logic should handle the correct case when lock password is set and system is not present. LP: #1096423 --- cloudinit/distros/__init__.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 6a684b89..be32757d 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -335,9 +335,10 @@ class Distro(object): self.set_passwd(name, kwargs['plain_text_passwd']) # Default locking down the account. - if ('lock_passwd' not in kwargs and - ('lock_passwd' in kwargs and kwargs['lock_passwd']) or - 'system' not in kwargs): + # + # Which means if lock_passwd is False (on non-existent its true) + # then lock or if system is True (on non-existent its false) then lock. 
+ if (kwargs.get('lock_passwd', True) or kwargs.get('system', False)): try: util.subp(['passwd', '--lock', name]) except Exception as e: -- cgit v1.2.3 From 4fde399a38765fa9641b3177b966ad6c8ec9750f Mon Sep 17 00:00:00 2001 From: Gerard Dethier Date: Mon, 7 Jan 2013 12:20:58 -0500 Subject: DataSourceCloudStack: fallback to default route if no virtual router found Changes in revision 753 broke cloud-init on ubuntu, as it has a different dhclient directory than Fedora where the change was developed and tested. This change does 2 things: * searches multiple directories (including /var/lib/dhcp) for the lease files. * adds a fallback to the old code path of choosing the default route as the virtual router if there were no virtual routers found in the lease files. LP: #1089989 --- ChangeLog | 6 ++- cloudinit/sources/DataSourceCloudStack.py | 82 ++++++++++++++++++++++++------- 2 files changed, 68 insertions(+), 20 deletions(-) diff --git a/ChangeLog b/ChangeLog index 9534be26..18e25725 100644 --- a/ChangeLog +++ b/ChangeLog @@ -11,8 +11,10 @@ - support omnibus installer for chef [Anatoliy Dobrosynets] - fix bug where cloud-config in user-data could not modify system_info settings (LP: #1090482) - - fix CloudStack DataSource to use Virtual Router as found in - /var/lib/dhcpclient rather than default gateway (LP: #1089989) + - fix CloudStack DataSource to use Virtual Router as described by + CloudStack documentation if it is available by searching through dhclient + lease files. If it is not available, then fall back to the default + gateway. (LP: #1089989) - fix redaction of password field in log (LP: #1096417) - fix to cloud-config user setup. Previously, lock_passwd was broken and all accounts would be locked unless 'system' was given (LP: #1096423). diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 82e1e130..275caf0d 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -30,6 +30,8 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper as uhelp from cloudinit import util +from socket import inet_ntoa +from struct import pack LOG = logging.getLogger(__name__) @@ -122,26 +124,70 @@ class DataSourceCloudStack(sources.DataSource): return self.metadata['availability-zone'] +def get_default_gateway(): + # Returns the default gateway ip address in the dotted format. + lines = util.load_file("/proc/net/route").splitlines() + for line in lines: + items = line.split("\t") + if items[1] == "00000000": + # Found the default route, get the gateway + gw = inet_ntoa(pack(" latest_mtime: + latest_mtime = mtime + latest_file = abs_path + return latest_file + + def get_vr_address(): - # get the address of the virtual router via dhcp responses + # Get the address of the virtual router via dhcp leases # see http://bit.ly/T76eKC for documentation on the virtual router. 
- dhclient_d = "/var/lib/dhclient" - addresses = set() - dhclient_files = os.listdir(dhclient_d) - for file_name in dhclient_files: - if file_name.endswith(".lease") or file_name.endswith(".leases"): - with open(os.path.join(dhclient_d, file_name), "r") as fd: - for line in fd: - if "dhcp-server-identifier" in line: - words = line.strip(" ;\r\n").split(" ") - if len(words) > 2: - dhcp = words[2] - LOG.debug("Found DHCP identifier %s", dhcp) - addresses.add(dhcp) - if len(addresses) != 1: - # No unique virtual router found - return None - return addresses.pop() + # If no virtual router is detected, fallback on default gateway. + lease_file = get_latest_lease() + if not lease_file: + LOG.debug("No lease file found, using default gateway") + return get_default_gateway() + + latest_address = None + with open(lease_file, "r") as fd: + for line in fd: + if "dhcp-server-identifier" in line: + words = line.strip(" ;\r\n").split(" ") + if len(words) > 2: + dhcp = words[2] + LOG.debug("Found DHCP identifier %s", dhcp) + latest_address = dhcp + if not latest_address: + # No virtual router found, fallback on default gateway + LOG.debug("No DHCP found, using default gateway") + return get_default_gateway() + return latest_address # Used to match classes to dependencies -- cgit v1.2.3 From 9800832d4fbfef2624baa0d3c1a0aa737bc0dfb2 Mon Sep 17 00:00:00 2001 From: harlowja Date: Thu, 10 Jan 2013 23:09:02 -0800 Subject: Add a context manager function in test helpers. This function can be used to ensure that mocker objects are restored and verified during usage if exceptions are thrown while the mock object is being used. Ensure it is used in the config drive test when multiple mock objects are being created and restored. LP: #1098430 --- tests/unittests/helpers.py | 14 ++++ .../unittests/test_datasource/test_configdrive.py | 80 +++++++++++----------- 2 files changed, 53 insertions(+), 41 deletions(-) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 92540b0c..4258a29d 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -2,6 +2,9 @@ import os import sys import unittest +from contextlib import contextmanager + +from mocker import Mocker from mocker import MockerTestCase from cloudinit import helpers as ch @@ -31,6 +34,17 @@ else: pass +@contextmanager +def mocker(verify_calls=True): + m = Mocker() + try: + yield m + finally: + m.restore() + if verify_calls: + m.verify() + + # Makes the old path start # with new base instead of whatever # it previously had diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index aa5b98ed..6751a679 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -11,6 +11,7 @@ from cloudinit import settings from cloudinit.sources import DataSourceConfigDrive as ds from cloudinit import util +from tests.unittests import helpers as unit_helpers PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n' EC2_META = { @@ -89,23 +90,22 @@ class TestConfigDriveDataSource(MockerTestCase): 'swap': '/dev/vda3', } for name, dev_name in name_tests.items(): - my_mock = mocker.Mocker() - find_mock = my_mock.replace(util.find_devs_with, - spec=False, passthrough=False) - provided_name = dev_name[len('/dev/'):] - provided_name = "s" + provided_name[1:] - find_mock(mocker.ARGS) - my_mock.result([provided_name]) - exists_mock = my_mock.replace(os.path.exists, - spec=False, passthrough=False) - 
exists_mock(mocker.ARGS) - my_mock.result(False) - exists_mock(mocker.ARGS) - my_mock.result(True) - my_mock.replay() - device = cfg_ds.device_name_to_device(name) - my_mock.restore() - self.assertEquals(dev_name, device) + with unit_helpers.mocker() as my_mock: + find_mock = my_mock.replace(util.find_devs_with, + spec=False, passthrough=False) + provided_name = dev_name[len('/dev/'):] + provided_name = "s" + provided_name[1:] + find_mock(mocker.ARGS) + my_mock.result([provided_name]) + exists_mock = my_mock.replace(os.path.exists, + spec=False, passthrough=False) + exists_mock(mocker.ARGS) + my_mock.result(False) + exists_mock(mocker.ARGS) + my_mock.result(True) + my_mock.replay() + device = cfg_ds.device_name_to_device(name) + self.assertEquals(dev_name, device) def test_dev_os_map(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) @@ -122,19 +122,18 @@ class TestConfigDriveDataSource(MockerTestCase): 'swap': '/dev/vda3', } for name, dev_name in name_tests.items(): - my_mock = mocker.Mocker() - find_mock = my_mock.replace(util.find_devs_with, - spec=False, passthrough=False) - find_mock(mocker.ARGS) - my_mock.result([dev_name]) - exists_mock = my_mock.replace(os.path.exists, - spec=False, passthrough=False) - exists_mock(mocker.ARGS) - my_mock.result(True) - my_mock.replay() - device = cfg_ds.device_name_to_device(name) - my_mock.restore() - self.assertEquals(dev_name, device) + with unit_helpers.mocker() as my_mock: + find_mock = my_mock.replace(util.find_devs_with, + spec=False, passthrough=False) + find_mock(mocker.ARGS) + my_mock.result([dev_name]) + exists_mock = my_mock.replace(os.path.exists, + spec=False, passthrough=False) + exists_mock(mocker.ARGS) + my_mock.result(True) + my_mock.replay() + device = cfg_ds.device_name_to_device(name) + self.assertEquals(dev_name, device) def test_dev_ec2_remap(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) @@ -156,17 +155,16 @@ class TestConfigDriveDataSource(MockerTestCase): 'root2k': None, } for name, dev_name in name_tests.items(): - my_mock = mocker.Mocker() - exists_mock = my_mock.replace(os.path.exists, - spec=False, passthrough=False) - exists_mock(mocker.ARGS) - my_mock.result(False) - exists_mock(mocker.ARGS) - my_mock.result(True) - my_mock.replay() - device = cfg_ds.device_name_to_device(name) - self.assertEquals(dev_name, device) - my_mock.restore() + with unit_helpers.mocker(verify_calls=False) as my_mock: + exists_mock = my_mock.replace(os.path.exists, + spec=False, passthrough=False) + exists_mock(mocker.ARGS) + my_mock.result(False) + exists_mock(mocker.ARGS) + my_mock.result(True) + my_mock.replay() + device = cfg_ds.device_name_to_device(name) + self.assertEquals(dev_name, device) def test_dev_ec2_map(self): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) -- cgit v1.2.3 From a44ca2e095856a2401911ad72a235e6c4d598392 Mon Sep 17 00:00:00 2001 From: ctracey Date: Tue, 15 Jan 2013 11:36:56 -0500 Subject: Add a HACKING file Adding a HACKING file based upon a message from smoser on the mailing list from 12/12/2012. --- HACKING | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 HACKING diff --git a/HACKING b/HACKING new file mode 100644 index 00000000..2111f7e5 --- /dev/null +++ b/HACKING @@ -0,0 +1,27 @@ +To get changes into cloud-init, the process to follow is: + + * get your changes into a local bzr branch: + # init a repo, and checkout trunk (init repo is to share bzr info + # across multiple checkouts, its different than git). 
+ bzr init-repo cloud-init + bzr branch lp:cloud-init trunk.dist + bzr branch trunk.dist my-topic-branch + * commit your changes + bzr commit + # note, you can make multiple commits, fixes, more commits. + * check pylint and pep8 and test , and address issues + make test pylint pep8 + * push to launchpad to a personal branch: + bzr push lp:~/cloud-init/ + * propose that for a merge into lp:cloud-init via web browser + # open the branch in launchpad, it will be at: + # https://code.launchpad.net/// + # for example: + # https://code.launchpad.net/~smoser/cloud-init/mybranch + click 'propose for merging' + select 'lp:cloud-init' as the target branch + + Then, someone on cloud-init-dev (currently Scott Moser and Joshua Harlow) will +review your changes and follow up in the merge request. + + Feel free to ping #cloud-init on freenode if you have any questions. -- cgit v1.2.3 From 361738c6a9a14e32bd2123828fab8d8b70c6bc3a Mon Sep 17 00:00:00 2001 From: ctracey Date: Tue, 15 Jan 2013 16:08:43 -0500 Subject: add support for operating system families often it is convenient to classify a distro as being part of an operating system family. for instance, file templates may be identical for both debian and ubuntu, but to support this under the current templating code, one would need multiple templates for the same code. similarly, configuration handlers often fall into the same bucket: the configuraton is known to work/has been tested on a particular family of operating systems. right now this is handled with a declaration like: distros = ['fedora', 'rhel'] this fix seeks to address both of these issues. it allows for the simplification of the above line to: osfamilies = ['redhat'] and provides a mechanism for operating system family templates. --- cloudinit/config/__init__.py | 4 +++- cloudinit/distros/__init__.py | 16 +++++++++++++++- cloudinit/distros/debian.py | 1 + cloudinit/distros/rhel.py | 1 + cloudinit/stages.py | 9 +++++++-- 5 files changed, 27 insertions(+), 4 deletions(-) diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py index 69a8cc68..d57453be 100644 --- a/cloudinit/config/__init__.py +++ b/cloudinit/config/__init__.py @@ -52,5 +52,7 @@ def fixup_module(mod, def_freq=PER_INSTANCE): if freq and freq not in FREQUENCIES: LOG.warn("Module %s has an unknown frequency %s", mod, freq) if not hasattr(mod, 'distros'): - setattr(mod, 'distros', None) + setattr(mod, 'distros', []) + if not hasattr(mod, 'osfamilies'): + setattr(mod, 'osfamilies', []) return mod diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 38b2f829..ff325b40 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -35,6 +35,11 @@ from cloudinit import util from cloudinit.distros.parsers import hosts +OSFAMILIES = { + 'debian': ['debian', 'ubuntu'], + 'redhat': ['fedora', 'rhel'] +} + LOG = logging.getLogger(__name__) @@ -143,6 +148,16 @@ class Distro(object): def _select_hostname(self, hostname, fqdn): raise NotImplementedError() + @staticmethod + def expand_osfamily(family_list): + distros = [] + for family in family_list: + if not family in OSFAMILIES: + raise ValueError("No distibutions found for osfamily %s" + % (family)) + distros.extend(OSFAMILIES[family]) + return distros + def update_hostname(self, hostname, fqdn, prev_hostname_fn): applying_hostname = hostname @@ -515,7 +530,6 @@ def _get_package_mirror_info(mirror_info, availability_zone=None, return results - def _get_arch_package_mirror_info(package_mirrors, arch): # pull out the specific 
arch from a 'package_mirrors' config option default = None diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 7422f4f0..49b73477 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -48,6 +48,7 @@ class Distro(distros.Distro): # calls from repeatly happening (when they # should only happen say once per instance...) self._runner = helpers.Runners(paths) + self.osfamily = 'debian' def apply_locale(self, locale, out_fn=None): if not out_fn: diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index bc0877d5..e65be8d7 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -60,6 +60,7 @@ class Distro(distros.Distro): # calls from repeatly happening (when they # should only happen say once per instance...) self._runner = helpers.Runners(paths) + self.osfamily = 'redhat' def install_packages(self, pkglist): self.package_command('install', pkglist) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8d3213b4..d7d1dea0 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -529,11 +529,16 @@ class Modules(object): freq = mod.frequency if not freq in FREQUENCIES: freq = PER_INSTANCE - worked_distros = mod.distros + + worked_distros = set(mod.distros) + worked_distros.update( + distros.Distro.expand_osfamily(mod.osfamilies)) + if (worked_distros and d_name not in worked_distros): LOG.warn(("Module %s is verified on %s distros" " but not on %s distro. It may or may not work" - " correctly."), name, worked_distros, d_name) + " correctly."), name, list(worked_distros), + d_name) # Use the configs logger and not our own # TODO(harlowja): possibly check the module # for having a LOG attr and just give it back -- cgit v1.2.3 From 5d4f4df6804995d74e7962f60dcd72b26bcac69b Mon Sep 17 00:00:00 2001 From: ctracey Date: Tue, 15 Jan 2013 16:25:20 -0500 Subject: cleanup a pep8 failure accidentally removed a line between two functions. --- cloudinit/distros/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index ff325b40..5a2092c0 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -530,6 +530,7 @@ def _get_package_mirror_info(mirror_info, availability_zone=None, return results + def _get_arch_package_mirror_info(package_mirrors, arch): # pull out the specific arch from a 'package_mirrors' config option default = None -- cgit v1.2.3 From 93bf045ce5e676a7568d3b14b175295b6ca38003 Mon Sep 17 00:00:00 2001 From: ctracey Date: Tue, 15 Jan 2013 16:34:51 -0500 Subject: Fix broken cc_update_etc_hosts (LP: #1100036) Right now, all distros but ubuntu will fail to manage /etc/hosts. This is due to the fact that the templates are named: - hosts.ubuntu.tmpl - hosts.redhat.tmpl The config handler is specifically looking for a template with the given distro name. 
This change addresses this issue and is contingent upon support of 'osfamilies' as implemented in LP: #1100029 (lp:~craigtracey/cloud-init/osfamilies) --- cloudinit/config/cc_update_etc_hosts.py | 5 +++-- templates/hosts.debian.tmpl | 25 +++++++++++++++++++++++++ templates/hosts.ubuntu.tmpl | 25 ------------------------- 3 files changed, 28 insertions(+), 27 deletions(-) create mode 100644 templates/hosts.debian.tmpl delete mode 100644 templates/hosts.ubuntu.tmpl diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index 96103615..d3dd1f32 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -37,10 +37,11 @@ def handle(name, cfg, cloud, log, _args): # Render from a template file tpl_fn_name = cloud.get_template_filename("hosts.%s" % - (cloud.distro.name)) + (cloud.distro.osfamily)) if not tpl_fn_name: raise RuntimeError(("No hosts template could be" - " found for distro %s") % (cloud.distro.name)) + " found for distro %s") % + (cloud.distro.osfamily)) templater.render_to_file(tpl_fn_name, '/etc/hosts', {'hostname': hostname, 'fqdn': fqdn}) diff --git a/templates/hosts.debian.tmpl b/templates/hosts.debian.tmpl new file mode 100644 index 00000000..ae120b02 --- /dev/null +++ b/templates/hosts.debian.tmpl @@ -0,0 +1,25 @@ +## This file (/etc/cloud/templates/hosts.tmpl) is only utilized +## if enabled in cloud-config. Specifically, in order to enable it +## you need to add the following to config: +## manage_etc_hosts: True +## +## Note, double-hash commented lines will not appear in /etc/hosts +# +# Your system has configured 'manage_etc_hosts' as True. +# As a result, if you wish for changes to this file to persist +# then you will need to either +# a.) make changes to the master file in /etc/cloud/templates/hosts.tmpl +# b.) change or remove the value of 'manage_etc_hosts' in +# /etc/cloud/cloud.cfg or cloud-config from user-data +# +## The value '$hostname' will be replaced with the local-hostname +127.0.1.1 $fqdn $hostname +127.0.0.1 localhost + +# The following lines are desirable for IPv6 capable hosts +::1 ip6-localhost ip6-loopback +fe00::0 ip6-localnet +ff00::0 ip6-mcastprefix +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters +ff02::3 ip6-allhosts diff --git a/templates/hosts.ubuntu.tmpl b/templates/hosts.ubuntu.tmpl deleted file mode 100644 index ae120b02..00000000 --- a/templates/hosts.ubuntu.tmpl +++ /dev/null @@ -1,25 +0,0 @@ -## This file (/etc/cloud/templates/hosts.tmpl) is only utilized -## if enabled in cloud-config. Specifically, in order to enable it -## you need to add the following to config: -## manage_etc_hosts: True -## -## Note, double-hash commented lines will not appear in /etc/hosts -# -# Your system has configured 'manage_etc_hosts' as True. -# As a result, if you wish for changes to this file to persist -# then you will need to either -# a.) make changes to the master file in /etc/cloud/templates/hosts.tmpl -# b.) 
change or remove the value of 'manage_etc_hosts' in -# /etc/cloud/cloud.cfg or cloud-config from user-data -# -## The value '$hostname' will be replaced with the local-hostname -127.0.1.1 $fqdn $hostname -127.0.0.1 localhost - -# The following lines are desirable for IPv6 capable hosts -::1 ip6-localhost ip6-loopback -fe00::0 ip6-localnet -ff00::0 ip6-mcastprefix -ff02::1 ip6-allnodes -ff02::2 ip6-allrouters -ff02::3 ip6-allhosts -- cgit v1.2.3 From e561742aeab1e8090467f0fa304ee06e82e85f2c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 16 Jan 2013 19:46:30 -0500 Subject: DataSourceConfigDrive: consider CD rom as valid config-drive source. previously, there was an attempt in the config drive source to limit the source device to a "full block device" rather than a partition. This was done by a simplistic approach of checking that the last character of the name was not a number. That was filtering out CD-rom devices (sr0). Now, we have a bit more sophisticated approach to that same problem. We filter out block devices that have a 'partition' entry in /sys/class/block/DEVICE_NAME/partition . LP: #1100545 --- ChangeLog | 2 ++ cloudinit/sources/DataSourceConfigDrive.py | 2 +- cloudinit/util.py | 7 +++++++ tests/unittests/test_datasource/test_configdrive.py | 17 ++++++++++++----- 4 files changed, 22 insertions(+), 6 deletions(-) diff --git a/ChangeLog b/ChangeLog index 544032a2..f076a27f 100644 --- a/ChangeLog +++ b/ChangeLog @@ -20,6 +20,8 @@ all accounts would be locked unless 'system' was given (LP: #1096423). - Allow 'sr0' (or sr[0-9]) to be specified without /dev/ as a source for mounts. [Vlastimil Holer] + - allow config-drive-data to come from a CD device by more correctly + filtering out partitions. (LP: #1100545) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index c7826851..ec016a1d 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -270,7 +270,7 @@ def find_candidate_devs(): combined = (by_label + [d for d in by_fstype if d not in by_label]) # We are looking for block device (sda, not sda1), ignore partitions - combined = [d for d in combined if d[-1] not in "0123456789"] + combined = [d for d in combined if not util.is_partition(d)] return combined diff --git a/cloudinit/util.py b/cloudinit/util.py index ab918433..c0ea8d91 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1553,3 +1553,10 @@ def keyval_str_to_dict(kvstring): val = True ret[key] = val return ret + + +def is_partition(device): + if device.startswith("/dev/"): + device = device[5:] + + return os.path.isfile("/sys/class/block/%s/partition" % device) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 6751a679..930086db 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -257,19 +257,25 @@ class TestConfigDriveDataSource(MockerTestCase): ds.read_config_drive_dir, my_d) def test_find_candidates(self): - devs_with_answers = { - "TYPE=vfat": [], - "TYPE=iso9660": ["/dev/vdb"], - "LABEL=config-2": ["/dev/vdb"], - } + devs_with_answers = {} def my_devs_with(criteria): return devs_with_answers[criteria] + def my_is_partition(dev): + return dev[-1] in "0123456789" and not dev.startswith("sr") + try: orig_find_devs_with = 
util.find_devs_with util.find_devs_with = my_devs_with + orig_is_partition = util.is_partition + util.is_partition = my_is_partition + + devs_with_answers = {"TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=config-2": ["/dev/vdb"], + } self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) # add a vfat item @@ -285,6 +291,7 @@ class TestConfigDriveDataSource(MockerTestCase): finally: util.find_devs_with = orig_find_devs_with + util.is_partition = orig_is_partition def test_pubkeys_v2(self): """Verify that public-keys work in config-drive-v2.""" -- cgit v1.2.3 From 01f2979bb4fb0fcb2a51471cf81821c73f773288 Mon Sep 17 00:00:00 2001 From: Craig Tracey Date: Thu, 17 Jan 2013 00:09:49 -0500 Subject: Adding a resolv.conf configuration module (LP: #1100434) Managing resolv.conf can be quite handy when running in an environment where you would like to control DNS resolution, despite being provided DNS server information by DHCP. This module will allow one to define the structure of their resolv.conf and write it PER_ONCE. Right now this makes the most sense on RedHat, and therefore, has defined 'distros' as such. --- cloudinit/config/cc_resolv_conf.py | 107 +++++++++++++++++++++++++++++++++++++ templates/resolv.conf.tmpl | 39 ++++++++++++++ 2 files changed, 146 insertions(+) create mode 100644 cloudinit/config/cc_resolv_conf.py create mode 100644 templates/resolv.conf.tmpl diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py new file mode 100644 index 00000000..f67fa992 --- /dev/null +++ b/cloudinit/config/cc_resolv_conf.py @@ -0,0 +1,107 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2013 Craig Tracey +# +# Author: Craig Tracey +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# Note: +# This module is intended to manage resolv.conf in environments where +# early configuration of resolv.conf is necessary for further +# bootstrapping and/or where configuration management such as puppet or +# chef own dns configuration. As Debian/Ubuntu will, by default, utilize +# resovlconf, and similarly RedHat will use sysconfig, this module is +# likely to be of little use unless those are configured correctly. +# +# For RedHat with sysconfig, be sure to set PEERDNS=no for all DHCP +# enabled NICs. And, in Ubuntu/Debian it is recommended that DNS +# be configured via the standard /etc/network/interfaces configuration +# file. 
+# +# +# Usage Example: +# +# #cloud-config +# manage_resolv_conf: true +# +# resolv_conf: +# nameservers: ['8.8.4.4', '8.8.8.8'] +# searchdomains: +# - foo.example.com +# - bar.example.com +# domain: example.com +# options: +# rotate: true +# timeout: 1 +# + + +from cloudinit.settings import PER_ONCE +from cloudinit import templater +from cloudinit import util + +frequency = PER_ONCE + +distros = ['fedora', 'rhel'] + + +def generate_resolv_conf(cloud, log, params): + template_fn = cloud.get_template_filename('resolv.conf') + if not template_fn: + log.warn("No template found, not rendering /etc/resolv.conf") + return + + flags = [] + false_flags = [] + if 'options' in params: + for key, val in params['options'].iteritems(): + if type(val) == bool: + if val: + flags.append(key) + else: + false_flags.append(key) + + for flag in flags + false_flags: + del params['options'][flag] + + params['flags'] = flags + log.debug("Writing resolv.conf from template %s" % template_fn) + templater.render_to_file(template_fn, '/etc/resolv.conf', params) + + +def handle(name, cfg, _cloud, log, _args): + """ + Handler for resolv.conf + + @param name: The module name "resolv-conf" from cloud.cfg + @param cfg: A nested dict containing the entire cloud config contents. + @param cloud: The L{CloudInit} object in use. + @param log: Pre-initialized Python logger object to use for logging. + @param args: Any module arguments from cloud.cfg + """ + if "manage_resolv_conf" not in cfg: + log.debug(("Skipping module named %s," + " no 'manage_resolv_conf' key in configuration"), name) + return + + if not util.get_cfg_option_bool("manage_resolv_conf", False): + log.debug(("Skipping module named %s," + " 'manage_resolv_conf' present but set to False"), name) + return + + if not "resolv_conf" in cfg: + log.warn("manage_resolv_conf True but no parameters provided!") + + generate_resolv_conf(_cloud, log, cfg["resolv_conf"]) + return diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl new file mode 100644 index 00000000..b7e97b13 --- /dev/null +++ b/templates/resolv.conf.tmpl @@ -0,0 +1,39 @@ +# +# Your system has been configured with 'manage-resolv-conf' set to true. +# As a result, cloud-init has written this file with configuration data +# that it has been provided. Cloud-init, by default, will write this file +# a single time (PER_ONCE). +# + +#if $varExists('nameservers') +#for $server in $nameservers +nameserver $server +#end for +#end if +#if $varExists('searchdomains') +search #slurp +#for $search in $searchdomains +$search #slurp +#end for + +#end if +#if $varExists('domain') +domain $domain +#end if +#if $varExists('sortlist') +sortlist #slurp +#for $sort in $sortlist +$sort #slurp +#end for + +#end if +#if $varExists('options') or $varExists('flags') +options #slurp +#for $flag in $flags +$flag #slurp +#end for +#for $key, $value in $options.items() +$key:$value #slurp +#end for + +#end if -- cgit v1.2.3 From 2e5875ed212fb11a91b6b2bc81dfb038b960082b Mon Sep 17 00:00:00 2001 From: Craig Tracey Date: Thu, 17 Jan 2013 00:27:40 -0500 Subject: Fixing missing argument to get_cfg_option_bool Forgot to pass cfg to this function, and thus this would have never worked. 
--- cloudinit/config/cc_resolv_conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index f67fa992..17c74695 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -95,7 +95,7 @@ def handle(name, cfg, _cloud, log, _args): " no 'manage_resolv_conf' key in configuration"), name) return - if not util.get_cfg_option_bool("manage_resolv_conf", False): + if not util.get_cfg_option_bool(cfg, "manage_resolv_conf", False): log.debug(("Skipping module named %s," " 'manage_resolv_conf' present but set to False"), name) return -- cgit v1.2.3 From baefd17a9d997e11f85bf89d9337c2d40748bc37 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 18 Jan 2013 10:57:20 -0800 Subject: Adjust how the legacy user: XYZ config alters the normalized user list Previously if a legacy user: XYZ entry was found, XYZ would not automatically be promoted to the default user but would instead just be added on as a new entry to the normalized user list. It appears the behavior that is wanted is for the XYZ entry to be added on as the default user (thus overriding a distro provided default user), which better matches how the code previous worked. LP: #1100920 --- cloudinit/distros/__init__.py | 67 +++++++++++++++------- .../test_distros/test_user_data_normalize.py | 10 +++- 2 files changed, 52 insertions(+), 25 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 38b2f829..c74be4e2 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -705,41 +705,64 @@ def _normalize_users(u_cfg, def_user_cfg=None): def normalize_users_groups(cfg, distro): if not cfg: cfg = {} + users = {} groups = {} if 'groups' in cfg: groups = _normalize_groups(cfg['groups']) - # Handle the previous style of doing this... + # Handle the previous style of doing this where the first user + # overrides the concept of the default user if provided in the user: XYZ + # format. old_user = None if 'user' in cfg and cfg['user']: - old_user = str(cfg['user']) - if not 'users' in cfg: - cfg['users'] = old_user + old_user = cfg['user'] + # Translate it into the format that is more useful + # going forward + if isinstance(old_user, (basestring, str)): + old_user = { + 'name': old_user, + } + if not isinstance(old_user, (dict)): + LOG.warn(("Format for 'user:' key must be a string or " + "dictionary and not %s"), util.obj_name(old_user)) old_user = None - if 'users' in cfg: - default_user_config = None + + default_user_config = None + if not old_user: + # If no old user format, then assume the distro + # provides what the 'default' user maps to, but notice + # that if this is provided, we won't automatically inject + # a 'default' user into the users list, while if a old user + # format is provided we will. try: default_user_config = distro.get_default_user() except NotImplementedError: LOG.warn(("Distro has not implemented default user " "access. No default user will be normalized.")) - base_users = cfg['users'] - if old_user: - if isinstance(base_users, (list)): - if len(base_users): - # The old user replaces user[0] - base_users[0] = {'name': old_user} - else: - # Just add it on at the end... 
- base_users.append({'name': old_user}) - elif isinstance(base_users, (dict)): - if old_user not in base_users: - base_users[old_user] = True - elif isinstance(base_users, (str, basestring)): - # Just append it on to be re-parsed later - base_users += ",%s" % (old_user) - users = _normalize_users(base_users, default_user_config) + else: + default_user_config = dict(old_user) + + base_users = cfg.get('users', []) + if not isinstance(base_users, (list, dict, str, basestring)): + LOG.warn(("Format for 'users:' key must be a comma separated string" + " or a dictionary or a list and not %s"), + util.obj_name(base_users)) + base_users = [] + + if old_user: + # Ensure that when user: is provided that this user + # always gets added (as the default user) + if isinstance(base_users, (list)): + # Just add it on at the end... + base_users.append({'name': 'default'}) + elif isinstance(base_users, (dict)): + base_users['default'] = base_users.get('default', True) + elif isinstance(base_users, (str, basestring)): + # Just append it on to be re-parsed later + base_users += ",default" + + users = _normalize_users(base_users, default_user_config) return (users, groups) diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index 5d9d4311..50398c74 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -173,26 +173,29 @@ class TestUGNormalize(MockerTestCase): 'users': 'default' } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) + self.assertNotIn('bob', users) # Bob is not the default now, zetta is self.assertIn('zetta', users) + self.assertTrue(users['zetta']['default']) self.assertNotIn('default', users) ug_cfg = { 'user': 'zetta', 'users': 'default, joe' } (users, _groups) = self._norm(ug_cfg, distro) - self.assertIn('bob', users) + self.assertNotIn('bob', users) # Bob is not the default now, zetta is self.assertIn('joe', users) self.assertIn('zetta', users) + self.assertTrue(users['zetta']['default']) self.assertNotIn('default', users) ug_cfg = { 'user': 'zetta', 'users': ['bob', 'joe'] } (users, _groups) = self._norm(ug_cfg, distro) - self.assertNotIn('bob', users) + self.assertIn('bob', users) self.assertIn('joe', users) self.assertIn('zetta', users) + self.assertTrue(users['zetta']['default']) ug_cfg = { 'user': 'zetta', 'users': { @@ -204,6 +207,7 @@ class TestUGNormalize(MockerTestCase): self.assertIn('bob', users) self.assertIn('joe', users) self.assertIn('zetta', users) + self.assertTrue(users['zetta']['default']) ug_cfg = { 'user': 'zetta', } -- cgit v1.2.3 From 06ca24c39289f2d1f0f3f810abf155043a36d2f2 Mon Sep 17 00:00:00 2001 From: harlowja Date: Sat, 19 Jan 2013 17:51:24 -0800 Subject: Merge the old user style with the distro provided config. When the old user: style entry is found, don't forget that we need to use the distro settings that are provided but override the name with the new name, this is now accomplished by merging them together in the correct order (using the standard cloud-init merging algo). 
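The important detail is the merge order: the legacy 'user' entry is merged ahead of the distro's default-user settings, so its keys (at minimum the name) take precedence while every other attribute is inherited from the distro. A rough illustration, assuming util.mergemanydict gives earlier dictionaries priority on conflicting keys (the ordering used in the patch below); the distro values shown here are made up:

    from cloudinit import util

    old_user = {'name': 'zetta'}
    distro_user_config = {'name': 'ubuntu', 'groups': ['sudo'],
                          'shell': '/bin/bash', 'lock_passwd': True}

    default_user_config = util.mergemanydict([old_user, distro_user_config])
    # Expected result under that assumption:
    # {'name': 'zetta', 'groups': ['sudo'], 'shell': '/bin/bash',
    #  'lock_passwd': True}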
--- cloudinit/distros/__init__.py | 42 +++++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 19 deletions(-) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index c74be4e2..ddea8417 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -714,7 +714,7 @@ def normalize_users_groups(cfg, distro): # Handle the previous style of doing this where the first user # overrides the concept of the default user if provided in the user: XYZ # format. - old_user = None + old_user = {} if 'user' in cfg and cfg['user']: old_user = cfg['user'] # Translate it into the format that is more useful @@ -724,28 +724,32 @@ def normalize_users_groups(cfg, distro): 'name': old_user, } if not isinstance(old_user, (dict)): - LOG.warn(("Format for 'user:' key must be a string or " + LOG.warn(("Format for 'user' key must be a string or " "dictionary and not %s"), util.obj_name(old_user)) - old_user = None - - default_user_config = None - if not old_user: - # If no old user format, then assume the distro - # provides what the 'default' user maps to, but notice - # that if this is provided, we won't automatically inject - # a 'default' user into the users list, while if a old user - # format is provided we will. - try: - default_user_config = distro.get_default_user() - except NotImplementedError: - LOG.warn(("Distro has not implemented default user " - "access. No default user will be normalized.")) - else: - default_user_config = dict(old_user) + old_user = {} + + # If no old user format, then assume the distro + # provides what the 'default' user maps to, but notice + # that if this is provided, we won't automatically inject + # a 'default' user into the users list, while if a old user + # format is provided we will. + distro_user_config = {} + try: + distro_user_config = distro.get_default_user() + except NotImplementedError: + LOG.warn(("Distro has not implemented default user " + "access. No distribution provided default user" + " will be normalized.")) + + # Merge the old user (which may just be an empty dict when not + # present with the distro provided default user configuration so + # that the old user style picks up all the distribution specific + # attributes (if any) + default_user_config = util.mergemanydict([old_user, distro_user_config]) base_users = cfg.get('users', []) if not isinstance(base_users, (list, dict, str, basestring)): - LOG.warn(("Format for 'users:' key must be a comma separated string" + LOG.warn(("Format for 'users' key must be a comma separated string" " or a dictionary or a list and not %s"), util.obj_name(base_users)) base_users = [] -- cgit v1.2.3 From aee23ad10ef7854a197ab664a6e0738cd042d716 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 21 Jan 2013 20:21:04 -0800 Subject: Add in a tool to help make mime multipart messages. 
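For reference, a typical invocation of the tool added below (file names are only illustrative): each -a/--attach argument pairs a local file with the cloud-init part type to label it as, and the combined multipart message is written to stdout so it can be captured for use as user-data:

    $ python tools/make-mime.py \
          -a my-cloud-config.yaml:cloud-config \
          -a my-setup.sh:x-shellscript > combined-user-data.txt

The given type becomes the text/* subtype of the part, so a type outside the known list still produces a part, just with a warning on stderr.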
--- tools/make-mime.py | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100755 tools/make-mime.py diff --git a/tools/make-mime.py b/tools/make-mime.py new file mode 100755 index 00000000..72b29fb9 --- /dev/null +++ b/tools/make-mime.py @@ -0,0 +1,60 @@ +#!/usr/bin/python + +import argparse +import sys + +from email.mime.multipart import MIMEMultipart +from email.mime.text import MIMEText + +KNOWN_CONTENT_TYPES = [ + 'text/x-include-once-url', + 'text/x-include-url', + 'text/cloud-config-archive', + 'text/upstart-job', + 'text/cloud-config', + 'text/part-handler', + 'text/x-shellscript', + 'text/cloud-boothook', +] + + +def file_content_type(text): + try: + filename, content_type = text.split(":", 1) + return (open(filename, 'r'), filename, content_type.strip()) + except: + raise argparse.ArgumentError("Invalid value for %r" % (text)) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("-a", "--attach", + dest="files", + type=file_content_type, + action='append', + default=[], + required=True, + metavar=":", + help="attach the given file in the specified " + "content type") + args = parser.parse_args() + sub_messages = [] + for i, (fh, filename, format_type) in enumerate(args.files): + contents = fh.read() + sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) + sub_message.add_header('Content-Disposition', + 'attachment; filename="%s"' % (filename)) + content_type = sub_message.get_content_type().lower() + if content_type not in KNOWN_CONTENT_TYPES: + sys.stderr.write(("WARNING: content type %r for attachment %s " + "may be incorrect!\n") % (content_type, i + 1)) + sub_messages.append(sub_message) + combined_message = MIMEMultipart() + for msg in sub_messages: + combined_message.attach(msg) + print(combined_message) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) -- cgit v1.2.3 From 6fb6fe24d5ba185d42f2d7a82c3c54d26fea3392 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Thu, 24 Jan 2013 18:41:26 -0800 Subject: Add docs which can be used on readthedocs.org Include a new set of docs that can be used to create a readthedocs.org site, which includes examples, directory layout, capabilities and such. This in-code documentation then allows for readthedocs.org to create a website directly from the cloud-init codebase. 
--- doc/examples/cloud-config-add-apt-repos.txt | 34 ++++++ doc/examples/cloud-config-boot-cmds.txt | 15 +++ doc/examples/cloud-config-final-message.txt | 7 ++ doc/examples/cloud-config-install-packages.txt | 11 ++ doc/examples/cloud-config-mount-points.txt | 39 ++++++ doc/examples/cloud-config-phone-home.txt | 14 +++ doc/examples/cloud-config-power-state.txt | 22 ++++ doc/examples/cloud-config-run-cmds.txt | 21 ++++ doc/examples/cloud-config-ssh-keys.txt | 46 +++++++ doc/examples/cloud-config-update-apt.txt | 7 ++ doc/examples/cloud-config-update-packages.txt | 8 ++ doc/rtd/conf.py | 73 ++++++++++++ doc/rtd/index.rst | 29 +++++ doc/rtd/logo.png | Bin 0 -> 4477 bytes doc/rtd/topics/availability.rst | 20 ++++ doc/rtd/topics/capabilities.rst | 24 ++++ doc/rtd/topics/dir_layout.rst | 81 +++++++++++++ doc/rtd/topics/examples.rst | 121 +++++++++++++++++++ doc/rtd/topics/format.rst | 159 +++++++++++++++++++++++++ doc/rtd/topics/modules.rst | 3 + doc/rtd/topics/moreinfo.rst | 12 ++ 21 files changed, 746 insertions(+) create mode 100644 doc/examples/cloud-config-add-apt-repos.txt create mode 100644 doc/examples/cloud-config-boot-cmds.txt create mode 100644 doc/examples/cloud-config-final-message.txt create mode 100644 doc/examples/cloud-config-install-packages.txt create mode 100644 doc/examples/cloud-config-mount-points.txt create mode 100644 doc/examples/cloud-config-phone-home.txt create mode 100644 doc/examples/cloud-config-power-state.txt create mode 100644 doc/examples/cloud-config-run-cmds.txt create mode 100644 doc/examples/cloud-config-ssh-keys.txt create mode 100644 doc/examples/cloud-config-update-apt.txt create mode 100644 doc/examples/cloud-config-update-packages.txt create mode 100644 doc/rtd/conf.py create mode 100644 doc/rtd/index.rst create mode 100644 doc/rtd/logo.png create mode 100644 doc/rtd/topics/availability.rst create mode 100644 doc/rtd/topics/capabilities.rst create mode 100644 doc/rtd/topics/dir_layout.rst create mode 100644 doc/rtd/topics/examples.rst create mode 100644 doc/rtd/topics/format.rst create mode 100644 doc/rtd/topics/modules.rst create mode 100644 doc/rtd/topics/moreinfo.rst diff --git a/doc/examples/cloud-config-add-apt-repos.txt b/doc/examples/cloud-config-add-apt-repos.txt new file mode 100644 index 00000000..be9d5472 --- /dev/null +++ b/doc/examples/cloud-config-add-apt-repos.txt @@ -0,0 +1,34 @@ +#cloud-config + +# Add apt repositories +# +# Default: auto select based on cloud metadata +# in ec2, the default is .archive.ubuntu.com +# apt_mirror: +# use the provided mirror +# apt_mirror_search: +# search the list for the first mirror. +# this is currently very limited, only verifying that +# the mirror is dns resolvable or an IP address +# +# if neither apt_mirror nor apt_mirror search is set (the default) +# then use the mirror provided by the DataSource found. +# In EC2, that means using .ec2.archive.ubuntu.com +# +# if no mirror is provided by the DataSource, and 'apt_mirror_search_dns' is +# true, then search for dns names '-mirror' in each of +# - fqdn of this host per cloud metadata +# - localdomain +# - no domain (which would search domains listed in /etc/resolv.conf) +# If there is a dns entry for -mirror, then it is assumed that there +# is a distro mirror at http://-mirror./ +# +# That gives the cloud provider the opportunity to set mirrors of a distro +# up and expose them only by creating dns entries. 
+# +# if none of that is found, then the default distro mirror is used +apt_mirror: http://us.archive.ubuntu.com/ubuntu/ +apt_mirror_search: + - http://local-mirror.mydomain + - http://archive.ubuntu.com +apt_mirror_search_dns: False diff --git a/doc/examples/cloud-config-boot-cmds.txt b/doc/examples/cloud-config-boot-cmds.txt new file mode 100644 index 00000000..b281d327 --- /dev/null +++ b/doc/examples/cloud-config-boot-cmds.txt @@ -0,0 +1,15 @@ +#cloud-config + +# boot commands +# default: none +# this is very similar to runcmd, but commands run very early +# in the boot process, only slightly after a 'boothook' would run. +# bootcmd should really only be used for things that could not be +# done later in the boot process. bootcmd is very much like +# boothook, but possibly with more friendly. +# * bootcmd will run on every boot +# * the INSTANCE_ID variable will be set to the current instance id. +# * you can use 'cloud-init-boot-per' command to help only run once +bootcmd: + - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts + - [ cloud-init-per, once, mymkfs, mkfs, /dev/vdb ] diff --git a/doc/examples/cloud-config-final-message.txt b/doc/examples/cloud-config-final-message.txt new file mode 100644 index 00000000..0ce31467 --- /dev/null +++ b/doc/examples/cloud-config-final-message.txt @@ -0,0 +1,7 @@ +#cloud-config + +# final_message +# default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds +# this message is written by cloud-final when the system is finished +# its first boot +final_message: "The system is finally up, after $UPTIME seconds" diff --git a/doc/examples/cloud-config-install-packages.txt b/doc/examples/cloud-config-install-packages.txt new file mode 100644 index 00000000..4984818f --- /dev/null +++ b/doc/examples/cloud-config-install-packages.txt @@ -0,0 +1,11 @@ +#cloud-config + +# Install additional packages on first boot +# +# Default: none +# +# if packages are specified, this apt_update will be set to true +# +packages: + - pwgen + - pastebinit diff --git a/doc/examples/cloud-config-mount-points.txt b/doc/examples/cloud-config-mount-points.txt new file mode 100644 index 00000000..416006db --- /dev/null +++ b/doc/examples/cloud-config-mount-points.txt @@ -0,0 +1,39 @@ +#cloud-config + +# set up mount points +# 'mounts' contains a list of lists +# the inner list are entries for an /etc/fstab line +# ie : [ fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno ] +# +# default: +# mounts: +# - [ ephemeral0, /mnt ] +# - [ swap, none, swap, sw, 0, 0 ] +# +# in order to remove a previously listed mount (ie, one from defaults) +# list only the fs_spec. For example, to override the default, of +# mounting swap: +# - [ swap ] +# or +# - [ swap, null ] +# +# - if a device does not exist at the time, an entry will still be +# written to /etc/fstab. +# - '/dev' can be ommitted for device names that begin with: xvd, sd, hd, vd +# - if an entry does not have all 6 fields, they will be filled in +# with values from 'mount_default_fields' below. +# +# Note, that you should set 'nobootwait' (see man fstab) for volumes that may +# not be attached at instance boot (or reboot) +# +mounts: + - [ ephemeral0, /mnt, auto, "defaults,noexec" ] + - [ sdc, /opt/data ] + - [ xvdh, /opt/data, "auto", "defaults,nobootwait", "0", "0" ] + - [ dd, /dev/zero ] + +# mount_default_fields +# These values are used to fill in any entries in 'mounts' that are not +# complete. This must be an array, and must have 7 fields. 
+mount_default_fields: [ None, None, "auto", "defaults,nobootwait", "0", "2" ] + diff --git a/doc/examples/cloud-config-phone-home.txt b/doc/examples/cloud-config-phone-home.txt new file mode 100644 index 00000000..7f2b69f7 --- /dev/null +++ b/doc/examples/cloud-config-phone-home.txt @@ -0,0 +1,14 @@ +#cloud-config + +# phone_home: if this dictionary is present, then the phone_home +# cloud-config module will post specified data back to the given +# url +# default: none +# phone_home: +# url: http://my.foo.bar/$INSTANCE/ +# post: all +# tries: 10 +# +phone_home: + url: http://my.example.com/$INSTANCE_ID/ + post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id ] diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt new file mode 100644 index 00000000..59f062d0 --- /dev/null +++ b/doc/examples/cloud-config-power-state.txt @@ -0,0 +1,22 @@ +#cloud-config + +## poweroff or reboot system after finished +# default: none +# +# power_state can be used to make the system shutdown, reboot or +# halt after boot is finished. This same thing can be acheived by +# user-data scripts or by runcmd by simply invoking 'shutdown'. +# +# Doing it this way ensures that cloud-init is entirely finished with +# modules that would be executed, and avoids any error/log messages +# that may go to the console as a result of system services like +# syslog being taken down while cloud-init is running. +# +# delay: form accepted by shutdown. default is 'now'. other format +# accepted is +m (m in minutes) +# mode: required. must be one of 'poweroff', 'halt', 'reboot' +# message: provided as the message argument to 'shutdown'. default is none. +power_state: + delay: 30 + mode: poweroff + message: Bye Bye diff --git a/doc/examples/cloud-config-run-cmds.txt b/doc/examples/cloud-config-run-cmds.txt new file mode 100644 index 00000000..61b3bd63 --- /dev/null +++ b/doc/examples/cloud-config-run-cmds.txt @@ -0,0 +1,21 @@ +#cloud-config + +# run commands +# default: none +# runcmd contains a list of either lists or a string +# each item will be executed in order at rc.local like level with +# output to the console +# - if the item is a list, the items will be properly executed as if +# passed to execve(3) (with the first arg as the command). +# - if the item is a string, it will be simply written to the file and +# will be interpreted by 'sh' +# +# Note, that the list has to be proper yaml, so you have to escape +# any characters yaml would eat (':' can be problematic) +runcmd: + - [ ls, -l, / ] + - [ sh, -xc, "echo $(date) ': hello world!'" ] + - [ sh, -c, echo "=========hello world'=========" ] + - ls -l /root + - [ wget, "http://slashdot.org", -O, /tmp/index.html ] + diff --git a/doc/examples/cloud-config-ssh-keys.txt b/doc/examples/cloud-config-ssh-keys.txt new file mode 100644 index 00000000..235a114f --- /dev/null +++ b/doc/examples/cloud-config-ssh-keys.txt @@ -0,0 +1,46 @@ +#cloud-config + +# add each entry to ~/.ssh/authorized_keys for the configured user or the +# first user defined in the user definition directive. 
+ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEA3FSyQwBI6Z+nCSjUUk8EEAnnkhXlukKoUPND/RRClWz2s5TCzIkd3Ou5+Cyz71X0XmazM3l5WgeErvtIwQMyT1KjNoMhoJMrJnWqQPOt5Q8zWd9qG7PBl9+eiH5qV7NZ mykey@host + - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw== smoser@brickies + +# Send pre-generated ssh private keys to the server +# If these are present, they will be written to /etc/ssh and +# new random keys will not be generated +# in addition to 'rsa' and 'dsa' as shown below, 'ecdsa' is also supported +ssh_keys: + rsa_private: | + -----BEGIN RSA PRIVATE KEY----- + MIIBxwIBAAJhAKD0YSHy73nUgysO13XsJmd4fHiFyQ+00R7VVu2iV9Qcon2LZS/x + 1cydPZ4pQpfjEha6WxZ6o8ci/Ea/w0n+0HGPwaxlEG2Z9inNtj3pgFrYcRztfECb + 1j6HCibZbAzYtwIBIwJgO8h72WjcmvcpZ8OvHSvTwAguO2TkR6mPgHsgSaKy6GJo + PUJnaZRWuba/HX0KGyhz19nPzLpzG5f0fYahlMJAyc13FV7K6kMBPXTRR6FxgHEg + L0MPC7cdqAwOVNcPY6A7AjEA1bNaIjOzFN2sfZX0j7OMhQuc4zP7r80zaGc5oy6W + p58hRAncFKEvnEq2CeL3vtuZAjEAwNBHpbNsBYTRPCHM7rZuG/iBtwp8Rxhc9I5w + ixvzMgi+HpGLWzUIBS+P/XhekIjPAjA285rVmEP+DR255Ls65QbgYhJmTzIXQ2T9 + luLvcmFBC6l35Uc4gTgg4ALsmXLn71MCMGMpSWspEvuGInayTCL+vEjmNBT+FAdO + W7D4zCpI43jRS9U06JVOeSc9CDk2lwiA3wIwCTB/6uc8Cq85D9YqpM10FuHjKpnP + REPPOyrAspdeOAV+6VKRavstea7+2DZmSUgE + -----END RSA PRIVATE KEY----- + + rsa_public: ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAGEAoPRhIfLvedSDKw7XdewmZ3h8eIXJD7TRHtVW7aJX1ByifYtlL/HVzJ09nilCl+MSFrpbFnqjxyL8Rr/DSf7QcY/BrGUQbZn2Kc22PemAWthxHO18QJvWPocKJtlsDNi3 smoser@localhost + + dsa_private: | + -----BEGIN DSA PRIVATE KEY----- + MIIBuwIBAAKBgQDP2HLu7pTExL89USyM0264RCyWX/CMLmukxX0Jdbm29ax8FBJT + pLrO8TIXVY5rPAJm1dTHnpuyJhOvU9G7M8tPUABtzSJh4GVSHlwaCfycwcpLv9TX + DgWIpSj+6EiHCyaRlB1/CBp9RiaB+10QcFbm+lapuET+/Au6vSDp9IRtlQIVAIMR + 8KucvUYbOEI+yv+5LW9u3z/BAoGBAI0q6JP+JvJmwZFaeCMMVxXUbqiSko/P1lsa + LNNBHZ5/8MOUIm8rB2FC6ziidfueJpqTMqeQmSAlEBCwnwreUnGfRrKoJpyPNENY + d15MG6N5J+z81sEcHFeprryZ+D3Ge9VjPq3Tf3NhKKwCDQ0240aPezbnjPeFm4mH + bYxxcZ9GAoGAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI3 + 8UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC + /QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQCFEIsKKWv + 99iziAH0KBMVbxy03Trz + -----END DSA PRIVATE KEY----- + + dsa_public: ssh-dss AAAAB3NzaC1kc3MAAACBAM/Ycu7ulMTEvz1RLIzTbrhELJZf8Iwua6TFfQl1ubb1rHwUElOkus7xMhdVjms8AmbV1Meem7ImE69T0bszy09QAG3NImHgZVIeXBoJ/JzByku/1NcOBYilKP7oSIcLJpGUHX8IGn1GJoH7XRBwVub6Vqm4RP78C7q9IOn0hG2VAAAAFQCDEfCrnL1GGzhCPsr/uS1vbt8/wQAAAIEAjSrok/4m8mbBkVp4IwxXFdRuqJKSj8/WWxos00Ednn/ww5QibysHYULrOKJ1+54mmpMyp5CZICUQELCfCt5ScZ9GsqgmnI80Q1h3Xkwbo3kn7PzWwRwcV6muvJn4PcZ71WM+rdN/c2EorAINDTbjRo97NueM94WbiYdtjHFxn0YAAACAXmLIFSQgiAPu459rCKxT46tHJtM0QfnNiEnQLbFluefZ/yiI4DI38UzTCOXLhUA7ybmZha+D/csj15Y9/BNFuO7unzVhikCQV9DTeXX46pG4s1o23JKC/QaYWNMZ7kTRv+wWow9MhGiVdML4ZN4XnifuO5krqAybngIy66PMEoQ= smoser@localhost + + diff --git a/doc/examples/cloud-config-update-apt.txt b/doc/examples/cloud-config-update-apt.txt new file mode 100644 index 00000000..a83ce3f7 --- /dev/null +++ b/doc/examples/cloud-config-update-apt.txt @@ -0,0 +1,7 @@ +#cloud-config +# Update apt database on first boot +# (ie run apt-get update) +# +# Default: true +# Aliases: apt_update +package_update: false diff --git a/doc/examples/cloud-config-update-packages.txt 
b/doc/examples/cloud-config-update-packages.txt new file mode 100644 index 00000000..56b72c63 --- /dev/null +++ b/doc/examples/cloud-config-update-packages.txt @@ -0,0 +1,8 @@ +#cloud-config + +# Upgrade the instance on first boot +# (ie run apt-get upgrade) +# +# Default: false +# Aliases: apt_upgrade +package_upgrade: true diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py new file mode 100644 index 00000000..56ec912f --- /dev/null +++ b/doc/rtd/conf.py @@ -0,0 +1,73 @@ +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('../../')) +sys.path.insert(0, os.path.abspath('../')) +sys.path.insert(0, os.path.abspath('./')) +sys.path.insert(0, os.path.abspath('.')) + +from cloudinit import version + +# Supress warnings for docs that aren't used yet +#unused_docs = [ +#] + +# General information about the project. +project = 'Cloud-Init' + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +#needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = [ + 'sphinx.ext.intersphinx', +] + +intersphinx_mapping = { + 'sphinx': ('http://sphinx.pocoo.org', None) +} + +# The suffix of source filenames. +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +version = version.version_string() + +# Set the default Pygments syntax +highlight_language = 'python' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +show_authors = False + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "bodyfont": "Arial, sans-serif", + "headfont": "Arial, sans-serif" +} + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +html_logo = 'logo.png' diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst new file mode 100644 index 00000000..f878dbd4 --- /dev/null +++ b/doc/rtd/index.rst @@ -0,0 +1,29 @@ +.. _index: + +===================== +Documentation +===================== + +.. rubric:: Everything about cloud-init, a set of **python** scripts and utilities to make your cloud images be all they can be! + +Summary +----------------- + +`Cloud-init`_ is the *defacto* multi-distribution package that handles early initialization of a cloud instance. + + +---- + +.. toctree:: + :maxdepth: 2 + + topics/capabilities + topics/availability + topics/format + topics/dir_layout + topics/examples + topics/modules + topics/moreinfo + + +.. 
_Cloud-init: https://launchpad.net/cloud-init diff --git a/doc/rtd/logo.png b/doc/rtd/logo.png new file mode 100644 index 00000000..ed1b0a91 Binary files /dev/null and b/doc/rtd/logo.png differ diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst new file mode 100644 index 00000000..2d58f808 --- /dev/null +++ b/doc/rtd/topics/availability.rst @@ -0,0 +1,20 @@ +============ +Availability +============ + +It is currently installed in the `Ubuntu Cloud Images`_ and also in the official `Ubuntu`_ images available on EC2. + +Versions for other systems can be (or have been) created for the following distributions: + +- Ubuntu +- Fedora +- Debian +- RHEL +- CentOS +- *and more...* + +So ask your distribution provider where you can obtain an image with it built-in if one is not already available ☺ + + +.. _Ubuntu Cloud Images: http://cloud-images.ubuntu.com/ +.. _Ubuntu: http://www.ubuntu.com/ diff --git a/doc/rtd/topics/capabilities.rst b/doc/rtd/topics/capabilities.rst new file mode 100644 index 00000000..63b34270 --- /dev/null +++ b/doc/rtd/topics/capabilities.rst @@ -0,0 +1,24 @@ +===================== +Capabilities +===================== + +- Setting a default locale +- Setting a instance hostname +- Generating instance ssh private keys +- Adding ssh keys to a users ``.ssh/authorized_keys`` so they can log in +- Setting up ephemeral mount points + +User configurability +-------------------- + +`Cloud-init`_ 's behavior can be configured via user-data. + + User-data can be given by the user at instance launch time. + +This is done via the ``--user-data`` or ``--user-data-file`` argument to ec2-run-instances for example. + +* Check your local clients documentation for how to provide a `user-data` string + or `user-data` file for usage by cloud-init on instance creation. + + +.. _Cloud-init: https://launchpad.net/cloud-init diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst new file mode 100644 index 00000000..f072c585 --- /dev/null +++ b/doc/rtd/topics/dir_layout.rst @@ -0,0 +1,81 @@ +========= +Directory layout +========= + +Cloudinits's directory structure is somewhat different from a regular application:: + + /var/lib/cloud/ + - data/ + - instance-id + - previous-instance-id + - datasource + - previous-datasource + - previous-hostname + - handlers/ + - instance + - instances/ + i-00000XYZ/ + - boot-finished + - cloud-config.txt + - datasource + - handlers/ + - obj.pkl + - scripts/ + - sem/ + - user-data.txt + - user-data.txt.i + - scripts/ + - per-boot/ + - per-instance/ + - per-once/ + - seed/ + - sem/ + +``/var/lib/cloud`` + + The main directory containing the cloud-init specific subdirectories. + It is typically located at ``/var/lib`` but there are certain configuration + scenarios where this can be altered. + + TBD, describe this overriding more. + +``data/`` + + Contains information releated to instance ids, datasources and hostnames of the previous + and current instance if they are different. These can be examined as needed to + determine any information releated to a previous boot (if applicable). + +``handlers/`` + + Custom ``part-handlers`` code is written out here. Files that end up here are written + out with in the scheme of ``part-handler-XYZ`` where ``XYZ`` is the handler number (the + first handler found starts at 0). + + +``instance`` + + A symlink to the current ``instances/`` subdirectory that points to the currently + active instance (which is active is dependent on the datasource loaded). 
+ +``instances/`` + + All instances that were created using this image end up with instance identifer + subdirectories (and corresponding data for each instance). The currently active + instance will be symlinked the the ``instance`` symlink file defined previously. + +``scripts/`` + + Scripts that are downloaded/created by the corresponding ``part-handler`` will end up + in one of these subdirectories. + +``seed/`` + + TBD + +``sem/`` + + Cloud-init has a concept of a module sempahore, which basically consists + of the module name and its frequency. These files are used to ensure a module + is only ran `per-once`, `per-instance`, `per-always`. This folder contains + sempaphore `files` which are only supposed to run `per-once` (not tied to the instance id). + diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst new file mode 100644 index 00000000..9bbc33cc --- /dev/null +++ b/doc/rtd/topics/examples.rst @@ -0,0 +1,121 @@ +.. _yaml_examples: + +========= +Cloud config examples +========= + +Including users and groups +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-user-groups.txt + :language: yaml + :linenos: + + +Writing out arbitrary files +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-write-files.txt + :language: yaml + :linenos: + + +Adding a yum repository +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-yum-repo.txt + :language: yaml + :linenos: + +Configure an instance's trusted CA certificates +------------------------------------------------------ + +.. literalinclude:: ../../examples/cloud-config-ca-certs.txt + :language: yaml + :linenos: + +Install and run `chef`_ recipes +------------------------------------------------------ + +.. literalinclude:: ../../examples/cloud-config-chef.txt + :language: yaml + :linenos: + +Setup and run `puppet`_ +------------------------------------------------------ + +.. literalinclude:: ../../examples/cloud-config-puppet.txt + :language: yaml + :linenos: + +Add apt repositories +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-add-apt-repos.txt + :language: yaml + :linenos: + +Run commands on first boot +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-boot-cmds.txt + :language: yaml + :linenos: + +.. literalinclude:: ../../examples/cloud-config-run-cmds.txt + :language: yaml + :linenos: + + +Alter the completion message +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-final-message.txt + :language: yaml + :linenos: + +Install arbitrary packages +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-install-packages.txt + :language: yaml + :linenos: + +Run apt or yum upgrade +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-update-packages.txt + :language: yaml + :linenos: + +Adjust mount points mounted +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-mount-points.txt + :language: yaml + :linenos: + +Call a url when finished +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-phone-home.txt + :language: yaml + :linenos: + +Reboot/poweroff when finished +--------------------------- + +.. literalinclude:: ../../examples/cloud-config-power-state.txt + :language: yaml + :linenos: + +Configure instances ssh-keys +--------------------------- + +.. 
literalinclude:: ../../examples/cloud-config-ssh-keys.txt + :language: yaml + :linenos: + + +.. _chef: http://www.opscode.com/chef/ +.. _puppet: http://puppetlabs.com/ diff --git a/doc/rtd/topics/format.rst b/doc/rtd/topics/format.rst new file mode 100644 index 00000000..eba9533f --- /dev/null +++ b/doc/rtd/topics/format.rst @@ -0,0 +1,159 @@ +========= +Formats +========= + +User data that will be acted upon by cloud-init must be in one of the following types. + +Gzip Compressed Content +------------------------ + +Content found to be gzip compressed will be uncompressed. +The uncompressed data will then be used as if it were not compressed. +This is typically is useful because user-data is limited to ~16384 [#]_ bytes. + +Mime Multi Part Archive +------------------------ + +This list of rules is applied to each part of this multi-part file. +Using a mime-multi part file, the user can specify more than one type of data. + +For example, both a user data script and a cloud-config type could be specified. + +Supported content-types: + +- text/x-include-once-url +- text/x-include-url +- text/cloud-config-archive +- text/upstart-job +- text/cloud-config +- text/part-handler +- text/x-shellscript +- text/cloud-boothook + +Helper script to generate mime messages +~~~~~~~~~~~~~~~~ + +.. code-block:: python + + #!/usr/bin/python + + import sys + + from email.mime.multipart import MIMEMultipart + from email.mime.text import MIMEText + + if len(sys.argv) == 1: + print("%s input-file:type ..." % (sys.argv[0])) + sys.exit(1) + + combined_message = MIMEMultipart() + for i in sys.argv[1:]: + (filename, format_type) = i.split(":", 1) + with open(filename) as fh: + contents = fh.read() + sub_message = MIMEText(contents, format_type, sys.getdefaultencoding()) + sub_message.add_header('Content-Disposition', 'attachment; filename="%s"' % (filename)) + combined_message.attach(sub_message) + + print(combined_message) + + +User-Data Script +------------------------ + +Typically used by those who just want to execute a shell script. + +Begins with: ``#!`` or ``Content-Type: text/x-shellscript`` when using a MIME archive. + +Example +~~~~~~~ + +:: + + $ cat myscript.sh + + #!/bin/sh + echo "Hello World. The time is now $(date -R)!" | tee /root/output.txt + + $ euca-run-instances --key mykey --user-data-file myscript.sh ami-a07d95c9 + +Include File +------------ + +This content is a ``include`` file. + +The file contains a list of urls, one per line. +Each of the URLs will be read, and their content will be passed through this same set of rules. +Ie, the content read from the URL can be gzipped, mime-multi-part, or plain text. + +Begins with: ``#include`` or ``Content-Type: text/x-include-url`` when using a MIME archive. + +Cloud Config Data +----------------- + +Cloud-config is the simplest way to accomplish some things +via user-data. Using cloud-config syntax, the user can specify certain things in a human friendly format. + +These things include: + +- apt upgrade should be run on first boot +- a different apt mirror should be used +- additional apt sources should be added +- certain ssh keys should be imported +- *and many more...* + +**Note:** The file must be valid yaml syntax. + +See the :ref:`yaml_examples` section for a commented set of examples of supported cloud config formats. + +Begins with: ``#cloud-config`` or ``Content-Type: text/cloud-config`` when using a MIME archive. 
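To make the list above concrete, a small illustrative user-data snippet (values are examples only, not defaults):

    #cloud-config
    package_upgrade: true
    packages:
      - pwgen
    apt_mirror: http://us.archive.ubuntu.com/ubuntu/
    ssh_import_id: [exampleuser]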
+ +Upstart Job +----------- + +Content is placed into a file in ``/etc/init``, and will be consumed by upstart as any other upstart job. + +Begins with: ``#upstart-job`` or ``Content-Type: text/upstart-job`` when using a MIME archive. + +Cloud Boothook +-------------- + +This content is ``boothook`` data. It is stored in a file under ``/var/lib/cloud`` and then executed immediately. +This is the earliest ``hook`` available. Note, that there is no mechanism provided for running only once. The boothook must take care of this itself. +It is provided with the instance id in the environment variable ``INSTANCE_I``. This could be made use of to provide a 'once-per-instance' type of functionality. + +Begins with: ``#cloud-boothook`` or ``Content-Type: text/cloud-boothook`` when using a MIME archive. + +Part Handler +------------ + +This is a ``part-handler``. It will be written to a file in ``/var/lib/cloud/data`` based on its filename (which is generated). +This must be python code that contains a ``list_types`` method and a ``handle_type`` method. +Once the section is read the ``list_types`` method will be called. It must return a list of mime-types that this part-handler handles. + +The ``handle_type`` method must be like: + +.. code-block:: python + + def handle_part(data, ctype, filename, payload): + # data = the cloudinit object + # ctype = "__begin__", "__end__", or the mime-type of the part that is being handled. + # filename = the filename of the part (or a generated filename if none is present in mime data) + # payload = the parts' content + +Cloud-init will then call the ``handle_type`` method once at begin, once per part received, and once at end. +The ``begin`` and ``end`` calls are to allow the part handler to do initialization or teardown. + +Begins with: ``#part-handler`` or ``Content-Type: text/part-handler`` when using a MIME archive. + +Example +~~~~~~~ + +.. literalinclude:: ../../examples/part-handler.txt + :language: python + :linenos: + +Also this `blog`_ post offers another example for more advanced usage. + +.. [#] See your cloud provider for applicable user-data size limitations... +.. _blog: http://foss-boss.blogspot.com/2011/01/advanced-cloud-init-custom-handlers.html diff --git a/doc/rtd/topics/modules.rst b/doc/rtd/topics/modules.rst new file mode 100644 index 00000000..d4dd55df --- /dev/null +++ b/doc/rtd/topics/modules.rst @@ -0,0 +1,3 @@ +========= +Modules +========= diff --git a/doc/rtd/topics/moreinfo.rst b/doc/rtd/topics/moreinfo.rst new file mode 100644 index 00000000..2e436c3c --- /dev/null +++ b/doc/rtd/topics/moreinfo.rst @@ -0,0 +1,12 @@ +========= +More information +========= + +Useful external references +---------------- + +- `The beauty of cloudinit`_ +- `Introduction to cloud-init`_ (video) + +.. _Introduction to cloud-init: http://www.youtube.com/watch?v=-zL3BdbKyGY +.. _The beauty of cloudinit: http://brandon.fuller.name/archives/2011/05/02/06.40.57/ -- cgit v1.2.3 From 465994f10efbc1d82d667027a6f9dd72d8bc7c6f Mon Sep 17 00:00:00 2001 From: harlowja Date: Thu, 24 Jan 2013 23:30:41 -0800 Subject: Make the logo better. 
--- doc/rtd/conf.py | 3 +- doc/rtd/logo.png | Bin 4477 -> 0 bytes doc/rtd/static/logo.png | Bin 0 -> 16031 bytes doc/rtd/static/logo.svg | 14356 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 14358 insertions(+), 1 deletion(-) delete mode 100644 doc/rtd/logo.png create mode 100644 doc/rtd/static/logo.png create mode 100755 doc/rtd/static/logo.svg diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 56ec912f..b3ca2b07 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -42,6 +42,7 @@ master_doc = 'index' # |version| and |release|, also used in various other places throughout the # built documents. version = version.version_string() +release = versions # Set the default Pygments syntax highlight_language = 'python' @@ -70,4 +71,4 @@ html_theme_options = { # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = 'logo.png' +html_logo = 'static/logo.png' diff --git a/doc/rtd/logo.png b/doc/rtd/logo.png deleted file mode 100644 index ed1b0a91..00000000 Binary files a/doc/rtd/logo.png and /dev/null differ diff --git a/doc/rtd/static/logo.png b/doc/rtd/static/logo.png new file mode 100644 index 00000000..893b7e3b Binary files /dev/null and b/doc/rtd/static/logo.png differ diff --git a/doc/rtd/static/logo.svg b/doc/rtd/static/logo.svg new file mode 100755 index 00000000..b22ce2a0 --- /dev/null +++ b/doc/rtd/static/logo.svg @@ -0,0 +1,14356 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + image/svg+xml + + + + + + + + Layer 1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CloudInit + + -- cgit v1.2.3 From eedc6fb4963e9cfe6d99845ba1aff78447e480e8 Mon Sep 17 00:00:00 2001 From: harlowja Date: Thu, 24 Jan 2013 23:36:43 -0800 Subject: Fix the release variable. --- doc/rtd/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index b3ca2b07..87fc40ab 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -42,7 +42,7 @@ master_doc = 'index' # |version| and |release|, also used in various other places throughout the # built documents. version = version.version_string() -release = versions +release = version # Set the default Pygments syntax highlight_language = 'python' -- cgit v1.2.3 From e9d7caed3e2c7d86ab05d0e2280a796914e274b5 Mon Sep 17 00:00:00 2001 From: harlowja Date: Thu, 24 Jan 2013 23:50:31 -0800 Subject: Remove the release for now. --- doc/rtd/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 87fc40ab..766f9e93 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -42,7 +42,7 @@ master_doc = 'index' # |version| and |release|, also used in various other places throughout the # built documents. version = version.version_string() -release = version +# release = version # Set the default Pygments syntax highlight_language = 'python' -- cgit v1.2.3 From 257718c8a53355c3a91bbeee95f56416c08c63bb Mon Sep 17 00:00:00 2001 From: ctracey Date: Fri, 25 Jan 2013 16:31:39 -0500 Subject: Moving HACKING to restructured text As per harlowja's suggestion, moving this HACKING file to restructured text format. --- HACKING | 56 +++++++++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/HACKING b/HACKING index 2111f7e5..433738da 100644 --- a/HACKING +++ b/HACKING @@ -1,27 +1,33 @@ +===================== +Hacking on cloud-init +===================== + To get changes into cloud-init, the process to follow is: - * get your changes into a local bzr branch: - # init a repo, and checkout trunk (init repo is to share bzr info - # across multiple checkouts, its different than git). - bzr init-repo cloud-init - bzr branch lp:cloud-init trunk.dist - bzr branch trunk.dist my-topic-branch - * commit your changes - bzr commit - # note, you can make multiple commits, fixes, more commits. 
- * check pylint and pep8 and test , and address issues - make test pylint pep8 - * push to launchpad to a personal branch: - bzr push lp:~/cloud-init/ - * propose that for a merge into lp:cloud-init via web browser - # open the branch in launchpad, it will be at: - # https://code.launchpad.net/// - # for example: - # https://code.launchpad.net/~smoser/cloud-init/mybranch - click 'propose for merging' - select 'lp:cloud-init' as the target branch - - Then, someone on cloud-init-dev (currently Scott Moser and Joshua Harlow) will -review your changes and follow up in the merge request. - - Feel free to ping #cloud-init on freenode if you have any questions. + * If you have not already, be sure to sign the CCA: + - `Canonical Contributor Agreement`_ + + * Get your changes into a local bzr branch. Initialize a repo, and checkout trunk (init repo is to share bzr info across multiple checkouts, its different than git): + - ``bzr init-repo cloud-init`` + - ``bzr branch lp:cloud-init trunk.dist`` + - ``bzr branch trunk.dist my-topic-branch`` + + * Commit your changes (note, you can make multiple commits, fixes, more commits.): + - ``bzr commit`` + + * Check pylint and pep8 and test, and address any issues: + - ``make test pylint pep8`` + + * Push to launchpad to a personal branch: + - ``bzr push lp:~/cloud-init/`` + + * Propose that for a merge into lp:cloud-init via web browser. Open the branch in `Launchpad`_, it will be at https://code.launchpad.net/// (ie. https://code.launchpad.net/~smoser/cloud-init/mybranch): + - Click 'Propose for merging' + - Select 'lp:cloud-init' as the target branch + +Then, someone on cloud-init-dev (currently Scott Moser and Joshua Harlow) will review your changes and follow up in the merge request. + +Feel free to ping #cloud-init on freenode if you have any questions. + +.. _Launchpad: https://launchpad.net +.. _Canonical Contributor Agreement: http://www.canonical.com/contributors -- cgit v1.2.3 From 94c37c074aed4036160881f4f3a28d35f868d006 Mon Sep 17 00:00:00 2001 From: ctracey Date: Fri, 25 Jan 2013 17:27:06 -0500 Subject: Adding a doc example for resolv_conf handler. As per harlowja's suggestion addding an example to the doc directory for cc_resolv_conf.py --- doc/examples/cloud-config-resolv-conf.txt | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 doc/examples/cloud-config-resolv-conf.txt diff --git a/doc/examples/cloud-config-resolv-conf.txt b/doc/examples/cloud-config-resolv-conf.txt new file mode 100644 index 00000000..37ffc91a --- /dev/null +++ b/doc/examples/cloud-config-resolv-conf.txt @@ -0,0 +1,20 @@ +#cloud-config +# +# This is an example file to automatically configure resolv.conf when the +# instance boots for the first time. +# +# Ensure that your yaml is valid and pass this as user-data when starting +# the instance. Also be sure that your cloud.cfg file includes this +# configuration module in the appropirate section. +# +manage-resolv-conf: true + +resolv_conf: + nameservers: ['8.8.4.4', '8.8.8.8'] + searchdomains: + - foo.example.com + - bar.example.com + domain: example.com + options: + rotate: true + timeout: 1 -- cgit v1.2.3 From 3ed65a8030713a18e2fb541f14b77fd5c45b383e Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 25 Jan 2013 17:51:24 -0800 Subject: Integreate HACKING into the rtd site. 
--- HACKING | 49 +++++++++++++++++++++++++++++---------------- doc/rtd/conf.py | 2 +- doc/rtd/index.rst | 2 +- doc/rtd/topics/moreinfo.rst | 2 +- 4 files changed, 35 insertions(+), 20 deletions(-) diff --git a/HACKING b/HACKING index 433738da..66bf7c90 100644 --- a/HACKING +++ b/HACKING @@ -4,30 +4,45 @@ Hacking on cloud-init To get changes into cloud-init, the process to follow is: - * If you have not already, be sure to sign the CCA: - - `Canonical Contributor Agreement`_ +* If you have not already, be sure to sign the CCA: - * Get your changes into a local bzr branch. Initialize a repo, and checkout trunk (init repo is to share bzr info across multiple checkouts, its different than git): - - ``bzr init-repo cloud-init`` - - ``bzr branch lp:cloud-init trunk.dist`` - - ``bzr branch trunk.dist my-topic-branch`` + - `Canonical Contributor Agreement`_ - * Commit your changes (note, you can make multiple commits, fixes, more commits.): - - ``bzr commit`` +* Get your changes into a local bzr branch. + Initialize a repo, and checkout trunk (init repo is to share bzr info across multiple checkouts, its different than git): - * Check pylint and pep8 and test, and address any issues: - - ``make test pylint pep8`` + - ``bzr init-repo cloud-init`` + - ``bzr branch lp:cloud-init trunk.dist`` + - ``bzr branch trunk.dist my-topic-branch`` - * Push to launchpad to a personal branch: - - ``bzr push lp:~/cloud-init/`` +* Commit your changes (note, you can make multiple commits, fixes, more commits.): - * Propose that for a merge into lp:cloud-init via web browser. Open the branch in `Launchpad`_, it will be at https://code.launchpad.net/// (ie. https://code.launchpad.net/~smoser/cloud-init/mybranch): - - Click 'Propose for merging' - - Select 'lp:cloud-init' as the target branch + - ``bzr commit`` -Then, someone on cloud-init-dev (currently Scott Moser and Joshua Harlow) will review your changes and follow up in the merge request. +* Check pylint and pep8 and test, and address any issues: -Feel free to ping #cloud-init on freenode if you have any questions. + - ``make test pylint pep8`` + +* Push to launchpad to a personal branch: + + - ``bzr push lp:~/cloud-init/`` + +* Propose that for a merge into lp:cloud-init via web browser. + + - Open the branch in `Launchpad`_ + + - It will typically be at ``https://code.launchpad.net///`` + - ie. https://code.launchpad.net/~smoser/cloud-init/mybranch + +* Click 'Propose for merging' +* Select 'lp:cloud-init' as the target branch + +Then, someone on cloud-init-dev (currently `Scott Moser`_ and `Joshua Harlow`_) will +review your changes and follow up in the merge request. + +Feel free to ping and/or join #cloud-init on freenode (irc) if you have any questions. .. _Launchpad: https://launchpad.net .. _Canonical Contributor Agreement: http://www.canonical.com/contributors +.. _Scott Moser: https://launchpad.net/~smoser +.. _Joshua Harlow: https://launchpad.net/~harlowja diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 766f9e93..87fc40ab 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -42,7 +42,7 @@ master_doc = 'index' # |version| and |release|, also used in various other places throughout the # built documents. 
version = version.version_string() -# release = version +release = version # Set the default Pygments syntax highlight_language = 'python' diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index f878dbd4..53b39a31 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -24,6 +24,6 @@ Summary topics/examples topics/modules topics/moreinfo - + topics/hacking .. _Cloud-init: https://launchpad.net/cloud-init diff --git a/doc/rtd/topics/moreinfo.rst b/doc/rtd/topics/moreinfo.rst index 2e436c3c..19e96af0 100644 --- a/doc/rtd/topics/moreinfo.rst +++ b/doc/rtd/topics/moreinfo.rst @@ -3,7 +3,7 @@ More information ========= Useful external references ----------------- +------------------------- - `The beauty of cloudinit`_ - `Introduction to cloud-init`_ (video) -- cgit v1.2.3 From cabd9653546586d2370d9c1d81f14e12dd28b94b Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 25 Jan 2013 17:53:33 -0800 Subject: Don't forget the hacking 'inclusion' file. --- doc/rtd/topics/hacking.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 doc/rtd/topics/hacking.rst diff --git a/doc/rtd/topics/hacking.rst b/doc/rtd/topics/hacking.rst new file mode 100644 index 00000000..350265eb --- /dev/null +++ b/doc/rtd/topics/hacking.rst @@ -0,0 +1 @@ +.. include:: ../../../HACKING -- cgit v1.2.3 From dc3ebfe2416028b78b6a846e939201d894b2c9b6 Mon Sep 17 00:00:00 2001 From: Craig Tracey Date: Sun, 27 Jan 2013 21:48:03 -0500 Subject: Adding package versioning logic to package_command This change adds the ability to provide specific package versions to Distro.install_packages and subsequently Distro.package_command. In order to effectively use Distro.install_packages, one is now able to pass a variety of formats in order to easily manage package requirements. These are examples of what can be passed: - "package" - ["package1","package2"] - ("package",) - ("package", "version") - [("package1",)("package2",)] - [("package1", "version1"),("package2","version2")] This change also adds the option to install a specific version for the puppet configuration module. This is especially important here as successful puppet deployments are highly reliant on specific puppet versions. --- cloudinit/config/cc_landscape.py | 2 +- cloudinit/config/cc_puppet.py | 10 ++++++++-- cloudinit/config/cc_salt_minion.py | 2 +- cloudinit/distros/debian.py | 17 +++++++++++++---- cloudinit/distros/rhel.py | 16 ++++++++++++---- cloudinit/util.py | 23 +++++++++++++++++++++++ 6 files changed, 58 insertions(+), 12 deletions(-) diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py index 02610dd0..2efdff79 100644 --- a/cloudinit/config/cc_landscape.py +++ b/cloudinit/config/cc_landscape.py @@ -62,7 +62,7 @@ def handle(_name, cfg, cloud, log, _args): if not ls_cloudcfg: return - cloud.distro.install_packages(["landscape-client"]) + cloud.distro.install_packages(('landscape-client',)) merge_data = [ LSC_BUILTIN_CFG, diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index e9a0a0f4..471a1a8a 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -59,8 +59,14 @@ def handle(name, cfg, cloud, log, _args): # Start by installing the puppet package if necessary... 
install = util.get_cfg_option_bool(puppet_cfg, 'install', True) - if install: - cloud.distro.install_packages(["puppet"]) + version = util.get_cfg_option_str(puppet_cfg, 'version', None) + if not install and version: + log.warn(("Puppet install set false but version supplied," + " doing nothing.")) + elif install: + log.debug(("Attempting to install puppet %s,"), + version if version else 'latest') + cloud.distro.install_packages(('puppet', version)) # ... and then update the puppet configuration if 'conf' in puppet_cfg: diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py index f3eede18..53013dcb 100644 --- a/cloudinit/config/cc_salt_minion.py +++ b/cloudinit/config/cc_salt_minion.py @@ -31,7 +31,7 @@ def handle(name, cfg, cloud, log, _args): salt_cfg = cfg['salt_minion'] # Start by installing the salt package ... - cloud.distro.install_packages(["salt-minion"]) + cloud.distro.install_packages(('salt-minion',)) # Ensure we can configure files at the right dir config_dir = salt_cfg.get("config_dir", '/etc/salt') diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 49b73477..1a8e927b 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -65,7 +65,7 @@ class Distro(distros.Distro): def install_packages(self, pkglist): self.update_package_sources() - self.package_command('install', pkglist) + self.package_command('install', pkgs=pkglist) def _write_network(self, settings): util.write_file(self.network_conf_fn, settings) @@ -142,15 +142,24 @@ class Distro(distros.Distro): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def package_command(self, command, args=None): + def package_command(self, command, args=None, pkgs=[]): e = os.environ.copy() # See: http://tiny.cc/kg91fw # Or: http://tiny.cc/mh91fw e['DEBIAN_FRONTEND'] = 'noninteractive' cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold', - '--assume-yes', '--quiet', command] - if args: + '--assume-yes', '--quiet'] + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): cmd.extend(args) + + cmd.append(command) + + pkglist = util.expand_package_list('%s=%s', pkgs) + cmd.extend(pkglist) + # Allow the output of this to flow outwards (ie not be captured) util.subp(cmd, env=e, capture=False) diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index e65be8d7..2f91e386 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -63,7 +63,7 @@ class Distro(distros.Distro): self.osfamily = 'redhat' def install_packages(self, pkglist): - self.package_command('install', pkglist) + self.package_command('install', pkgs=pkglist) def _adjust_resolve(self, dns_servers, search_servers): try: @@ -208,7 +208,7 @@ class Distro(distros.Distro): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def package_command(self, command, args=None): + def package_command(self, command, args=None, pkgs=[]): cmd = ['yum'] # If enabled, then yum will be tolerant of errors on the command line # with regard to packages. @@ -219,9 +219,17 @@ class Distro(distros.Distro): # Determines whether or not yum prompts for confirmation # of critical actions. We don't want to prompt... 
cmd.append("-y") - cmd.append(command) - if args: + + if args and isinstance(args, str): + cmd.append(args) + elif args and isinstance(args, list): cmd.extend(args) + + cmd.append(command) + + pkglist = util.expand_package_list('%s-%s', pkgs) + cmd.extend(pkglist) + # Allow the output of this to flow outwards (ie not be captured) util.subp(cmd, capture=False) diff --git a/cloudinit/util.py b/cloudinit/util.py index c0ea8d91..c9c5f794 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1560,3 +1560,26 @@ def is_partition(device): device = device[5:] return os.path.isfile("/sys/class/block/%s/partition" % device) + + +def expand_package_list(version_fmt, pkgs): + # we will accept tuples, lists of tuples, or just plain lists + if not isinstance(pkgs, list): + pkgs = [pkgs] + + pkglist = [] + for pkg in pkgs: + if isinstance(pkg, str): + pkglist.append(pkg) + continue + + if len(pkg) < 1 or len(pkg) > 2: + raise RuntimeError("Invalid package_command tuple.") + + if len(pkg) == 2 and pkg[1]: + pkglist.append(version_fmt % pkg) + continue + + pkglist.append(pkg[0]) + + return pkglist -- cgit v1.2.3 From 7656c36ef194bac61286466f86187ff8affff26e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 28 Jan 2013 09:55:29 -0500 Subject: add entries to ChangeLog --- ChangeLog | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/ChangeLog b/ChangeLog index f076a27f..8d750ac1 100644 --- a/ChangeLog +++ b/ChangeLog @@ -22,6 +22,16 @@ mounts. [Vlastimil Holer] - allow config-drive-data to come from a CD device by more correctly filtering out partitions. (LP: #1100545) + - setup docs to be available on read-the-docs + https://cloudinit.readthedocs.org/en/latest/ (LP: #1093039) + - add HACKING file for information on contributing + - handle the legacy 'user:' configuration better, making it affect the + configured OS default user (LP: #1100920) + - Adding a resolv.conf configuration module (LP: #1100434). Currently only + working on redhat systems (no support for resolvconf) + - support grouping linux distros into "os_families". This allows a module + to operate on the family (redhat or debian) rather than the distro (ubuntu, + debian, fedora, rhel) (LP: #1100029) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) -- cgit v1.2.3 From b602d138686bd6653a67efd61d7c245347d14dcb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 28 Jan 2013 11:32:45 -0500 Subject: config/cc_resolv_conf: run PER_INSTANCE rather than PER_ONCE Quick chat with ctracy indicated that this is just as well run PER_INSTANCE, and it is more consistent with other things that way. --- cloudinit/config/cc_resolv_conf.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py index 17c74695..8a460f7e 100644 --- a/cloudinit/config/cc_resolv_conf.py +++ b/cloudinit/config/cc_resolv_conf.py @@ -47,11 +47,11 @@ # -from cloudinit.settings import PER_ONCE +from cloudinit.settings import PER_INSTANCE from cloudinit import templater from cloudinit import util -frequency = PER_ONCE +frequency = PER_INSTANCE distros = ['fedora', 'rhel'] -- cgit v1.2.3 From 62f0f1745677e3422b12a9aaa96ba3e5452db94d Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 28 Jan 2013 10:28:09 -0800 Subject: Include the resolv.conf example. 
--- doc/rtd/topics/examples.rst | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/doc/rtd/topics/examples.rst b/doc/rtd/topics/examples.rst index 9bbc33cc..36508bde 100644 --- a/doc/rtd/topics/examples.rst +++ b/doc/rtd/topics/examples.rst @@ -27,13 +27,25 @@ Adding a yum repository :language: yaml :linenos: -Configure an instance's trusted CA certificates +Configure an instances trusted CA certificates ------------------------------------------------------ .. literalinclude:: ../../examples/cloud-config-ca-certs.txt :language: yaml :linenos: +Configure an instances resolv.conf +------------------------------------------------------ + +*Note:* when using a config drive and a RHEL like system resolv.conf +will also be managed 'automatically' due to the available information +provided for dns servers in the config drive network format. For those +that wish to have different settings use this module. + +.. literalinclude:: ../../examples/cloud-config-resolv-conf.txt + :language: yaml + :linenos: + Install and run `chef`_ recipes ------------------------------------------------------ -- cgit v1.2.3 From 0612a35c4190a485a95ebade8f5c0598ae8b0e14 Mon Sep 17 00:00:00 2001 From: Blair Zajac Date: Mon, 28 Jan 2013 14:10:56 -0800 Subject: Support resizing btrfs filesystems. The existing code has two issues with btrfs: 1) The command to resize a btrfs filesystem uses a path to the mount point, not the underlying device: $ btrfs filesystem resize max /dev/vda1 ERROR: unable to resize '/dev/vda1' - Inappropriate ioctl for device Resize '/dev/vda1' of 'max' $ btrfs filesystem resize max / Resize '/' of 'max' 2) The code that is given a path and finds the ID of the device where the path is mounted doesn't work for btrfs: Use /proc/$$/mountinfo to find the device where path is mounted. This is done because with a btrfs filesystem using os.stat(path) does not return the ID of the device. Here, / has a device of 18 (decimal). $ stat / File: '/' Size: 234 Blocks: 0 IO Block: 4096 directory Device: 12h/18d Inode: 256 Links: 1 Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) Access: 2013-01-13 07:31:04.358011255 +0000 Modify: 2013-01-13 18:48:25.930011255 +0000 Change: 2013-01-13 18:48:25.930011255 +0000 Birth: - Find where / is mounted: $ mount | grep ' / ' /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) And the device ID for /dev/vda1 is not 18: $ ls -l /dev/vda1 brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 So use /proc/$$/mountinfo to find the device underlying the input path. --- cloudinit/config/cc_resizefs.py | 199 ++++++++++++++++++++++++++-------------- 1 file changed, 130 insertions(+), 69 deletions(-) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 70294eda..0bbbf81e 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -27,42 +27,108 @@ from cloudinit import util frequency = PER_ALWAYS +def _resize_btrfs(mount_point, devpth): + return ('btrfs', 'filesystem', 'resize', 'max', mount_point) + +def _resize_ext(mount_point, devpth): + return ('resize2fs', devpth) + +def _resize_xfs(mount_point, devpth): + return ('xfs_growfs', devpth) + +# Do not use a dictionary as these commands should be able to be used +# for multiple filesystem types if possible, e.g. one command for +# ext2, ext3 and ext4. 
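+# Each entry maps a filesystem type prefix to a function returning the
+# resize command to run; e.g. for a hypothetical /dev/vda1 root,
+# _resize_btrfs('/', '/dev/vda1') yields
+# ('btrfs', 'filesystem', 'resize', 'max', '/').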
RESIZE_FS_PREFIXES_CMDS = [ - ('ext', 'resize2fs'), - ('xfs', 'xfs_growfs'), + ('btrfs', _resize_btrfs), + ('ext', _resize_ext), + ('xfs', _resize_xfs), ] NOBLOCK = "noblock" -def nodeify_path(devpth, where, log): - try: - st_dev = os.stat(where).st_dev - dev = os.makedev(os.major(st_dev), os.minor(st_dev)) - os.mknod(devpth, 0400 | stat.S_IFBLK, dev) - return st_dev - except: - if util.is_container(): - log.debug("Inside container, ignoring mknod failure in resizefs") - return - log.warn("Failed to make device node to resize %s at %s", - where, devpth) - raise - +def get_mount_info(path, log): + # Use /proc/$$/mountinfo to find the device where path is mounted. + # This is done because with a btrfs filesystem using os.stat(path) + # does not return the ID of the device. + # + # Here, / has a device of 18 (decimal). + # + # $ stat / + # File: '/' + # Size: 234 Blocks: 0 IO Block: 4096 directory + # Device: 12h/18d Inode: 256 Links: 1 + # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) + # Access: 2013-01-13 07:31:04.358011255 +0000 + # Modify: 2013-01-13 18:48:25.930011255 +0000 + # Change: 2013-01-13 18:48:25.930011255 +0000 + # Birth: - + # + # Find where / is mounted: + # + # $ mount | grep ' / ' + # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) + # + # And the device ID for /dev/vda1 is not 18: + # + # $ ls -l /dev/vda1 + # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 + # + # So use /proc/$$/mountinfo to find the device underlying the + # input path. + path_elements = [e for e in path.split('/') if e] + devpth = None + fs_type = None + match_mount_point = None + match_mount_point_elements = None + mountinfo_path = '/proc/%s/mountinfo' % os.getpid() + for line in util.load_file(mountinfo_path).splitlines(): + parts = line.split() + + mount_point = parts[4] + mount_point_elements = [e for e in mount_point.split('/') if e] + + # Ignore mounts deeper than the path in question. + if len(mount_point_elements) > len(path_elements): + continue + + # Ignore mounts where the common path is not the same. + l = min(len(mount_point_elements), len(path_elements)) + if mount_point_elements[0:l] != path_elements[0:l]: + continue + + # Ignore mount points higher than an already seen mount + # point. + if (match_mount_point_elements is not None and + len(match_mount_point_elements) > len(mount_point_elements)): + continue + + # Find the '-' which terminates a list of optional columns to + # find the filesystem type and the path to the device. See + # man 5 proc for the format of this file. + try: + i = parts.index('-') + except ValueError: + log.debug("Did not find column named '-' in %s", + mountinfo_path) + return None -def get_fs_type(st_dev, path, log): - try: - dev_entries = util.find_devs_with(tag='TYPE', oformat='value', - no_cache=True, path=path) - if not dev_entries: + # Get the path to the device. 
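+        # For a hypothetical btrfs root the matched mountinfo line looks
+        # like:
+        #   36 35 0:18 /@ / rw,noatime shared:1 - btrfs /dev/vda1 rw,subvol=@
+        # so parts[i+1] is 'btrfs' and parts[i+2] is '/dev/vda1'.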
+ try: + fs_type = parts[i+1] + devpth = parts[i+2] + except IndexError: + log.debug("Too few columns in %s after '-' column", mountinfo_path) return None - return dev_entries[0].strip() - except util.ProcessExecutionError: - util.logexc(log, ("Failed to get filesystem type" - " of maj=%s, min=%s for path %s"), - os.major(st_dev), os.minor(st_dev), path) - raise + match_mount_point = mount_point + match_mount_point_elements = mount_point_elements + + if devpth and fs_type and match_mount_point: + return (devpth, fs_type, match_mount_point) + else: + return None def handle(name, cfg, _cloud, log, args): if len(args) != 0: @@ -80,52 +146,47 @@ def handle(name, cfg, _cloud, log, args): # TODO(harlowja): allow what is to be resized to be configurable?? resize_what = "/" - with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.", - dir=resize_root_d, delete=True) as tfh: - devpth = tfh.name - - # Delete the file so that mknod will work - # but don't change the file handle to know that its - # removed so that when a later call that recreates - # occurs this temporary file will still benefit from - # auto deletion - tfh.unlink_now() - - st_dev = nodeify_path(devpth, resize_what, log) - fs_type = get_fs_type(st_dev, devpth, log) - if not fs_type: - log.warn("Could not determine filesystem type of %s", resize_what) - return - - resizer = None - fstype_lc = fs_type.lower() - for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: - if fstype_lc.startswith(pfix): - resizer = root_cmd - break - - if not resizer: - log.warn("Not resizing unknown filesystem type %s for %s", - fs_type, resize_what) - return - - log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer) - resize_cmd = [resizer, devpth] - - if resize_root == NOBLOCK: - # Fork to a child that will run - # the resize command - util.fork_cb(do_resize, resize_cmd, log) - # Don't delete the file now in the parent - tfh.delete = False - else: - do_resize(resize_cmd, log) + result = get_mount_info(resize_what, log) + if not result: + log.warn("Could not determine filesystem type of %s", resize_what) + return + + (devpth, fs_type, mount_point) = result + + # Ensure the path is a block device. 
+ if not stat.S_ISBLK(os.stat(devpth).st_mode): + log.debug("The %s device which was found for mount point %s for %s " + "is not a block device" % (devpth, mount_point, resize_what)) + return + + resizer = None + fstype_lc = fs_type.lower() + for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS: + if fstype_lc.startswith(pfix): + resizer = root_cmd + break + + if not resizer: + log.warn("Not resizing unknown filesystem type %s for %s", + fs_type, resize_what) + return + + resize_cmd = resizer(resize_what, devpth) + log.debug("Resizing %s (%s) using %s", resize_what, fs_type, + ' '.join(resize_cmd)) + + if resize_root == NOBLOCK: + # Fork to a child that will run + # the resize command + util.fork_cb(do_resize, resize_cmd, log) + else: + do_resize(resize_cmd, log) action = 'Resized' if resize_root == NOBLOCK: action = 'Resizing (via forking)' - log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)", - action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root) + log.debug("%s root filesystem (type=%s, val=%s)", action, fs_type, + resize_root) def do_resize(resize_cmd, log): -- cgit v1.2.3 From 9ced60371239eb961e9919f13bda8b496e077411 Mon Sep 17 00:00:00 2001 From: ctracey Date: Wed, 30 Jan 2013 19:21:37 -0500 Subject: Support package versions for the generic package config module Augmenting the package version support to be available when specifying extra packages to be installed at boot via the 'packages:' yaml key. This change also improves type checking and add a configuration example to the docs. --- cloudinit/util.py | 23 +++++++++++++---------- doc/examples/cloud-config-install-packages.txt | 4 ++++ 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index c9c5f794..ffe844b2 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -402,10 +402,9 @@ def get_cfg_option_list(yobj, key, default=None): return [] val = yobj[key] if isinstance(val, (list)): - # Should we ensure they are all strings?? - cval = [str(v) for v in val] + cval = [v for v in val] return cval - if not isinstance(val, (str, basestring)): + if not isinstance(val, (basestring)): val = str(val) return [val] @@ -1569,17 +1568,21 @@ def expand_package_list(version_fmt, pkgs): pkglist = [] for pkg in pkgs: - if isinstance(pkg, str): + if isinstance(pkg, basestring): pkglist.append(pkg) continue - if len(pkg) < 1 or len(pkg) > 2: - raise RuntimeError("Invalid package_command tuple.") + if isinstance(pkg, (tuple, list)): + if len(pkg) < 1 or len(pkg) > 2: + raise RuntimeError("Invalid package & version tuple.") - if len(pkg) == 2 and pkg[1]: - pkglist.append(version_fmt % pkg) - continue + if len(pkg) == 2 and pkg[1]: + pkglist.append(version_fmt % tuple(pkg)) + continue - pkglist.append(pkg[0]) + pkglist.append(pkg[0]) + + else: + raise RuntimeError("Invalid package type.") return pkglist diff --git a/doc/examples/cloud-config-install-packages.txt b/doc/examples/cloud-config-install-packages.txt index 4984818f..2edc63da 100644 --- a/doc/examples/cloud-config-install-packages.txt +++ b/doc/examples/cloud-config-install-packages.txt @@ -6,6 +6,10 @@ # # if packages are specified, this apt_update will be set to true # +# packages may be supplied as a single package name or as a list +# with the format [, ] wherein the specifc +# package version will be installed. 
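+#
+# as a rough, hedged illustration, the package list below is passed through
+# the distro's package_command, which on a debian based system runs
+# approximately:
+#   apt-get ... install pwgen pastebinit libpython2.7=2.7.3-0ubuntu3.1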
packages: - pwgen - pastebinit + - [libpython2.7, 2.7.3-0ubuntu3.1] -- cgit v1.2.3 From 50222a4a387e5e013bc48df7a7a208698368a527 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 31 Jan 2013 08:14:54 -0500 Subject: doc: fix example in cloud-config-write-files.txt The write_files documentation incorrectly used 'perms' rather than 'permissions'. LP: #1111205 --- ChangeLog | 2 ++ doc/examples/cloud-config-write-files.txt | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index e1b08d30..3a688ad0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -34,6 +34,8 @@ debian, fedora, rhel) (LP: #1100029) - fix /etc/hosts writing when templates are used (LP: #1100036) - add package versioning logic to package installation functionality (LP: #1108047) + - fix documentation for write_files to correctly list 'permissions' + rather than 'perms' (LP: #1111205) 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/doc/examples/cloud-config-write-files.txt b/doc/examples/cloud-config-write-files.txt index 9c4e3998..ec98bc93 100644 --- a/doc/examples/cloud-config-write-files.txt +++ b/doc/examples/cloud-config-write-files.txt @@ -12,7 +12,7 @@ write_files: content: CiMgVGhpcyBmaWxlIGNvbnRyb2xzIHRoZSBzdGF0ZSBvZiBTRUxpbnV4... owner: root:root path: /etc/sysconfig/selinux - perms: '0644' + permissions: '0644' - content: | # My new /etc/sysconfig/samba file @@ -24,10 +24,10 @@ write_files: AAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAA .... path: /bin/arch - perms: '0555' + permissions: '0555' - encoding: gzip content: !!binary | H4sIAIDb/U8C/1NW1E/KzNMvzuBKTc7IV8hIzcnJVyjPL8pJ4QIA6N+MVxsAAAA= path: /usr/bin/hello - perms: '0755' + permissions: '0755' -- cgit v1.2.3 From c26f0e086959634ddf41c843c5c9dd73998b7c7e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 31 Jan 2013 14:48:32 -0500 Subject: Changelog: reformat to limit to 80 chars wide --- ChangeLog | 71 +++++++++++++++++++++++++++++++++++---------------------------- 1 file changed, 39 insertions(+), 32 deletions(-) diff --git a/ChangeLog b/ChangeLog index 3a688ad0..4745a327 100644 --- a/ChangeLog +++ b/ChangeLog @@ -33,7 +33,8 @@ to operate on the family (redhat or debian) rather than the distro (ubuntu, debian, fedora, rhel) (LP: #1100029) - fix /etc/hosts writing when templates are used (LP: #1100036) - - add package versioning logic to package installation functionality (LP: #1108047) + - add package versioning logic to package installation + functionality (LP: #1108047) - fix documentation for write_files to correctly list 'permissions' rather than 'perms' (LP: #1111205) 0.7.1: @@ -72,12 +73,13 @@ - fix how string escaping was not working when the string was a unicode string which was causing the warning message not to be written out (LP: #1075756) - - for boto > 0.6.0 there was a lazy load of the metadata added, when cloud-init - runs the usage of this lazy loading is hidden and since that lazy loading - will be performed on future attribute access we must traverse the lazy loaded - dictionary and force it to full expand so that if cloud-init blocks the ec2 - metadata port the lazy loaded dictionary will continue working properly - instead of trying to make additional url calls which will fail (LP: #1068801) + - for boto > 0.6.0 there was a lazy load of the metadata added, when + cloud-init runs the usage of this lazy loading is hidden and since that lazy + loading will be performed on 
future attribute access we must traverse the + lazy loaded dictionary and force it to full expand so that if cloud-init + blocks the ec2 metadata port the lazy loaded dictionary will continue + working properly instead of trying to make additional url calls which will + fail (LP: #1068801) - use a set of helper/parsing classes to perform system configuration for easier test. (/etc/sysconfig, /etc/hostname, resolv.conf, /etc/hosts) - add power_state_change config module for shutting down stystem after @@ -92,7 +94,8 @@ - do not 'start networking' in cloud-init-nonet, but add cloud-init-container job that runs only if in container and emits net-device-added (LP: #1031065) - - search only top level dns for 'instance-data' in DataSourceEc2 (LP: #1040200) + - search only top level dns for 'instance-data' in + DataSourceEc2 (LP: #1040200) - add support for config-drive-v2 (LP:#1037567) - support creating users, including the default user. [Ben Howard] (LP: #1028503) @@ -182,8 +185,8 @@ detailed information on python 2.6 and 2.7 - forced all code loading, moving, chmod, writing files and other system level actions to go through standard set of util functions, this greatly - helps in debugging and determining exactly which system actions cloud-init is - performing + helps in debugging and determining exactly which system actions cloud-init + is performing - adjust url fetching and url trying to go through a single function that reads urls in the new 'url helper' file, this helps in tracing, debugging and knowing which urls are being called and/or posted to from with-in @@ -219,28 +222,30 @@ very simple set of ec2 meta-data back to callers - useful for testing or for understanding what the ec2 meta-data service can provide in terms of data or functionality - - for ssh key and authorized key file parsing add in classes and util functions - that maintain the state of individual lines, allowing for a clearer - separation of parsing and modification (useful for testing and tracing) + - for ssh key and authorized key file parsing add in classes and util + functions that maintain the state of individual lines, allowing for a + clearer separation of parsing and modification (useful for testing and + tracing) - add a set of 'base' init.d scripts that can be used on systems that do not have full upstart or systemd support (or support that does not match the standard fedora/ubuntu implementation) - currently these are being tested on RHEL 6.2 - separate the datasources into there own subdirectory (instead of being - a top-level item), this matches how config 'modules' and user-data 'handlers' - are also in there own subdirectory (thus helping new developers and others - understand the code layout in a quicker manner) + a top-level item), this matches how config 'modules' and user-data + 'handlers' are also in there own subdirectory (thus helping new developers + and others understand the code layout in a quicker manner) - add the building of rpms based off a new cli tool and template 'spec' file that will templatize and perform the necessary commands to create a source - and binary package to be used with a cloud-init install on a 'rpm' supporting - system + and binary package to be used with a cloud-init install on a 'rpm' + supporting system - uses the new standard set of requires and converts those pypi requirements into a local set of package requirments (that are known to exist on RHEL systems but should also exist on fedora systems) - - adjust the bdeb builder to be a python script (instead of a shell 
script) and - make its 'control' file a template that takes in the standard set of pypi - dependencies and uses a local mapping (known to work on ubuntu) to create the - packages set of dependencies (that should also work on ubuntu-like systems) + - adjust the bdeb builder to be a python script (instead of a shell script) + and make its 'control' file a template that takes in the standard set of + pypi dependencies and uses a local mapping (known to work on ubuntu) to + create the packages set of dependencies (that should also work on + ubuntu-like systems) - pythonify a large set of various pieces of code - remove wrapping return statements with () when it has no effect - upper case all constants used @@ -251,8 +256,8 @@ there own equality) - use context managers on locks, tempdir, chdir, file, selinux, umask, unmounting commands so that these actions do not have to be closed and/or - cleaned up manually in finally blocks, which is typically not done and will - eventually be a bug in the future + cleaned up manually in finally blocks, which is typically not done and + will eventually be a bug in the future - use the 'abc' module for abstract classes base where possible - applied in the datasource root class, the distro root class, and the user-data v2 root class @@ -282,17 +287,18 @@ config without sections better (and it also maintains comments instead of removing them) - use the new defaulting config parser (that will not raise errors on sections - that do not exist or return errors when values are fetched that do not exist) - in the 'puppet' module + that do not exist or return errors when values are fetched that do not + exist) in the 'puppet' module - for config 'modules' add in the ability for the module to provide a list of - distro names which it is known to work with, if when ran and the distro being - used name does not match one of those in this list, a warning will be written - out saying that this module may not work correctly on this distrobution + distro names which it is known to work with, if when ran and the distro + being used name does not match one of those in this list, a warning will be + written out saying that this module may not work correctly on this + distrobution - for all dynamically imported modules ensure that they are fixed up before they are used by ensuring that they have certain attributes, if they do not have those attributes they will be set to a sensible set of defaults instead - adjust all 'config' modules and handlers to use the adjusted util functions - and the new distro objects where applicable so that those pieces of code can + and the new distro objects where applicable so that those pieces of code can benefit from the unified and enhanced functionality being provided in that util module - fix a potential bug whereby when a #includeonce was encountered it would @@ -300,8 +306,8 @@ it would continue checking against that cache, instead of refetching (which would likely be the expected case) - add a openstack/nova based pep8 extension utility ('hacking.py') that allows - for custom checks (along with the standard pep8 checks) to occur when running - 'make pep8' and its derivatives + for custom checks (along with the standard pep8 checks) to occur when + running 'make pep8' and its derivatives 0.6.4: - support relative path in AuthorizedKeysFile (LP: #970071). 
- make apt-get update run with --quiet (suitable for logging) (LP: #1012613) @@ -489,3 +495,4 @@ - make the message on 'disable_root' more clear (LP: #672417) - do not require public key if private is given in ssh cloud-config (LP: #648905) +# vi: syntax=text textwidth=79 -- cgit v1.2.3 From 1bb72070b70edaa960b3158feba936fbc3687b1f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 31 Jan 2013 14:49:22 -0500 Subject: upstart/cloud-init-container.conf: ensure /run/network exists ifquery will exit failure if there is no /run/network directory. normally that would get created by one of network-interface.conf or networking.conf. But, it is possible that we're running before either of those have. --- ChangeLog | 1 + upstart/cloud-init-container.conf | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/ChangeLog b/ChangeLog index 4745a327..639d5560 100644 --- a/ChangeLog +++ b/ChangeLog @@ -37,6 +37,7 @@ functionality (LP: #1108047) - fix documentation for write_files to correctly list 'permissions' rather than 'perms' (LP: #1111205) + - cloud-init-container.conf: ensure /run/network before running ifquery 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) diff --git a/upstart/cloud-init-container.conf b/upstart/cloud-init-container.conf index 051c6e50..6bdbe77e 100644 --- a/upstart/cloud-init-container.conf +++ b/upstart/cloud-init-container.conf @@ -21,6 +21,12 @@ script # if the all static network interfaces are already up, nothing to do [ -f "$MARK_STATIC_NETWORK_EMITTED" ] && exit 0 + # ifquery will exit failure if there is no /run/network directory. + # normally that would get created by one of network-interface.conf + # or networking.conf. But, it is possible that we're running + # before either of those have. + mkdir -p /run/network + # get list of all 'auto' interfaces. if there are none, nothing to do. auto_list=$(ifquery --list --allow auto 2>/dev/null) || : [ -z "$auto_list" ] && exit 0 -- cgit v1.2.3 From f3b25e68ac6d28cdacf7408c91da6e9a215ad1e6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 4 Feb 2013 22:23:42 -0500 Subject: make config of nocloud datasource able to specify meta-data and user-data LP: #1115833 --- cloudinit/sources/DataSourceNoCloud.py | 72 ++++++++++++++++++------------- doc/examples/cloud-config-datasources.txt | 11 +++++ 2 files changed, 52 insertions(+), 31 deletions(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index bed500a2..d8484437 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -77,37 +77,47 @@ class DataSourceNoCloud(sources.DataSource): found.append("ds_config") md["seedfrom"] = self.ds_cfg['seedfrom'] - fslist = util.find_devs_with("TYPE=vfat") - fslist.extend(util.find_devs_with("TYPE=iso9660")) - - label_list = util.find_devs_with("LABEL=cidata") - devlist = list(set(fslist) & set(label_list)) - devlist.sort(reverse=True) - - for dev in devlist: - try: - LOG.debug("Attempting to use data from %s", dev) - - (newmd, newud) = util.mount_cb(dev, util.read_seeded) - md = util.mergedict(newmd, md) - ud = newud - - # For seed from a device, the default mode is 'net'. - # that is more likely to be what is desired. - # If they want dsmode of local, then they must - # specify that. 
- if 'dsmode' not in md: - md['dsmode'] = "net" - - LOG.debug("Using data from %s", dev) - found.append(dev) - break - except OSError as e: - if e.errno != errno.ENOENT: - raise - except util.MountFailedError: - util.logexc(LOG, ("Failed to mount %s" - " when looking for data"), dev) + # if ds_cfg has 'user-data' and 'meta-data' + if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg: + if self.ds_cfg['user-data']: + ud = util.mergedict(md, self.ds_cfg['user-data']) + if self.ds_cfg['meta-data'] is not False: + md = util.mergedict(md, self.ds_cfg['meta-data']) + if 'ds_config' not in found: + found.append("ds_config") + + if self.ds_cfg.get('fs_label', "cidata"): + fslist = util.find_devs_with("TYPE=vfat") + fslist.extend(util.find_devs_with("TYPE=iso9660")) + + label = self.ds_cfg.get('fs_label') + label_list = util.find_devs_with("LABEL=%s" % label) + devlist = list(set(fslist) & set(label_list)) + devlist.sort(reverse=True) + + for dev in devlist: + try: + LOG.debug("Attempting to use data from %s", dev) + + (newmd, newud) = util.mount_cb(dev, util.read_seeded) + md = util.mergedict(newmd, md) + ud = newud + + # For seed from a device, the default mode is 'net'. + # that is more likely to be what is desired. If they want + # dsmode of local, then they must specify that. + if 'dsmode' not in md: + md['dsmode'] = "net" + + LOG.debug("Using data from %s", dev) + found.append(dev) + break + except OSError as e: + if e.errno != errno.ENOENT: + raise + except util.MountFailedError: + util.logexc(LOG, ("Failed to mount %s" + " when looking for data"), dev) # There was no indication on kernel cmdline or data # in the seeddir suggesting this handler should be used. diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index d10dde05..fc8c22d4 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -31,3 +31,14 @@ datasource: # /user-data and /meta-data # seedfrom: http://my.example.com/i-abcde seedfrom: None + + # fs_label: the label on filesystems to be searched for NoCloud source + fs_label: cidata + + # these are optional, but allow you to basically provide a datasource + # right here + user-data: | + # This is the user-data verbatum + meta-data: + instance-id: i-87018aed + local-hostname: myhost.internal -- cgit v1.2.3 From bedf5ae6d1e81209acff81fc688f98267f9b7cf2 Mon Sep 17 00:00:00 2001 From: harlowja Date: Mon, 4 Feb 2013 23:10:36 -0800 Subject: Add initial docs about datasources. Start moving the current README for datasources to a RST format and include those files in the rtd site. 
LP: #1113650 --- HACKING | 48 --------------- HACKING.rst | 48 +++++++++++++++ doc/rtd/index.rst | 1 + doc/rtd/topics/datasources.rst | 99 +++++++++++++++++++++++++++++ doc/rtd/topics/dir_layout.rst | 6 +- doc/rtd/topics/hacking.rst | 2 +- doc/sources/altcloud/README | 65 -------------------- doc/sources/altcloud/README.rst | 87 ++++++++++++++++++++++++++ doc/sources/configdrive/README | 118 ----------------------------------- doc/sources/configdrive/README.rst | 123 +++++++++++++++++++++++++++++++++++++ doc/sources/nocloud/README | 55 ----------------- doc/sources/nocloud/README.rst | 71 +++++++++++++++++++++ 12 files changed, 433 insertions(+), 290 deletions(-) delete mode 100644 HACKING create mode 100644 HACKING.rst create mode 100644 doc/rtd/topics/datasources.rst delete mode 100644 doc/sources/altcloud/README create mode 100644 doc/sources/altcloud/README.rst delete mode 100644 doc/sources/configdrive/README create mode 100644 doc/sources/configdrive/README.rst delete mode 100644 doc/sources/nocloud/README create mode 100644 doc/sources/nocloud/README.rst diff --git a/HACKING b/HACKING deleted file mode 100644 index 66bf7c90..00000000 --- a/HACKING +++ /dev/null @@ -1,48 +0,0 @@ -===================== -Hacking on cloud-init -===================== - -To get changes into cloud-init, the process to follow is: - -* If you have not already, be sure to sign the CCA: - - - `Canonical Contributor Agreement`_ - -* Get your changes into a local bzr branch. - Initialize a repo, and checkout trunk (init repo is to share bzr info across multiple checkouts, its different than git): - - - ``bzr init-repo cloud-init`` - - ``bzr branch lp:cloud-init trunk.dist`` - - ``bzr branch trunk.dist my-topic-branch`` - -* Commit your changes (note, you can make multiple commits, fixes, more commits.): - - - ``bzr commit`` - -* Check pylint and pep8 and test, and address any issues: - - - ``make test pylint pep8`` - -* Push to launchpad to a personal branch: - - - ``bzr push lp:~/cloud-init/`` - -* Propose that for a merge into lp:cloud-init via web browser. - - - Open the branch in `Launchpad`_ - - - It will typically be at ``https://code.launchpad.net///`` - - ie. https://code.launchpad.net/~smoser/cloud-init/mybranch - -* Click 'Propose for merging' -* Select 'lp:cloud-init' as the target branch - -Then, someone on cloud-init-dev (currently `Scott Moser`_ and `Joshua Harlow`_) will -review your changes and follow up in the merge request. - -Feel free to ping and/or join #cloud-init on freenode (irc) if you have any questions. - -.. _Launchpad: https://launchpad.net -.. _Canonical Contributor Agreement: http://www.canonical.com/contributors -.. _Scott Moser: https://launchpad.net/~smoser -.. _Joshua Harlow: https://launchpad.net/~harlowja diff --git a/HACKING.rst b/HACKING.rst new file mode 100644 index 00000000..66bf7c90 --- /dev/null +++ b/HACKING.rst @@ -0,0 +1,48 @@ +===================== +Hacking on cloud-init +===================== + +To get changes into cloud-init, the process to follow is: + +* If you have not already, be sure to sign the CCA: + + - `Canonical Contributor Agreement`_ + +* Get your changes into a local bzr branch. 
+ Initialize a repo, and checkout trunk (init repo is to share bzr info across multiple checkouts, its different than git): + + - ``bzr init-repo cloud-init`` + - ``bzr branch lp:cloud-init trunk.dist`` + - ``bzr branch trunk.dist my-topic-branch`` + +* Commit your changes (note, you can make multiple commits, fixes, more commits.): + + - ``bzr commit`` + +* Check pylint and pep8 and test, and address any issues: + + - ``make test pylint pep8`` + +* Push to launchpad to a personal branch: + + - ``bzr push lp:~/cloud-init/`` + +* Propose that for a merge into lp:cloud-init via web browser. + + - Open the branch in `Launchpad`_ + + - It will typically be at ``https://code.launchpad.net///`` + - ie. https://code.launchpad.net/~smoser/cloud-init/mybranch + +* Click 'Propose for merging' +* Select 'lp:cloud-init' as the target branch + +Then, someone on cloud-init-dev (currently `Scott Moser`_ and `Joshua Harlow`_) will +review your changes and follow up in the merge request. + +Feel free to ping and/or join #cloud-init on freenode (irc) if you have any questions. + +.. _Launchpad: https://launchpad.net +.. _Canonical Contributor Agreement: http://www.canonical.com/contributors +.. _Scott Moser: https://launchpad.net/~smoser +.. _Joshua Harlow: https://launchpad.net/~harlowja diff --git a/doc/rtd/index.rst b/doc/rtd/index.rst index 53b39a31..619bb5dc 100644 --- a/doc/rtd/index.rst +++ b/doc/rtd/index.rst @@ -22,6 +22,7 @@ Summary topics/format topics/dir_layout topics/examples + topics/datasources topics/modules topics/moreinfo topics/hacking diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst new file mode 100644 index 00000000..c2354ace --- /dev/null +++ b/doc/rtd/topics/datasources.rst @@ -0,0 +1,99 @@ +.. _datasources: + +========= +Datasources +========= +---------- + What is a datasource? +---------- + +Datasources are sources of configuration data for cloud-init that typically come +from the user (aka userdata) or come from the stack that created the configuration +drive (aka metadata). Typical userdata would include files, yaml, and shell scripts +while typical metadata would include server name, instance id, display name and other +cloud specific details. Since there are multiple ways to provide this data (each cloud +solution seems to prefer its own way) internally a datasource abstract class was +created to allow for a single way to access the different cloud systems methods +to provide this data through the typical usage of subclasses. + +The current interface that a datasource object must provide is the following: + +.. sourcecode:: python + + def get_userdata(self, apply_filter=False) + + @property + def launch_index(self) + + @property + def is_disconnected(self) + + def get_userdata_raw(self) + + # the data sources' config_obj is a cloud-config formated + # object that came to it from ways other than cloud-config + # because cloud-config content would be handled elsewhere + def get_config_obj(self) + + def get_public_ssh_keys(self) + + def device_name_to_device(self, name) + + def get_locale(self) + + @property + def availability_zone(self) + + def get_instance_id(self) + + def get_hostname(self, fqdn=False) + + def get_package_mirror_info(self) + +--------------------------- +EC2 +--------------------------- + +TBD + +--------------------------- +Config Drive +--------------------------- + +.. include:: ../../sources/configdrive/README.rst + +--------------------------- +Alt cloud +--------------------------- + +.. 
include:: ../../sources/altcloud/README.rst + +--------------------------- +No cloud +--------------------------- + +.. include:: ../../sources/nocloud/README.rst + +--------------------------- +MAAS +--------------------------- + +TBD + +--------------------------- +CloudStack +--------------------------- + +TBD + +--------------------------- +OVF +--------------------------- + +See: https://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/files/head:/doc/sources/ovf/ + +--------------------------- +Fallback/None +--------------------------- + +TBD diff --git a/doc/rtd/topics/dir_layout.rst b/doc/rtd/topics/dir_layout.rst index f072c585..8815d33d 100644 --- a/doc/rtd/topics/dir_layout.rst +++ b/doc/rtd/topics/dir_layout.rst @@ -25,9 +25,9 @@ Cloudinits's directory structure is somewhat different from a regular applicatio - user-data.txt - user-data.txt.i - scripts/ - - per-boot/ - - per-instance/ - - per-once/ + - per-boot/ + - per-instance/ + - per-once/ - seed/ - sem/ diff --git a/doc/rtd/topics/hacking.rst b/doc/rtd/topics/hacking.rst index 350265eb..96ab88ef 100644 --- a/doc/rtd/topics/hacking.rst +++ b/doc/rtd/topics/hacking.rst @@ -1 +1 @@ -.. include:: ../../../HACKING +.. include:: ../../../HACKING.rst diff --git a/doc/sources/altcloud/README b/doc/sources/altcloud/README deleted file mode 100644 index 87d7949a..00000000 --- a/doc/sources/altcloud/README +++ /dev/null @@ -1,65 +0,0 @@ -Data souce AltCloud will be used to pick up user data on -RHEVm and vSphere. - -RHEVm: -====== -For REHVm v3.0 the userdata is injected into the VM using floppy -injection via the RHEVm dashboard "Custom Properties". The format -of the Custom Properties entry must be: -"floppyinject=user-data.txt:" - -e.g.: To pass a simple bash script - -% cat simple_script.bash -#!/bin/bash -echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt - -% base64 < simple_script.bash -IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK - -To pass this example script to cloud-init running in a RHEVm v3.0 VM -set the "Custom Properties" when creating the RHEMv v3.0 VM to: -floppyinject=user-data.txt:IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK - -NOTE: The prefix with file name must be: "floppyinject=user-data.txt:" - -It is also possible to launch a RHEVm v3.0 VM and pass optional user -data to it using the Delta Cloud. -For more inforation on Delta Cloud see: http://deltacloud.apache.org - -vSphere: -======== -For VMWare's vSphere the userdata is injected into the VM an ISO -via the cdrom. This can be done using the vSphere dashboard -by connecting an ISO image to the CD/DVD drive. - -To pass this example script to cloud-init running in a vSphere VM -set the CD/DVD drive when creating the vSphere VM to point to an -ISO on the data store. - -The ISO must contain the user data: - -For example, to pass the same simple_script.bash to vSphere: - -Create the ISO: -=============== -% mkdir my-iso - -NOTE: The file name on the ISO must be: "user-data.txt" -% cp simple_scirpt.bash my-iso/user-data.txt - -% genisoimage -o user-data.iso -r my-iso - -Verify the ISO: -=============== -% sudo mkdir /media/vsphere_iso -% sudo mount -o loop JoeV_CI_02.iso /media/vsphere_iso -% cat /media/vsphere_iso/user-data.txt -% sudo umount /media/vsphere_iso - -Then, launch the vSphere VM the ISO user-data.iso attached as a CDrom. - -It is also possible to launch a vSphere VM and pass optional user -data to it using the Delta Cloud. 
- -For more inforation on Delta Cloud see: http://deltacloud.apache.org diff --git a/doc/sources/altcloud/README.rst b/doc/sources/altcloud/README.rst new file mode 100644 index 00000000..b5d72ebb --- /dev/null +++ b/doc/sources/altcloud/README.rst @@ -0,0 +1,87 @@ +The datasource altcloud will be used to pick up user data on `RHEVm`_ and `vSphere`_. + +RHEVm +~~~~~~ + +For `RHEVm`_ v3.0 the userdata is injected into the VM using floppy +injection via the `RHEVm`_ dashboard "Custom Properties". + +The format of the Custom Properties entry must be: + +:: + + floppyinject=user-data.txt: + +For example to pass a simple bash script: + +:: + + % cat simple_script.bash + #!/bin/bash + echo "Hello Joe!" >> /tmp/JJV_Joe_out.txt + + % base64 < simple_script.bash + IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK + +To pass this example script to cloud-init running in a `RHEVm`_ v3.0 VM +set the "Custom Properties" when creating the RHEMv v3.0 VM to: + +:: + + floppyinject=user-data.txt:IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gSm9lISIgPj4gL3RtcC9KSlZfSm9lX291dC50eHQK + +**NOTE:** The prefix with file name must be: ``floppyinject=user-data.txt:`` + +It is also possible to launch a `RHEVm`_ v3.0 VM and pass optional user +data to it using the Delta Cloud. + +For more information on Delta Cloud see: http://deltacloud.apache.org + +vSphere +~~~~~~~~ + +For VMWare's `vSphere`_ the userdata is injected into the VM as an ISO +via the cdrom. This can be done using the `vSphere`_ dashboard +by connecting an ISO image to the CD/DVD drive. + +To pass this example script to cloud-init running in a `vSphere`_ VM +set the CD/DVD drive when creating the vSphere VM to point to an +ISO on the data store. + +**Note:** The ISO must contain the user data. + +For example, to pass the same ``simple_script.bash`` to vSphere: + +Create the ISO +----------------- + +:: + + % mkdir my-iso + +NOTE: The file name on the ISO must be: ``user-data.txt`` + +:: + + % cp simple_scirpt.bash my-iso/user-data.txt + % genisoimage -o user-data.iso -r my-iso + +Verify the ISO +----------------- + +:: + + % sudo mkdir /media/vsphere_iso + % sudo mount -o loop JoeV_CI_02.iso /media/vsphere_iso + % cat /media/vsphere_iso/user-data.txt + % sudo umount /media/vsphere_iso + +Then, launch the `vSphere`_ VM the ISO user-data.iso attached as a CDROM. + +It is also possible to launch a `vSphere`_ VM and pass optional user +data to it using the Delta Cloud. + +For more information on Delta Cloud see: http://deltacloud.apache.org + +.. _RHEVm: https://www.redhat.com/virtualization/rhev/desktop/rhevm/ +.. _vSphere: https://www.vmware.com/products/datacenter-virtualization/vsphere/overview.html diff --git a/doc/sources/configdrive/README b/doc/sources/configdrive/README deleted file mode 100644 index ed9033c9..00000000 --- a/doc/sources/configdrive/README +++ /dev/null @@ -1,118 +0,0 @@ -The 'ConfigDrive' DataSource supports the OpenStack configdrive disk. -See doc/source/api_ext/ext_config_drive.rst in the nova source code for -more information on config drive. - -The following criteria are required to be identified by -DataSourceConfigDrive as a config drive: - * must be formated with vfat filesystem - * must be a un-partitioned block device (/dev/vdb, not /dev/vdb1) - * must contain one of the following files: - * etc/network/interfaces - * root/.ssh/authorized_keys - * meta.js - -By default, cloud-init does not consider this source to be a full-fledged -datasource. 
Instead, the default behavior is to assume it is really only -present to provide networking information. Cloud-init will copy off the -network information, apply it to the system, and then continue on. The -"full" datasource would then be found in the EC2 metadata service. - -== Content of config-drive == - * etc/network/interfaces - This file is laid down by nova in order to pass static networking - information to the guest. Cloud-init will copy it off of the config-drive - and into /etc/network/interfaces as soon as it can, and then attempt to - bring up all network interfaces. - - * root/.ssh/authorized_keys - This file is laid down by nova, and contains the keys that were - provided to it on instance creation (nova-boot --key ....) - - Cloud-init will copy those keys and put them into the configured user - ('ubuntu') .ssh/authorized_keys. - - * meta.js - meta.js is populated on the config-drive in response to the user passing - "meta flags" (nova boot --meta key=value ...). It is expected to be json - formated. - -== Configuration == -Cloud-init's behavior can be modified by keys found in the meta.js file in -the following ways: - * dsmode: - values: local, net, pass - default: pass - - This is what indicates if configdrive is a final data source or not. - By default it is 'pass', meaning this datasource should not be read. - Set it to 'local' or 'net' to stop cloud-init from continuing on to - search for other data sources after network config. - - The difference between 'local' and 'net' is that local will not require - networking to be up before user-data actions (or boothooks) are run. - - * instance-id: - default: iid-dsconfigdrive - This is utilized as the metadata's instance-id. It should generally - be unique, as it is what is used to determine "is this a new instance". - - * public-keys: - default: None - if present, these keys will be used as the public keys for the - instance. This value overrides the content in authorized_keys. - Note: it is likely preferable to provide keys via user-data - - * user-data: - default: None - This provides cloud-init user-data. See other documentation for what - all can be present here. - -== Example == -Here is an example using the nova client (python-novaclien) - -Assuming the following variables set up: - * img_id : set to the nova image id (uuid from image-list) - * flav_id : set to numeric flavor_id (nova flavor-list) - * keyname : set to name of key for this instance (nova keypair-list) - -$ cat my-user-data -#!/bin/sh -echo ==== USER_DATA FROM EC2 MD ==== | tee /ud.log - -$ ud_value=$(sed 's,EC2 MD,META KEY,') - -## Now, 'ud_value' has same content of my-user-data file, but -## with the string "USER_DATA FROM META KEY" - -## launch an instance with dsmode=pass -## This will really not use the configdrive for anything as the mode -## for the datasource is 'pass', meaning it will still expect some -## other data source (DataSourceEc2). 
- -$ nova boot --image=$img_id --config-drive=1 --flavor=$flav_id \ - --key_name=$keyname \ - --user_data=my-user-data \ - "--meta=instance-id=iid-001 \ - "--meta=user-data=${ud_keyval}" \ - "--meta=dsmode=pass" cfgdrive-dsmode-pass - -$ euca-get-console-output i-0000001 | grep USER_DATA -echo ==== USER_DATA FROM EC2 MD ==== | tee /ud.log - -## Now, launch an instance with dsmode=local -## This time, the only metadata and userdata available to cloud-init -## are on the config-drive -$ nova boot --image=$img_id --config-drive=1 --flavor=$flav_id \ - --key_name=$keyname \ - --user_data=my-user-data \ - "--meta=instance-id=iid-001 \ - "--meta=user-data=${ud_keyval}" \ - "--meta=dsmode=local" cfgdrive-dsmode-local - -$ euca-get-console-output i-0000002 | grep USER_DATA -echo ==== USER_DATA FROM META KEY ==== | tee /ud.log - --- -[1] https://github.com/openstack/nova/blob/master/doc/source/api_ext/ext_config_drive.rst for more if - - diff --git a/doc/sources/configdrive/README.rst b/doc/sources/configdrive/README.rst new file mode 100644 index 00000000..797872ad --- /dev/null +++ b/doc/sources/configdrive/README.rst @@ -0,0 +1,123 @@ +The configuration drive datasource supports the `OpenStack`_ configuration drive disk. + + See `the config drive extension`_ and `introduction`_ in the public + documentation for more information. + +By default, cloud-init does *always* consider this source to be a full-fledged +datasource. Instead, the typical behavior is to assume it is really only +present to provide networking information. Cloud-init will copy off the +network information, apply it to the system, and then continue on. The +"full" datasource could then be found in the EC2 metadata service. If this is +not the case then the files contained on the located drive must provide equivalents +to what the EC2 metadata service would provide (which is typical of the version +2 support listed below) + +Version 1 +~~~~~~~~~ + +The following criteria are required to as a config drive: + +1. Must be formatted with `vfat`_ filesystem +2. Must be a un-partitioned block device (/dev/vdb, not /dev/vdb1) +3. Must contain *one* of the following files + +:: + + /etc/network/interfaces + /root/.ssh/authorized_keys + /meta.js + +``/etc/network/interfaces`` + + This file is laid down by nova in order to pass static networking + information to the guest. Cloud-init will copy it off of the config-drive + and into /etc/network/interfaces (or convert it to RH format) as soon as it can, + and then attempt to bring up all network interfaces. + +``/root/.ssh/authorized_keys`` + + This file is laid down by nova, and contains the ssk keys that were + provided to nova on instance creation (nova-boot --key ....) + +``/meta.js`` + + meta.js is populated on the config-drive in response to the user passing + "meta flags" (nova boot --meta key=value ...). It is expected to be json + formatted. + +Version 2 +~~~~~~~~~~~ + +The following criteria are required to as a config drive: + +1. Must be formatted with `vfat`_ or `iso9660`_ filesystem + or have a *filesystem* label of **config-2** +2. Must be a un-partitioned block device (/dev/vdb, not /dev/vdb1) +3. The files that will typically be present in the config drive are: + +:: + + openstack/ + - 2012-08-10/ or latest/ + - meta_data.json + - user_data (not mandatory) + - content/ + - 0000 (referenced content files) + - 0001 + - .... 
+ ec2 + - latest/ + - meta-data.json (not mandatory) + +Keys and values +~~~~~~~~~~~ + +Cloud-init's behavior can be modified by keys found in the meta.js (version 1 only) file in the following ways. + +:: + + dsmode: + values: local, net, pass + default: pass + + +This is what indicates if configdrive is a final data source or not. +By default it is 'pass', meaning this datasource should not be read. +Set it to 'local' or 'net' to stop cloud-init from continuing on to +search for other data sources after network config. + +The difference between 'local' and 'net' is that local will not require +networking to be up before user-data actions (or boothooks) are run. + +:: + + instance-id: + default: iid-dsconfigdrive + +This is utilized as the metadata's instance-id. It should generally +be unique, as it is what is used to determine "is this a new instance". + +:: + + public-keys: + default: None + +If present, these keys will be used as the public keys for the +instance. This value overrides the content in authorized_keys. + +Note: it is likely preferable to provide keys via user-data + +:: + + user-data: + default: None + +This provides cloud-init user-data. See :ref:`examples ` for +what all can be present here. + +.. _OpenStack: http://www.openstack.org/ +.. _introduction: http://docs.openstack.org/trunk/openstack-compute/admin/content/config-drive.html +.. _python-novaclient: https://github.com/openstack/python-novaclient +.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660 +.. _vfat: https://en.wikipedia.org/wiki/File_Allocation_Table +.. _the config drive extension: http://docs.openstack.org/developer/nova/api_ext/ext_config_drive.html diff --git a/doc/sources/nocloud/README b/doc/sources/nocloud/README deleted file mode 100644 index c94b206a..00000000 --- a/doc/sources/nocloud/README +++ /dev/null @@ -1,55 +0,0 @@ -The data source 'NoCloud' and 'NoCloudNet' allow the user to provide user-data -and meta-data to the instance without running a network service (or even without -having a network at all) - -You can provide meta-data and user-data to a local vm boot via files on a vfat -or iso9660 filesystem. These user-data and meta-data files are expected to be -in the format described in doc/example/seed/README . Basically, user-data is -simply user-data and meta-data is a yaml formated file representing what you'd -find in the EC2 metadata service. - -Given a disk 12.04 cloud image in 'disk.img', you can create a sufficient disk -by following the example below. - -## create user-data and meta-data files that will be used -## to modify image on first boot -$ { echo instance-id: iid-local01; echo local-hostname: cloudimg; } > meta-data - -$ printf "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data - -## create a disk to attach with some user-data and meta-data -$ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data - -## alternatively, create a vfat filesystem with same files -## $ truncate --size 2M seed.img -## $ mkfs.vfat -n cidata seed.img -## $ mcopy -oi seed.img user-data meta-data :: - -## create a new qcow image to boot, backed by your original image -$ qemu-img create -f qcow2 -b disk.img boot-disk.img - -## boot the image and login as 'ubuntu' with password 'passw0rd' -## note, passw0rd was set as password through the user-data above, -## there is no password set on these images. 
-$ kvm -m 256 \ - -net nic -net user,hostfwd=tcp::2222-:22 \ - -drive file=boot-disk.img,if=virtio \ - -drive file=seed.iso,if=virtio - -Note, that the instance-id provided ('iid-local01' above) is what is used to -determine if this is "first boot". So if you are making updates to user-data -you will also have to change that, or start the disk fresh. - - -Also, you can inject an /etc/network/interfaces file by providing the content -for that file in the 'network-interfaces' field of metadata. Example metadata: - instance-id: iid-abcdefg - network-interfaces: | - iface eth0 inet static - address 192.168.1.10 - network 192.168.1.0 - netmask 255.255.255.0 - broadcast 192.168.1.255 - gateway 192.168.1.254 - hostname: myhost - diff --git a/doc/sources/nocloud/README.rst b/doc/sources/nocloud/README.rst new file mode 100644 index 00000000..aa3cf1a3 --- /dev/null +++ b/doc/sources/nocloud/README.rst @@ -0,0 +1,71 @@ +The data source ``NoCloud`` and ``NoCloudNet`` allow the user to provide user-data +and meta-data to the instance without running a network service (or even without +having a network at all). + +You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ +or `iso9660`_ filesystem. + +These user-data and meta-data files are expected to be +in the following format. + +:: + + /user-data + /meta-data + +Basically, user-data is simply user-data and meta-data is a yaml formatted file +representing what you'd find in the EC2 metadata service. + +Given a disk ubuntu 12.04 cloud image in 'disk.img', you can create a sufficient disk +by following the example below. + +:: + + ## create user-data and meta-data files that will be used + ## to modify image on first boot + $ { echo instance-id: iid-local01; echo local-hostname: cloudimg; } > meta-data + + $ printf "#cloud-config\npassword: passw0rd\nchpasswd: { expire: False }\nssh_pwauth: True\n" > user-data + + ## create a disk to attach with some user-data and meta-data + $ genisoimage -output seed.iso -volid cidata -joliet -rock user-data meta-data + + ## alternatively, create a vfat filesystem with same files + ## $ truncate --size 2M seed.img + ## $ mkfs.vfat -n cidata seed.img + ## $ mcopy -oi seed.img user-data meta-data :: + + ## create a new qcow image to boot, backed by your original image + $ qemu-img create -f qcow2 -b disk.img boot-disk.img + + ## boot the image and login as 'ubuntu' with password 'passw0rd' + ## note, passw0rd was set as password through the user-data above, + ## there is no password set on these images. + $ kvm -m 256 \ + -net nic -net user,hostfwd=tcp::2222-:22 \ + -drive file=boot-disk.img,if=virtio \ + -drive file=seed.iso,if=virtio + +**Note:** that the instance-id provided (``iid-local01`` above) is what is used to +determine if this is "first boot". So if you are making updates to user-data +you will also have to change that, or start the disk fresh. + +Also, you can inject an ``/etc/network/interfaces`` file by providing the content +for that file in the ``network-interfaces`` field of metadata. + +Example metadata: + +:: + + instance-id: iid-abcdefg + network-interfaces: | + iface eth0 inet static + address 192.168.1.10 + network 192.168.1.0 + netmask 255.255.255.0 + broadcast 192.168.1.255 + gateway 192.168.1.254 + hostname: myhost + +.. _iso9660: https://en.wikipedia.org/wiki/ISO_9660 +.. 
_vfat: https://en.wikipedia.org/wiki/File_Allocation_Table -- cgit v1.2.3 From abe6ca57cd74e68959e7839c7abf6073631f3b49 Mon Sep 17 00:00:00 2001 From: harlowja Date: Tue, 5 Feb 2013 23:58:49 -0800 Subject: Continue adding datasource docs. Add a base set for ec2 and datasource none. --- doc/rtd/topics/datasources.rst | 113 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 103 insertions(+), 10 deletions(-) diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index c2354ace..59c58805 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -20,32 +20,52 @@ The current interface that a datasource object must provide is the following: .. sourcecode:: python + # returns a mime multipart message that contains + # all the various fully-expanded components that + # were found from processing the raw userdata string + # - when filtering only the mime messages targeting + # this instance id will be returned (or messages with + # no instance id) def get_userdata(self, apply_filter=False) - @property - def launch_index(self) + # returns the raw userdata string (or none) + def get_userdata_raw(self) + # returns a integer (or none) which can be used to identify + # this instance in a group of instances which are typically + # created from a single command, thus allowing programatic + # filtering on this launch index (or other selective actions) @property - def is_disconnected(self) - - def get_userdata_raw(self) + def launch_index(self) # the data sources' config_obj is a cloud-config formated # object that came to it from ways other than cloud-config # because cloud-config content would be handled elsewhere def get_config_obj(self) + #returns a list of public ssh keys def get_public_ssh_keys(self) + # translates a device 'short' name into the actual physical device + # fully qualified name (or none if said physical device is not attached + # or does not exist) def device_name_to_device(self, name) + # gets the locale string this instance should be applying + # which typically used to adjust the instances locale settings files def get_locale(self) @property def availability_zone(self) + # gets the instance id that was assigned to this instance by the + # cloud provider or when said instance id does not exist in the backing + # metadata this will return 'iid-datasource' def get_instance_id(self) + # gets the fully qualified domain name that this host should be using + # when configuring network or hostname releated settings, typically + # assigned either by the cloud provider or the user creating the vm def get_hostname(self, fqdn=False) def get_package_mirror_info(self) @@ -54,7 +74,65 @@ The current interface that a datasource object must provide is the following: EC2 --------------------------- -TBD +The EC2 datasource is the oldest and most widely used datasource that cloud-init +supports. This datasource interacts with a *magic* ip that is provided to the +instance by the cloud provider. Typically this ip is ``169.254.169.254`` of which +at this ip a http server is provided to the instance so that the instance can make +calls to get instance userdata and instance metadata. 
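For a rough sense of what that interface looks like from inside an instance, a minimal Python sketch that queries the endpoint directly is shown below. This is only an illustration, not cloud-init's own fetch path (internally it goes through the boto library, as the note further down says), and it assumes the instance can reach ``169.254.169.254`` over plain HTTP; the version string and paths follow the 2009-04-04 examples that follow::

    # Minimal sketch only -- not cloud-init's implementation (which uses boto).
    # Assumes the usual metadata ip/version from the surrounding documentation.
    import urllib2

    BASE_URL = "http://169.254.169.254/2009-04-04"

    def fetch(path):
        # e.g. path = "meta-data/instance-id" or "user-data"
        return urllib2.urlopen("%s/%s" % (BASE_URL, path), timeout=5).read()

    print(fetch("meta-data/instance-id"))
    print(fetch("meta-data/local-hostname"))
    print(fetch("user-data"))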
+ +Metadata is accessible via the following URL: + +:: + + GET http://169.254.169.254/2009-04-04/meta-data/ + ami-id + ami-launch-index + ami-manifest-path + block-device-mapping/ + hostname + instance-id + instance-type + local-hostname + local-ipv4 + placement/ + public-hostname + public-ipv4 + public-keys/ + reservation-id + security-groups + +Userdata is accessible via the following URL: + +:: + + GET http://169.254.169.254/2009-04-04/user-data + 1234,fred,reboot,true | 4512,jimbo, | 173,,, + +Note that there are multiple versions of this data provided, cloud-init +by default uses **2009-04-04** but newer versions can be supported with +relative ease (newer versions have more data exposed, while maintaining +backward compatibility with the previous versions). + +To see which versions are supported from your cloud provider use the following URL: + +:: + + GET http://169.254.169.254/ + 1.0 + 2007-01-19 + 2007-03-01 + 2007-08-29 + 2007-10-10 + 2007-12-15 + 2008-02-01 + 2008-09-01 + 2009-04-04 + ... + latest + +**Note:** internally in cloudinit the `boto`_ library used to fetch the instance +userdata and instance metadata, feel free to check that library out, it provides +many other useful EC2 functionality. --------------------------- Config Drive @@ -78,22 +156,37 @@ No cloud MAAS --------------------------- -TBD +*TODO* + +For now see: http://maas.ubuntu.com/ --------------------------- CloudStack --------------------------- -TBD +*TODO* --------------------------- OVF --------------------------- -See: https://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/files/head:/doc/sources/ovf/ +*TODO* + +For now see: https://bazaar.launchpad.net/~cloud-init-dev/cloud-init/trunk/files/head:/doc/sources/ovf/ --------------------------- Fallback/None --------------------------- -TBD +This is the fallback datasource when no other datasource can be selected. It is +the equivalent of a *empty* datasource in that it provides a empty string as userdata +and a empty dictionary as metadata. It is useful for testing as well as for when +you do not have a need to have an actual datasource to meet your instance +requirements (ie you just want to run modules that are not concerned with any +external data). It is typically put at the end of the datasource search list +so that if all other datasources are not matched, then this one will be so that +the user is not left with an inaccessible instance. + +**Note:** the instance id that this datasource provides is ``iid-datasource-none``. + +.. 
_boto: http://docs.pythonboto.org/en/latest/ -- cgit v1.2.3 From bf0db8d0793c9c18871cbbafbbad9c127b0bd8ee Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Feb 2013 08:33:48 -0500 Subject: user-data doesn't merge in from meta-data (typo) --- cloudinit/sources/DataSourceNoCloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index d8484437..5ccd6b99 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -80,7 +80,7 @@ class DataSourceNoCloud(sources.DataSource): # if ds_cfg has 'user-data' and 'meta-data' if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg: if self.ds_cfg['user-data']: - ud = util.mergedict(md, self.ds_cfg['user-data']) + ud = self.ds_cfg['user-data'] if self.ds_cfg['meta-data'] is not False: md = util.mergedict(md, self.ds_cfg['meta-data']) if 'ds_config' not in found: -- cgit v1.2.3 From 88a369c5324e74a2d1bb8dd0bdf8fdc9a95393c8 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Feb 2013 09:12:36 -0500 Subject: add test_nocloud unit tests, fix one issue found --- cloudinit/sources/DataSourceNoCloud.py | 2 + tests/unittests/test_datasource/test_nocloud.py | 56 +++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 tests/unittests/test_datasource/test_nocloud.py diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 5ccd6b99..097bbc52 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -205,6 +205,8 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): # short2long mapping to save cmdline typing s2l = {"h": "local-hostname", "i": "instance-id", "s": "seedfrom"} for item in kvpairs: + if item == "": + continue try: (k, v) = item.split("=", 1) except: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py new file mode 100644 index 00000000..850d3214 --- /dev/null +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -0,0 +1,56 @@ +from cloudinit.sources import DataSourceNoCloud + +from mocker import MockerTestCase + + +class TestNoCloudDataSource(MockerTestCase): + + def setUp(self): + super(TestNoCloudDataSource, self).setUp() + + def test_parse_cmdline_data_valid(self): + parse = DataSourceNoCloud.parse_cmdline_data + + ds_id = "ds=nocloud" + pairs = ( + ("root=/dev/sda1 %(ds_id)s", {}), + ("%(ds_id)s; root=/dev/foo", {}), + ("%(ds_id)s", {}), + ("%(ds_id)s;", {}), + ("%(ds_id)s;s=SEED", {'seedfrom': 'SEED'}), + ("%(ds_id)s;seedfrom=SEED;local-hostname=xhost", + {'seedfrom': 'SEED', 'local-hostname': 'xhost'}), + ("%(ds_id)s;h=xhost", + {'local-hostname': 'xhost'}), + ("%(ds_id)s;h=xhost;i=IID", + {'local-hostname': 'xhost', 'instance-id': 'IID'}), + ) + + for (fmt, expected) in pairs: + fill = {} + cmdline = fmt % {'ds_id': ds_id} + ret = parse(ds_id=ds_id, fill=fill, cmdline=cmdline) + self.assertEqual(expected, fill) + self.assertTrue(ret) + + def test_parse_cmdline_data_none(self): + parse = DataSourceNoCloud.parse_cmdline_data + + ds_id = "ds=foo" + cmdlines = ( + "root=/dev/sda1 ro", + "console=/dev/ttyS0 root=/dev/foo", + "", + "ds=foocloud", + "ds=foo-net", + "ds=nocloud;s=SEED", + ) + + for cmdline in cmdlines: + fill = {} + ret = parse(ds_id=ds_id, fill=fill, cmdline=cmdline) + self.assertEqual(fill, {}) + self.assertFalse(ret) + + +# vi: ts=4 expandtab -- cgit v1.2.3 From a0f882fdd306ef7b3b90c53d5622eb5a6878cb81 Mon Sep 17 
00:00:00 2001 From: Scott Moser Date: Thu, 7 Feb 2013 12:08:30 -0500 Subject: more test cases for nocloud including one for config seed --- tests/unittests/helpers.py | 7 ++ tests/unittests/test_datasource/test_maas.py | 8 +- tests/unittests/test_datasource/test_nocloud.py | 113 ++++++++++++++++++++++-- 3 files changed, 115 insertions(+), 13 deletions(-) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 4258a29d..91a50e18 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -182,3 +182,10 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): trap_func = retarget_many_wrapper(new_root, 1, func) setattr(mod, f, trap_func) self.patched_funcs.append((mod, f, func)) + +def populate_dir(path, files): + os.makedirs(path) + for (name, content) in files.iteritems(): + with open(os.path.join(path, name), "w") as fp: + fp.write(content) + fp.close() diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 85e6add0..b56fea82 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -3,6 +3,7 @@ import os from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper +from tests.unittests.helpers import populate_dir from mocker import MockerTestCase @@ -137,11 +138,4 @@ class TestMAASDataSource(MockerTestCase): pass -def populate_dir(seed_dir, files): - os.mkdir(seed_dir) - for (name, content) in files.iteritems(): - with open(os.path.join(seed_dir, name), "w") as fp: - fp.write(content) - fp.close() - # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 850d3214..28e0a472 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -1,16 +1,106 @@ +from cloudinit import helpers +from tests.unittests.helpers import populate_dir from cloudinit.sources import DataSourceNoCloud +from cloudinit import util from mocker import MockerTestCase +import os +import yaml class TestNoCloudDataSource(MockerTestCase): def setUp(self): + self.tmp = self.makeDir() + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + + self.cmdline = "root=TESTCMDLINE" + + self.unapply = [] + self.apply_patches([(util, 'get_cmdline', self._getcmdline)]) super(TestNoCloudDataSource, self).setUp() - def test_parse_cmdline_data_valid(self): - parse = DataSourceNoCloud.parse_cmdline_data + def tearDown(self): + apply_patches([i for i in reversed(self.unapply)]) + super(TestNoCloudDataSource, self).setUp() + + def apply_patches(self, patches): + ret = apply_patches(patches) + self.unapply += ret + + def _getcmdline(self): + return self.cmdline + + def test_nocloud_seed_dir(self): + md = {'instance-id': 'IID', 'dsmode': 'local'} + ud = "USER_DATA_HERE" + populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': ud, 'meta-data': yaml.safe_dump(md)}) + + sys_cfg = { + 'datasource': {'NoCloud': {'fs_label': None}} + } + + ds = DataSourceNoCloud.DataSourceNoCloud + + dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, ud) + self.assertEqual(dsrc.metadata, md) + self.assertTrue(ret) + + def test_fs_label(self): + #find_devs_with should not be called ff fs_label is None + ds = DataSourceNoCloud.DataSourceNoCloud + + class PsuedoException(Exception): + pass + + def my_find_devs_with(*args, **kwargs): + _f = (args, kwargs) + raise PsuedoException + + 
self.apply_patches([(util, 'find_devs_with', my_find_devs_with)]) + # by default, NoCloud should search for filesystems by label + sys_cfg = {'datasource': {'NoCloud': {}}} + dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertRaises(PsuedoException, dsrc.get_data) + + # but disabling searching should just end up with None found + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertFalse(ret) + + def test_no_datasource_expected(self): + #no source should be found if no cmdline, config, and fs_label=None + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} + + ds = DataSourceNoCloud.DataSourceNoCloud + dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + self.assertFalse(dsrc.get_data()) + + def test_seed_in_config(self): + ds = DataSourceNoCloud.DataSourceNoCloud + + data = { + 'fs_label': None, + 'meta-data': {'instance-id': 'IID'}, + 'user-data': "USER_DATA_RAW", + } + + sys_cfg = {'datasource': {'NoCloud': data}} + dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertEqual(dsrc.userdata_raw, "USER_DATA_RAW") + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + + +class TestParseCommandLineData(MockerTestCase): + + def test_parse_cmdline_data_valid(self): ds_id = "ds=nocloud" pairs = ( ("root=/dev/sda1 %(ds_id)s", {}), @@ -29,13 +119,12 @@ class TestNoCloudDataSource(MockerTestCase): for (fmt, expected) in pairs: fill = {} cmdline = fmt % {'ds_id': ds_id} - ret = parse(ds_id=ds_id, fill=fill, cmdline=cmdline) + ret = DataSourceNoCloud.parse_cmdline_data(ds_id=ds_id, fill=fill, + cmdline=cmdline) self.assertEqual(expected, fill) self.assertTrue(ret) def test_parse_cmdline_data_none(self): - parse = DataSourceNoCloud.parse_cmdline_data - ds_id = "ds=foo" cmdlines = ( "root=/dev/sda1 ro", @@ -48,9 +137,21 @@ class TestNoCloudDataSource(MockerTestCase): for cmdline in cmdlines: fill = {} - ret = parse(ds_id=ds_id, fill=fill, cmdline=cmdline) + ret = DataSourceNoCloud.parse_cmdline_data(ds_id=ds_id, fill=fill, + cmdline=cmdline) self.assertEqual(fill, {}) self.assertFalse(ret) +def apply_patches(patches): + ret = [] + for (ref, name, replace) in patches: + if replace is None: + continue + orig = getattr(ref, name) + setattr(ref, name, replace) + ret.append((ref, name, orig)) + return ret + + # vi: ts=4 expandtab -- cgit v1.2.3 From 48abaf6a72dca4485f0bc64af075febac6ef590d Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Wed, 20 Feb 2013 17:13:38 +0100 Subject: Added arguments to packages/bddeb: -d pass through '-d' to debuild --no-cloud-utils don't depend on cloud-utils package (default: False) These are essential for building on Debian 6, because there are no python-mocker (build dependency) and cloud-utils (install dependency) in squeeze and squeeze-backports. 
--- packages/bddeb | 17 +++++++++++++---- packages/debian/control.in | 3 +-- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/packages/bddeb b/packages/bddeb index bda3170d..61399739 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -36,10 +36,10 @@ PKG_MP = { 'prettytable': 'python-prettytable', 'pyyaml': 'python-yaml', } -DEBUILD_ARGS = ["-us", "-S", "-uc"] +DEBUILD_ARGS = ["-us", "-S", "-uc", "-d"] -def write_debian_folder(root, version, revno): +def write_debian_folder(root, version, revno, append_requires=[]): deb_dir = util.abs_join(root, 'debian') os.makedirs(deb_dir) @@ -58,7 +58,7 @@ def write_debian_folder(root, version, revno): pkgs = [p.lower().strip() for p in stdout.splitlines()] # Map to known packages - requires = [] + requires = append_requires for p in pkgs: tgt_pkg = PKG_MP.get(p) if not tgt_pkg: @@ -87,6 +87,11 @@ def main(): " (default: %(default)s)"), default=False, action='store_true') + parser.add_argument("--no-cloud-utils", dest="no_cloud_utils", + help=("don't depend on cloud-utils package" + " (default: %(default)s)"), + default=False, + action='store_true') for ent in DEBUILD_ARGS: parser.add_argument(ent, dest="debuild_args", action='append_const', @@ -128,7 +133,11 @@ def main(): shutil.move(extracted_name, xdir) print("Creating a debian/ folder in %r" % (xdir)) - write_debian_folder(xdir, version, revno) + if not args.no_cloud_utils: + append_requires=['cloud-utils'] + else: + append_requires=[] + write_debian_folder(xdir, version, revno, append_requires) # The naming here seems to follow some debian standard # so it will whine if it is changed... diff --git a/packages/debian/control.in b/packages/debian/control.in index edb5aff5..b9352f5b 100644 --- a/packages/debian/control.in +++ b/packages/debian/control.in @@ -18,8 +18,7 @@ Standards-Version: 3.9.3 Package: cloud-init Architecture: all -Depends: cloud-utils, - procps, +Depends: procps, python, #for $r in $requires ${r}, -- cgit v1.2.3 From 27ce4ea4f2bfe1e33454b8cec63276b044e29d12 Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Thu, 21 Feb 2013 14:23:53 +0100 Subject: Modify init-scripts to be able to run both on RHEL and Debian. --- setup.py | 2 +- sysvinit/cloud-config | 11 ++++------- sysvinit/cloud-final | 11 ++++------- sysvinit/cloud-init | 11 ++++------- sysvinit/cloud-init-local | 11 ++++------- 5 files changed, 17 insertions(+), 29 deletions(-) diff --git a/setup.py b/setup.py index 24476681..38c8cd93 100755 --- a/setup.py +++ b/setup.py @@ -42,7 +42,7 @@ INITSYS_FILES = { 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { - 'sysvinit': '/etc/rc.d/init.d', + 'sysvinit': '/etc/init.d', 'systemd': '/etc/systemd/system/', 'upstart': '/etc/init/', } diff --git a/sysvinit/cloud-config b/sysvinit/cloud-config index e587446d..ad8ed831 100755 --- a/sysvinit/cloud-config +++ b/sysvinit/cloud-config @@ -29,15 +29,13 @@ # Should-Start: $time # Required-Stop: # Should-Stop: -# Default-Start: 3 5 -# Default-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: The config cloud-init job # Description: Start cloud-init and runs the config phase # and any associated config modules as desired. ### END INIT INFO -. /etc/init.d/functions - # Return values acc. to LSB for all commands but status: # 0 - success # 1 - generic or unspecified error @@ -60,8 +58,9 @@ prog="cloud-init" cloud_init="/usr/bin/cloud-init" conf="/etc/cloud/cloud.cfg" -# If there exists a sysconfig variable override file use it... 
+# If there exist sysconfig/default variable override files use it... [ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init start() { [ -x $cloud_init ] || return 5 @@ -80,8 +79,6 @@ stop() { return $RETVAL } -. /etc/init.d/functions - case "$1" in start) start diff --git a/sysvinit/cloud-final b/sysvinit/cloud-final index 5deb8457..aeae8903 100755 --- a/sysvinit/cloud-final +++ b/sysvinit/cloud-final @@ -29,15 +29,13 @@ # Should-Start: $time # Required-Stop: # Should-Stop: -# Default-Start: 3 5 -# Default-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: The final cloud-init job # Description: Start cloud-init and runs the final phase # and any associated final modules as desired. ### END INIT INFO -. /etc/init.d/functions - # Return values acc. to LSB for all commands but status: # 0 - success # 1 - generic or unspecified error @@ -60,8 +58,9 @@ prog="cloud-init" cloud_init="/usr/bin/cloud-init" conf="/etc/cloud/cloud.cfg" -# If there exists a sysconfig variable override file use it... +# If there exist sysconfig/default variable override files use it... [ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init start() { [ -x $cloud_init ] || return 5 @@ -80,8 +79,6 @@ stop() { return $RETVAL } -. /etc/init.d/functions - case "$1" in start) start diff --git a/sysvinit/cloud-init b/sysvinit/cloud-init index f8ab5d5f..c1c92ad0 100755 --- a/sysvinit/cloud-init +++ b/sysvinit/cloud-init @@ -29,15 +29,13 @@ # Should-Start: $time # Required-Stop: # Should-Stop: -# Default-Start: 3 5 -# Default-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: The initial cloud-init job (net and fs contingent) # Description: Start cloud-init and runs the initialization phase # and any associated initial modules as desired. ### END INIT INFO -. /etc/init.d/functions - # Return values acc. to LSB for all commands but status: # 0 - success # 1 - generic or unspecified error @@ -60,8 +58,9 @@ prog="cloud-init" cloud_init="/usr/bin/cloud-init" conf="/etc/cloud/cloud.cfg" -# If there exists a sysconfig variable override file use it... +# If there exist sysconfig/default variable override files use it... [ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init start() { [ -x $cloud_init ] || return 5 @@ -80,8 +79,6 @@ stop() { return $RETVAL } -. /etc/init.d/functions - case "$1" in start) start diff --git a/sysvinit/cloud-init-local b/sysvinit/cloud-init-local index 0c63b9b0..b53e0db2 100755 --- a/sysvinit/cloud-init-local +++ b/sysvinit/cloud-init-local @@ -29,15 +29,13 @@ # Should-Start: $time # Required-Stop: # Should-Stop: -# Default-Start: 3 5 -# Default-Stop: +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 # Short-Description: The initial cloud-init job (local fs contingent) # Description: Start cloud-init and runs the initialization phases # and any associated initial modules as desired. ### END INIT INFO -. /etc/init.d/functions - # Return values acc. to LSB for all commands but status: # 0 - success # 1 - generic or unspecified error @@ -60,8 +58,9 @@ prog="cloud-init" cloud_init="/usr/bin/cloud-init" conf="/etc/cloud/cloud.cfg" -# If there exists a sysconfig variable override file use it... +# If there exist sysconfig/default variable override files use it... [ -f /etc/sysconfig/cloud-init ] && . 
/etc/sysconfig/cloud-init +[ -f /etc/default/cloud-init ] && . /etc/default/cloud-init start() { [ -x $cloud_init ] || return 5 @@ -80,8 +79,6 @@ stop() { return $RETVAL } -. /etc/init.d/functions - case "$1" in start) start -- cgit v1.2.3 From 3f61f2ee54ca1e550c733bb5d9b27f6250c1e51e Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Thu, 21 Feb 2013 15:07:54 +0100 Subject: New option INIT_SYSTEM=sysvinit_deb for Debian /etc/ directories --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 38c8cd93..b30cd53b 100755 --- a/setup.py +++ b/setup.py @@ -38,11 +38,13 @@ def is_f(p): INITSYS_FILES = { 'sysvinit': [f for f in glob('sysvinit/*') if is_f(f)], + 'sysvinit_deb': [f for f in glob('sysvinit/*') if is_f(f)], 'systemd': [f for f in glob('systemd/*') if is_f(f)], 'upstart': [f for f in glob('upstart/*') if is_f(f)], } INITSYS_ROOTS = { - 'sysvinit': '/etc/init.d', + 'sysvinit': '/etc/rc.d/init.d', + 'sysvinit_deb': '/etc/init.d', 'systemd': '/etc/systemd/system/', 'upstart': '/etc/init/', } -- cgit v1.2.3 From 10ea1c0bb933b21d32012d89b218a3bbbd15a75a Mon Sep 17 00:00:00 2001 From: Vlastimil Holer Date: Thu, 21 Feb 2013 16:29:06 +0100 Subject: Split Debian and Ubuntu APT sources --- cloudinit/config/cc_apt_configure.py | 9 ++++-- templates/sources.list.debian.tmpl | 28 +++++++++++++++++ templates/sources.list.tmpl | 60 ------------------------------------ templates/sources.list.ubuntu.tmpl | 60 ++++++++++++++++++++++++++++++++++++ 4 files changed, 94 insertions(+), 63 deletions(-) create mode 100644 templates/sources.list.debian.tmpl delete mode 100644 templates/sources.list.tmpl create mode 100644 templates/sources.list.ubuntu.tmpl diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index f8664160..3ce3b351 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -140,10 +140,13 @@ def get_release(): def generate_sources_list(codename, mirrors, cloud, log): - template_fn = cloud.get_template_filename('sources.list') + template_fn = cloud.get_template_filename('sources.list.%s' % + (cloud.distro.name)) if not template_fn: - log.warn("No template found, not rendering /etc/apt/sources.list") - return + template_fn = cloud.get_template_filename('sources.list') + if not template_fn: + log.warn("No template found, not rendering /etc/apt/sources.list") + return params = {'codename': codename} for k in mirrors: diff --git a/templates/sources.list.debian.tmpl b/templates/sources.list.debian.tmpl new file mode 100644 index 00000000..609bc6bd --- /dev/null +++ b/templates/sources.list.debian.tmpl @@ -0,0 +1,28 @@ +\## Note, this file is written by cloud-init on first boot of an instance +\## modifications made here will not survive a re-bundle. +\## if you wish to make changes you can: +\## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg +\## or do the same in user-data +\## b.) add sources in /etc/apt/sources.list.d +\## c.) make changes to template file /etc/cloud/templates/sources.list.debian.tmpl +\### + +# See http://www.debian.org/releases/stable/i386/release-notes/ch-upgrading.html +# for how to upgrade to newer versions of the distribution. +deb $mirror $codename main contrib non-free +deb-src $mirror $codename main contrib non-free + +\## Major bug fix updates produced after the final release of the +\## distribution. 
+deb $security $codename/updates main contrib non-free +deb-src $security $codename/updates main contrib non-free +deb $mirror $codename-updates main contrib non-free +deb-src $mirror $codename-updates main contrib non-free + +\## Uncomment the following two lines to add software from the 'backports' +\## repository. +\## N.B. software from this repository may not have been tested as +\## extensively as that contained in the main release, although it includes +\## newer versions of some applications which may provide useful features. +# deb http://backports.debian.org/debian-backports $codename-backports main contrib non-free +# deb-src http://backports.debian.org/debian-backports $codename-backports main contrib non-free diff --git a/templates/sources.list.tmpl b/templates/sources.list.tmpl deleted file mode 100644 index ce395b3d..00000000 --- a/templates/sources.list.tmpl +++ /dev/null @@ -1,60 +0,0 @@ -\## Note, this file is written by cloud-init on first boot of an instance -\## modifications made here will not survive a re-bundle. -\## if you wish to make changes you can: -\## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg -\## or do the same in user-data -\## b.) add sources in /etc/apt/sources.list.d -\## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl -\### - -# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to -# newer versions of the distribution. -deb $mirror $codename main -deb-src $mirror $codename main - -\## Major bug fix updates produced after the final release of the -\## distribution. -deb $mirror $codename-updates main -deb-src $mirror $codename-updates main - -\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu -\## team. Also, please note that software in universe WILL NOT receive any -\## review or updates from the Ubuntu security team. -deb $mirror $codename universe -deb-src $mirror $codename universe -deb $mirror $codename-updates universe -deb-src $mirror $codename-updates universe - -\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu -\## team, and may not be under a free licence. Please satisfy yourself as to -\## your rights to use the software. Also, please note that software in -\## multiverse WILL NOT receive any review or updates from the Ubuntu -\## security team. -# deb $mirror $codename multiverse -# deb-src $mirror $codename multiverse -# deb $mirror $codename-updates multiverse -# deb-src $mirror $codename-updates multiverse - -\## Uncomment the following two lines to add software from the 'backports' -\## repository. -\## N.B. software from this repository may not have been tested as -\## extensively as that contained in the main release, although it includes -\## newer versions of some applications which may provide useful features. -\## Also, please note that software in backports WILL NOT receive any review -\## or updates from the Ubuntu security team. -# deb $mirror $codename-backports main restricted universe multiverse -# deb-src $mirror $codename-backports main restricted universe multiverse - -\## Uncomment the following two lines to add software from Canonical's -\## 'partner' repository. -\## This software is not part of Ubuntu, but is offered by Canonical and the -\## respective vendors as a service to Ubuntu users. 
-# deb http://archive.canonical.com/ubuntu $codename partner -# deb-src http://archive.canonical.com/ubuntu $codename partner - -deb $security $codename-security main -deb-src $security $codename-security main -deb $security $codename-security universe -deb-src $security $codename-security universe -# deb $security $codename-security multiverse -# deb-src $security $codename-security multiverse diff --git a/templates/sources.list.ubuntu.tmpl b/templates/sources.list.ubuntu.tmpl new file mode 100644 index 00000000..ce395b3d --- /dev/null +++ b/templates/sources.list.ubuntu.tmpl @@ -0,0 +1,60 @@ +\## Note, this file is written by cloud-init on first boot of an instance +\## modifications made here will not survive a re-bundle. +\## if you wish to make changes you can: +\## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg +\## or do the same in user-data +\## b.) add sources in /etc/apt/sources.list.d +\## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl +\### + +# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to +# newer versions of the distribution. +deb $mirror $codename main +deb-src $mirror $codename main + +\## Major bug fix updates produced after the final release of the +\## distribution. +deb $mirror $codename-updates main +deb-src $mirror $codename-updates main + +\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu +\## team. Also, please note that software in universe WILL NOT receive any +\## review or updates from the Ubuntu security team. +deb $mirror $codename universe +deb-src $mirror $codename universe +deb $mirror $codename-updates universe +deb-src $mirror $codename-updates universe + +\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu +\## team, and may not be under a free licence. Please satisfy yourself as to +\## your rights to use the software. Also, please note that software in +\## multiverse WILL NOT receive any review or updates from the Ubuntu +\## security team. +# deb $mirror $codename multiverse +# deb-src $mirror $codename multiverse +# deb $mirror $codename-updates multiverse +# deb-src $mirror $codename-updates multiverse + +\## Uncomment the following two lines to add software from the 'backports' +\## repository. +\## N.B. software from this repository may not have been tested as +\## extensively as that contained in the main release, although it includes +\## newer versions of some applications which may provide useful features. +\## Also, please note that software in backports WILL NOT receive any review +\## or updates from the Ubuntu security team. +# deb $mirror $codename-backports main restricted universe multiverse +# deb-src $mirror $codename-backports main restricted universe multiverse + +\## Uncomment the following two lines to add software from Canonical's +\## 'partner' repository. +\## This software is not part of Ubuntu, but is offered by Canonical and the +\## respective vendors as a service to Ubuntu users. 
+# deb http://archive.canonical.com/ubuntu $codename partner +# deb-src http://archive.canonical.com/ubuntu $codename partner + +deb $security $codename-security main +deb-src $security $codename-security main +deb $security $codename-security universe +deb-src $security $codename-security universe +# deb $security $codename-security multiverse +# deb-src $security $codename-security multiverse -- cgit v1.2.3 From 6a72a8677ad2e4e66669d2be93880643b0525b51 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 27 Feb 2013 13:02:06 -0500 Subject: do not reload upstart configuration on upstart jobs For now, we disable reloading upstart jobs due to bug 1124384. At some point in the future, we could enable it again when that bug is fixed. The change here allows for a boothook in a multipart input to write the file '/run/cloud-init-upstart-reload' and then have configuration reloaded. --- cloudinit/handlers/upstart_job.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py index 4684f7f2..0aa7446e 100644 --- a/cloudinit/handlers/upstart_job.py +++ b/cloudinit/handlers/upstart_job.py @@ -65,6 +65,14 @@ class UpstartJobPartHandler(handlers.Handler): path = os.path.join(self.upstart_dir, filename) util.write_file(path, payload, 0644) - # if inotify support is not present in the root filesystem - # (overlayroot) then we need to tell upstart to re-read /etc - util.subp(["initctl", "reload-configuration"], capture=False) + # FIXME LATER (LP: #1124384) + # a bug in upstart means that invoking reload-configuration + # at this stage in boot causes havoc. So, until that is fixed + # we will not do that. However, I'd like to be able to easily + # test to see if this bug is still present in an image with + # a newer upstart. So, a boot hook could easiliy write this file. + if os.path.exists("/run/cloud-init-upstart-reload"): + # if inotify support is not present in the root filesystem + # (overlayroot) then we need to tell upstart to re-read /etc + + util.subp(["initctl", "reload-configuration"], capture=False) -- cgit v1.2.3 From e71071a9bea85235c708380473d8cf3912f7aa61 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 01:41:23 -0500 Subject: skip unit test due to LP: #1124384 --- tests/unittests/test_builtin_handlers.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py index 5f41cb3d..da52f15b 100644 --- a/tests/unittests/test_builtin_handlers.py +++ b/tests/unittests/test_builtin_handlers.py @@ -1,6 +1,7 @@ """Tests of the built-in user data handlers.""" import os +import unittest from mocker import MockerTestCase @@ -34,6 +35,7 @@ class TestBuiltins(MockerTestCase): None, None, None) self.assertEquals(0, len(os.listdir(up_root))) + @unittest.skip("until LP: #1124384 fixed") def test_upstart_frequency_single(self): # files should be written out when frequency is ! 
per-instance c_root = self.makeDir() -- cgit v1.2.3 From 368d2ba20a1ea7a97bf7186493b17be429a031d4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 01:43:06 -0500 Subject: initial stab at growpart module LP: #1136936 --- cloudinit/config/cc_growpart.py | 98 +++++++++++++++++++++++++++++++++++++++++ cloudinit/config/cc_resizefs.py | 85 +---------------------------------- cloudinit/util.py | 83 ++++++++++++++++++++++++++++++++++ config/cloud.cfg | 1 + 4 files changed, 183 insertions(+), 84 deletions(-) create mode 100644 cloudinit/config/cc_growpart.py diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py new file mode 100644 index 00000000..f958cd53 --- /dev/null +++ b/cloudinit/config/cc_growpart.py @@ -0,0 +1,98 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2011 Canonical Ltd. +# +# Author: Scott Moser +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import os.path +import stat + +from cloudinit.settings import PER_ALWAYS +from cloudinit import util + +frequency = PER_ALWAYS + + +def device_part_info(devpath, log): + # convert an entry in /dev/ to parent disk and partition number + + # input of /dev/vdb or /dev/disk/by-label/foo + # rpath is hopefully a real-ish path in /dev (vda, sdb..) + rpath = os.path.realpath(devpath) + + bname = os.path.basename(rpath) + syspath = "/sys/class/block/%s" % bname + + if not os.path.exists(syspath): + log.debug("%s had no syspath (%s)" % (devpath, syspath)) + return None + + ptpath = os.path.join(syspath, "partition") + if not os.path.exists(ptpath): + log.debug("%s not a partition" % devpath) + return None + + ptnum = util.load_file(ptpath).rstrip() + + # for a partition, real syspath is something like: + # /sys/devices/pci0000:00/0000:00:04.0/virtio1/block/vda/vda1 + rsyspath = os.path.realpath(syspath) + disksyspath = os.path.dirname(rsyspath) + + diskmajmin = util.load_file(os.path.join(disksyspath, "dev")).rstrip() + diskdevpath = os.path.realpath("/dev/block/%s" % diskmajmin) + + # diskdevpath has something like 253:0 + # and udev has put links in /dev/block/253:0 to the device name in /dev/ + return (diskdevpath, ptnum) + + +def handle(name, cfg, _cloud, log, args): + if len(args) != 0: + growroot = args[0] + else: + growroot = util.get_cfg_option_bool(cfg, "growroot", True) + + if not growroot: + log.debug("Skipping module named %s, growroot disabled", name) + return + + resize_what = "/" + result = util.get_mount_info(resize_what, log) + if not result: + log.warn("Could not determine filesystem type of %s" % resize_what) + return + + (devpth, _fs_type, mount_point) = result + + # Ensure the path is a block device. 
+ if not stat.S_ISBLK(os.stat(devpth).st_mode): + log.debug("The %s device which was found for mount point %s for %s " + "is not a block device" % (devpth, mount_point, resize_what)) + return + + result = device_part_info(devpth, log) + if not result: + log.debug("%s did not look like a partition" % devpth) + + (disk, ptnum) = result + + try: + (out, _err) = util.subp(["growpart", disk, ptnum], rcs=[0, 1]) + except util.ProcessExecutionError as e: + log.warn("growpart failed: %s/%s" % (e.stdout, e.stderr)) + return + + log.debug("growpart said: %s" % out) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 44b27933..51dead2f 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -51,89 +51,6 @@ RESIZE_FS_PREFIXES_CMDS = [ NOBLOCK = "noblock" -def get_mount_info(path, log): - # Use /proc/$$/mountinfo to find the device where path is mounted. - # This is done because with a btrfs filesystem using os.stat(path) - # does not return the ID of the device. - # - # Here, / has a device of 18 (decimal). - # - # $ stat / - # File: '/' - # Size: 234 Blocks: 0 IO Block: 4096 directory - # Device: 12h/18d Inode: 256 Links: 1 - # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) - # Access: 2013-01-13 07:31:04.358011255 +0000 - # Modify: 2013-01-13 18:48:25.930011255 +0000 - # Change: 2013-01-13 18:48:25.930011255 +0000 - # Birth: - - # - # Find where / is mounted: - # - # $ mount | grep ' / ' - # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) - # - # And the device ID for /dev/vda1 is not 18: - # - # $ ls -l /dev/vda1 - # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 - # - # So use /proc/$$/mountinfo to find the device underlying the - # input path. - path_elements = [e for e in path.split('/') if e] - devpth = None - fs_type = None - match_mount_point = None - match_mount_point_elements = None - mountinfo_path = '/proc/%s/mountinfo' % os.getpid() - for line in util.load_file(mountinfo_path).splitlines(): - parts = line.split() - - mount_point = parts[4] - mount_point_elements = [e for e in mount_point.split('/') if e] - - # Ignore mounts deeper than the path in question. - if len(mount_point_elements) > len(path_elements): - continue - - # Ignore mounts where the common path is not the same. - l = min(len(mount_point_elements), len(path_elements)) - if mount_point_elements[0:l] != path_elements[0:l]: - continue - - # Ignore mount points higher than an already seen mount - # point. - if (match_mount_point_elements is not None and - len(match_mount_point_elements) > len(mount_point_elements)): - continue - - # Find the '-' which terminates a list of optional columns to - # find the filesystem type and the path to the device. See - # man 5 proc for the format of this file. - try: - i = parts.index('-') - except ValueError: - log.debug("Did not find column named '-' in %s", - mountinfo_path) - return None - - # Get the path to the device. - try: - fs_type = parts[i + 1] - devpth = parts[i + 2] - except IndexError: - log.debug("Too few columns in %s after '-' column", mountinfo_path) - return None - - match_mount_point = mount_point - match_mount_point_elements = mount_point_elements - - if devpth and fs_type and match_mount_point: - return (devpth, fs_type, match_mount_point) - else: - return None - - def handle(name, cfg, _cloud, log, args): if len(args) != 0: resize_root = args[0] @@ -150,7 +67,7 @@ def handle(name, cfg, _cloud, log, args): # TODO(harlowja): allow what is to be resized to be configurable?? 
resize_what = "/" - result = get_mount_info(resize_what, log) + result = util.get_mount_info(resize_what, log) if not result: log.warn("Could not determine filesystem type of %s", resize_what) return diff --git a/cloudinit/util.py b/cloudinit/util.py index ffe844b2..1e9ca5d9 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1586,3 +1586,86 @@ def expand_package_list(version_fmt, pkgs): raise RuntimeError("Invalid package type.") return pkglist + + +def get_mount_info(path, log): + # Use /proc/$$/mountinfo to find the device where path is mounted. + # This is done because with a btrfs filesystem using os.stat(path) + # does not return the ID of the device. + # + # Here, / has a device of 18 (decimal). + # + # $ stat / + # File: '/' + # Size: 234 Blocks: 0 IO Block: 4096 directory + # Device: 12h/18d Inode: 256 Links: 1 + # Access: (0755/drwxr-xr-x) Uid: ( 0/ root) Gid: ( 0/ root) + # Access: 2013-01-13 07:31:04.358011255 +0000 + # Modify: 2013-01-13 18:48:25.930011255 +0000 + # Change: 2013-01-13 18:48:25.930011255 +0000 + # Birth: - + # + # Find where / is mounted: + # + # $ mount | grep ' / ' + # /dev/vda1 on / type btrfs (rw,subvol=@,compress=lzo) + # + # And the device ID for /dev/vda1 is not 18: + # + # $ ls -l /dev/vda1 + # brw-rw---- 1 root disk 253, 1 Jan 13 08:29 /dev/vda1 + # + # So use /proc/$$/mountinfo to find the device underlying the + # input path. + path_elements = [e for e in path.split('/') if e] + devpth = None + fs_type = None + match_mount_point = None + match_mount_point_elements = None + mountinfo_path = '/proc/%s/mountinfo' % os.getpid() + for line in load_file(mountinfo_path).splitlines(): + parts = line.split() + + mount_point = parts[4] + mount_point_elements = [e for e in mount_point.split('/') if e] + + # Ignore mounts deeper than the path in question. + if len(mount_point_elements) > len(path_elements): + continue + + # Ignore mounts where the common path is not the same. + l = min(len(mount_point_elements), len(path_elements)) + if mount_point_elements[0:l] != path_elements[0:l]: + continue + + # Ignore mount points higher than an already seen mount + # point. + if (match_mount_point_elements is not None and + len(match_mount_point_elements) > len(mount_point_elements)): + continue + + # Find the '-' which terminates a list of optional columns to + # find the filesystem type and the path to the device. See + # man 5 proc for the format of this file. + try: + i = parts.index('-') + except ValueError: + log.debug("Did not find column named '-' in %s", + mountinfo_path) + return None + + # Get the path to the device. + try: + fs_type = parts[i + 1] + devpth = parts[i + 2] + except IndexError: + log.debug("Too few columns in %s after '-' column", mountinfo_path) + return None + + match_mount_point = mount_point + match_mount_point_elements = mount_point_elements + + if devpth and fs_type and match_mount_point: + return (devpth, fs_type, match_mount_point) + else: + return None diff --git a/config/cloud.cfg b/config/cloud.cfg index a8c74486..b61b8a7d 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -26,6 +26,7 @@ cloud_init_modules: - migrator - bootcmd - write-files + - growpart - resizefs - set_hostname - update_hostname -- cgit v1.2.3 From 86fe289ceb9b292ea91dbca056e0159e74091e47 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 14:19:54 -0500 Subject: add some unit tests, fix an issue or two * drop the parsing of options into csv, as we were only exploding them back. That can only result in error. Just do minimal parsing. 
* change the parsing of key lines to: if entry is valid: * use it else try taking off options: if good, use it else fail --- cloudinit/ssh_util.py | 97 +++++++++++++++++++---------------------- tests/unittests/test_sshutil.py | 94 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 140 insertions(+), 51 deletions(-) create mode 100644 tests/unittests/test_sshutil.py diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index dd6b742f..863a63e7 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -107,62 +107,57 @@ class AuthKeyLineParser(object): i = i + 1 options = ent[0:i] - options_lst = [] - - # Now use a csv parser to pull the options - # out of the above string that we just found an endpoint for. - # - # No quoting so we don't mess up any of the quoting that - # is already there. - reader = csv.reader(StringIO(options), quoting=csv.QUOTE_NONE) - for row in reader: - for e in row: - # Only keep non-empty csv options - e = e.strip() - if e: - options_lst.append(e) - - # Now take the rest of the items before the string - # as long as there is room to do this... - toks = [] - if i + 1 < len(ent): - rest = ent[i + 1:] - toks = rest.split(None, 2) - return (options_lst, toks) - - def _form_components(self, src_line, toks, options=None): - components = {} - if len(toks) == 1: - components['base64'] = toks[0] - elif len(toks) == 2: - components['base64'] = toks[0] - components['comment'] = toks[1] - elif len(toks) == 3: - components['keytype'] = toks[0] - components['base64'] = toks[1] - components['comment'] = toks[2] - components['options'] = options - if not components: - return AuthKeyLine(src_line) - else: - return AuthKeyLine(src_line, **components) + + # Return the rest of the string in 'remain' + remain = ent[i:].lstrip() + return (options, remain) def parse(self, src_line, def_opt=None): + # modeled after opensshes auth2-pubkey.c:user_key_allowed2 line = src_line.rstrip("\r\n") if line.startswith("#") or line.strip() == '': return AuthKeyLine(src_line) - else: - ent = line.strip() - toks = ent.split(None, 3) - if len(toks) < 4: - return self._form_components(src_line, toks, def_opt) - else: - (options, toks) = self._extract_options(ent) - if options: - options = ",".join(options) - else: - options = def_opt - return self._form_components(src_line, toks, options) + + def parse_ssh_key(ent): + # return ketype, key, [comment] + toks = ent.split(None, 2) + if len(toks) < 2: + raise TypeError("To few fields: %s" % len(toks)) + if not _is_valid_ssh_keytype(toks[0]): + raise TypeError("Invalid keytype %s" % toks[0]) + + # valid key type and 2 or 3 fields: + if len(toks) == 2: + # no comment in line + toks.append("") + + return toks + + ent = line.strip() + options = None + try: + (keytype, base64, comment) = parse_ssh_key(ent) + options = def_opt + except TypeError as e: + (options, remain) = self._extract_options(ent) + try: + (keytype, base64, comment) = parse_ssh_key(remain) + except TypeError as e: + return AuthKeyLine(src_line) + + return AuthKeyLine(src_line, keytype=keytype, base64=base64, + comment=comment, options=options) + + +def _is_valid_ssh_keytype(key): + valid = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", + "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", + "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", + "ecdsa-sha2-nistp256-cert-v01@openssh.com", + "ecdsa-sha2-nistp384-cert-v01@openssh.com", + "ecdsa-sha2-nistp521-cert-v01@openssh.com") + + return key in valid def parse_authorized_keys(fname): diff --git 
a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py new file mode 100644 index 00000000..4564d9be --- /dev/null +++ b/tests/unittests/test_sshutil.py @@ -0,0 +1,94 @@ +from unittest import TestCase +from cloudinit import ssh_util + + +VALID_CONTENT = { + 'dsa': ( + "AAAAB3NzaC1kc3MAAACBAIrjOQSlSea19bExXBMBKBvcLhBoVvNBjCppNzllipF" + "W4jgIOMcNanULRrZGjkOKat6MWJNetSbV1E6IOFDQ16rQgsh/OvYU9XhzM8seLa" + "A21VszZuhIV7/2DE3vxu7B54zVzueG1O1Deq6goQCRGWBUnqO2yluJiG4HzrnDa" + "jzRAAAAFQDMPO96qXd4F5A+5b2f2MO7SpVomQAAAIBpC3K2zIbDLqBBs1fn7rsv" + "KcJvwihdlVjG7UXsDB76P2GNqVG+IlYPpJZ8TO/B/fzTMtrdXp9pSm9OY1+BgN4" + "REsZ2WNcvfgY33aWaEM+ieCcQigvxrNAF2FTVcbUIIxAn6SmHuQSWrLSfdHc8H7" + "hsrgeUPPdzjBD/cv2ZmqwZ1AAAAIAplIsScrJut5wJMgyK1JG0Kbw9JYQpLe95P" + "obB069g8+mYR8U0fysmTEdR44mMu0VNU5E5OhTYoTGfXrVrkR134LqFM2zpVVbE" + "JNDnIqDHxTkc6LY2vu8Y2pQ3/bVnllZZOda2oD5HQ7ovygQa6CH+fbaZHbdDUX/" + "5z7u2rVAlDw==" + ), + 'ecdsa': ( + "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBITrGBB3cgJ" + "J7fPxvtMW9H3oRisNpJ3OAslxZeyP7I0A9BPAW0RQIwHVtVnM7zrp4nI+JLZov/" + "Ql7lc2leWL7CY=" + ), + 'rsa': ( + "AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5oz" + "emNSj8T7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbD" + "c1pvxzxtchBj78hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q" + "7NDwfIrJJtO7Hi42GyXtvEONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhT" + "YWpMfYdPUnE7u536WqzFmsaqJctz3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07" + "/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SCmXp5Kt5/82cD/VN3NtHw==" + ), +} + +TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding," + 'command="echo \'Please login as the user \"ubuntu\" rather than the' + 'user \"root\".\';echo;sleep 10"') + +class TestAuthKeyLineParser(TestCase): + def test_simple_parse(self): + # test key line with common 3 fields (keytype, base64, comment) + parser = ssh_util.AuthKeyLineParser() + for ktype in ['rsa', 'ecdsa', 'dsa']: + content = VALID_CONTENT[ktype] + comment = 'user-%s@host' % ktype + line = ' '.join((ktype, content, comment,)) + key = parser.parse(line) + + self.assertEqual(key.base64, content) + self.assertFalse(key.options) + self.assertEqual(key.comment, comment) + self.assertEqual(key.keytype, ktype) + + def test_parse_no_comment(self): + # test key line with key type and base64 only + parser = ssh_util.AuthKeyLineParser() + for ktype in ['rsa', 'ecdsa', 'dsa']: + content = VALID_CONTENT[ktype] + line = ' '.join((ktype, content,)) + key = parser.parse(line) + + self.assertEqual(key.base64, content) + self.assertFalse(key.options) + self.assertFalse(key.comment) + self.assertEqual(key.keytype, ktype) + + def test_parse_with_options(self): + # test key line with options in it + parser = ssh_util.AuthKeyLineParser() + options = TEST_OPTIONS + for ktype in ['rsa', 'ecdsa', 'dsa']: + content = VALID_CONTENT[ktype] + comment = 'user-%s@host' % ktype + line = ' '.join((options, ktype, content, comment,)) + key = parser.parse(line) + + self.assertEqual(key.base64, content) + self.assertEqual(key.options, options) + self.assertEqual(key.comment, comment) + self.assertEqual(key.keytype, ktype) + + def test_parse_with_defopt(self): + # test key line with key type and base64 only + parser = ssh_util.AuthKeyLineParser() + for ktype in ['rsa', 'ecdsa', 'dsa']: + content = VALID_CONTENT[ktype] + line = ' '.join((ktype, content,)) + myopts = "no-port-forwarding,no-agent-forwarding" + key = parser.parse(line, myopts) + + self.assertEqual(key.base64, content) + self.assertEqual(key.options, myopts) + self.assertFalse(key.comment) + 
self.assertEqual(key.keytype, ktype) + +# vi: ts=4 expandtab -- cgit v1.2.3 From ceec6724143e950d6ceb9ea0758dbfd1ad33921a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 14:22:00 -0500 Subject: move function to a static list, comment where it came from --- cloudinit/ssh_util.py | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 863a63e7..082c5bbd 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -33,6 +33,14 @@ LOG = logging.getLogger(__name__) # See: man sshd_config DEF_SSHD_CFG = "/etc/ssh/sshd_config" +# taken from openssh source key.c/key_type_from_name +VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", + "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", + "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", + "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", + "ecdsa-sha2-nistp256-cert-v01@openssh.com", + "ecdsa-sha2-nistp384-cert-v01@openssh.com", + "ecdsa-sha2-nistp521-cert-v01@openssh.com") class AuthKeyLine(object): def __init__(self, source, keytype=None, base64=None, @@ -123,7 +131,7 @@ class AuthKeyLineParser(object): toks = ent.split(None, 2) if len(toks) < 2: raise TypeError("To few fields: %s" % len(toks)) - if not _is_valid_ssh_keytype(toks[0]): + if toks[0] not in VALID_KEY_TYPES: raise TypeError("Invalid keytype %s" % toks[0]) # valid key type and 2 or 3 fields: @@ -149,17 +157,6 @@ class AuthKeyLineParser(object): comment=comment, options=options) -def _is_valid_ssh_keytype(key): - valid = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", - "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", - "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", - "ecdsa-sha2-nistp256-cert-v01@openssh.com", - "ecdsa-sha2-nistp384-cert-v01@openssh.com", - "ecdsa-sha2-nistp521-cert-v01@openssh.com") - - return key in valid - - def parse_authorized_keys(fname): lines = [] try: -- cgit v1.2.3 From ff0a34876dc0ce29b762ffd7fcdbfa80308e5aae Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 14:56:55 -0500 Subject: change parser.parse 'default_opts' to 'options' Now, parser.parse specifies options that override any options found, rather than just being default options. There could still potentially be a user for default_options, but since we're not using them anywhere, I've dropped it. The difference is that in setting up the root user, we're now insisting that all keys that go in there have the key_prefix, even if the key content had other options. I think this is actually the commit that fixes LP: #1136343. 
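To make the new behaviour concrete, a small sketch using the parser (mirroring the test_parse_with_options_passed_in case added below; the key material here is a made-up placeholder, not a real base64 blob):

    from cloudinit import ssh_util

    parser = ssh_util.AuthKeyLineParser()
    # hypothetical authorized_keys line with an option prefix and fake key data
    line = "no-X11-forwarding ssh-rsa AAAAB3FAKEKEYDATA user@host"

    # with no options passed in, the options found on the line are kept
    key = parser.parse(line)
    print(key.options)   # no-X11-forwarding

    # options passed to parse() now override whatever the line carried,
    # which is how cc_ssh now forces the key_prefix onto root's keys
    key = parser.parse(line, options="no-port-forwarding,no-agent-forwarding")
    print(key.options)   # no-port-forwarding,no-agent-forwarding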
--- cloudinit/config/cc_ssh.py | 4 ++-- cloudinit/ssh_util.py | 27 ++++++++++++++------------- tests/unittests/test_sshutil.py | 28 +++++++++++++++++----------- 3 files changed, 33 insertions(+), 26 deletions(-) diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index b623d476..7ef20d9f 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -126,7 +126,7 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): keys = set(keys) if user: - ssh_util.setup_user_keys(keys, user, '') + ssh_util.setup_user_keys(keys, user) if disable_root: if not user: @@ -135,4 +135,4 @@ def apply_credentials(keys, user, disable_root, disable_root_opts): else: key_prefix = '' - ssh_util.setup_user_keys(keys, 'root', key_prefix) + ssh_util.setup_user_keys(keys, 'root', options=key_prefix) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 082c5bbd..44c7c15b 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -51,11 +51,8 @@ class AuthKeyLine(object): self.keytype = keytype self.source = source - def empty(self): - if (not self.base64 and - not self.comment and not self.keytype and not self.options): - return True - return False + def valid(self): + return (self.base64 and self.keytype) def __str__(self): toks = [] @@ -120,7 +117,7 @@ class AuthKeyLineParser(object): remain = ent[i:].lstrip() return (options, remain) - def parse(self, src_line, def_opt=None): + def parse(self, src_line, options=None): # modeled after opensshes auth2-pubkey.c:user_key_allowed2 line = src_line.rstrip("\r\n") if line.startswith("#") or line.strip() == '': @@ -141,13 +138,17 @@ class AuthKeyLineParser(object): return toks + if "badopt" in src_line: + import ipdb; ipdb.set_trace() + ent = line.strip() - options = None try: (keytype, base64, comment) = parse_ssh_key(ent) - options = def_opt except TypeError as e: - (options, remain) = self._extract_options(ent) + (keyopts, remain) = self._extract_options(ent) + if options is None: + options = keyopts + try: (keytype, base64, comment) = parse_ssh_key(remain) except TypeError as e: @@ -178,11 +179,11 @@ def update_authorized_keys(old_entries, keys): for i in range(0, len(old_entries)): ent = old_entries[i] - if ent.empty() or not ent.base64: + if ent.valid(): continue # Replace those with the same base64 for k in keys: - if k.empty() or not k.base64: + if ent.valid(): continue if k.base64 == ent.base64: # Replace it with our better one @@ -241,7 +242,7 @@ def extract_authorized_keys(username): return (auth_key_fn, parse_authorized_keys(auth_key_fn)) -def setup_user_keys(keys, username, key_prefix): +def setup_user_keys(keys, username, options=None): # Make sure the users .ssh dir is setup accordingly (ssh_dir, pwent) = users_ssh_info(username) if not os.path.isdir(ssh_dir): @@ -252,7 +253,7 @@ def setup_user_keys(keys, username, key_prefix): parser = AuthKeyLineParser() key_entries = [] for k in keys: - key_entries.append(parser.parse(str(k), def_opt=key_prefix)) + key_entries.append(parser.parse(str(k), options=options)) # Extract the old and make the new (auth_key_fn, auth_key_entries) = extract_authorized_keys(username) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 4564d9be..2415d06f 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -62,7 +62,7 @@ class TestAuthKeyLineParser(TestCase): self.assertFalse(key.comment) self.assertEqual(key.keytype, ktype) - def test_parse_with_options(self): + def test_parse_with_keyoptions(self): # 
test key line with options in it parser = ssh_util.AuthKeyLineParser() options = TEST_OPTIONS @@ -77,18 +77,24 @@ class TestAuthKeyLineParser(TestCase): self.assertEqual(key.comment, comment) self.assertEqual(key.keytype, ktype) - def test_parse_with_defopt(self): + def test_parse_with_options_passed_in(self): # test key line with key type and base64 only parser = ssh_util.AuthKeyLineParser() - for ktype in ['rsa', 'ecdsa', 'dsa']: - content = VALID_CONTENT[ktype] - line = ' '.join((ktype, content,)) - myopts = "no-port-forwarding,no-agent-forwarding" - key = parser.parse(line, myopts) - self.assertEqual(key.base64, content) - self.assertEqual(key.options, myopts) - self.assertFalse(key.comment) - self.assertEqual(key.keytype, ktype) + baseline = ' '.join(("rsa", VALID_CONTENT['rsa'], "user@host")) + myopts = "no-port-forwarding,no-agent-forwarding" + + key = parser.parse("allowedopt" + " " + baseline) + self.assertEqual(key.options, "allowedopt") + + key = parser.parse("overridden_opt " + baseline, options=myopts) + self.assertEqual(key.options, myopts) + + def test_parse_invalid_keytype(self): + parser = ssh_util.AuthKeyLineParser() + key = parser.parse(' '.join(["badkeytype", VALID_CONTENT['rsa']])) + + self.assertFalse(key.valid()) + # vi: ts=4 expandtab -- cgit v1.2.3 From ac83536339d2622f4a896f50681497a388e7e26f Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 16:03:02 -0500 Subject: fix regression on expected label of filesystem for DataSourceNone Last addition to DataSourceNoCloud left it looking for a filesystem named 'None'. --- cloudinit/sources/DataSourceNoCloud.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 097bbc52..603f0155 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -86,11 +86,11 @@ class DataSourceNoCloud(sources.DataSource): if 'ds_config' not in found: found.append("ds_config") - if self.ds_cfg.get('fs_label', "cidata"): + label = self.ds_cfg.get('fs_label', "cidata") + if label is not None: fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) - label = self.ds_cfg.get('fs_label') label_list = util.find_devs_with("LABEL=%s" % label) devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) -- cgit v1.2.3 From d55c9ae845544871d6bf105b44f701b7076c8e35 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 16:07:54 -0500 Subject: remove debug code --- cloudinit/ssh_util.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 44c7c15b..4b29661f 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -138,9 +138,6 @@ class AuthKeyLineParser(object): return toks - if "badopt" in src_line: - import ipdb; ipdb.set_trace() - ent = line.strip() try: (keytype, base64, comment) = parse_ssh_key(ent) -- cgit v1.2.3 From a6ef326b46a7f99b7ec585df595ef41151705ceb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 Mar 2013 16:10:53 -0500 Subject: fix reversed logic --- cloudinit/ssh_util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 4b29661f..65fab117 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -176,11 +176,11 @@ def update_authorized_keys(old_entries, keys): for i in range(0, len(old_entries)): ent = old_entries[i] - if ent.valid(): + if not ent.valid(): continue # Replace those 
with the same base64 for k in keys: - if ent.valid(): + if not ent.valid(): continue if k.base64 == ent.base64: # Replace it with our better one -- cgit v1.2.3 From 1d015f6ec3284287ad1383d0e2d9a264128f23eb Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 3 Mar 2013 20:56:32 -0500 Subject: add default log value to get_mount_info --- cloudinit/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 1e9ca5d9..d0a6f81c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1588,7 +1588,7 @@ def expand_package_list(version_fmt, pkgs): return pkglist -def get_mount_info(path, log): +def get_mount_info(path, log=LOG): # Use /proc/$$/mountinfo to find the device where path is mounted. # This is done because with a btrfs filesystem using os.stat(path) # does not return the ID of the device. -- cgit v1.2.3 From 7bb938c0a677637796ee62fc4242d8c0118987bd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 3 Mar 2013 21:03:28 -0500 Subject: more work --- cloudinit/config/cc_growpart.py | 169 +++++++++++++++++++++++++++++++++------- 1 file changed, 139 insertions(+), 30 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index f958cd53..206cfc94 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -17,6 +17,7 @@ # along with this program. If not, see . import os.path +import re import stat from cloudinit.settings import PER_ALWAYS @@ -24,8 +25,82 @@ from cloudinit import util frequency = PER_ALWAYS +def resizer_factory(mode): + resize_class = None + if mode == "auto": + for (_name, resizer) in RESIZERS: + cur = resizer() + if cur.available(): + resize_class = cur -def device_part_info(devpath, log): + if not resize_class: + raise ValueError("No resizers available") + + else: + mmap = {} + for (k, v) in RESIZERS: + mmap[k] = v + + if mode not in mmap: + raise TypeError("unknown resize mode %s" % mode) + + mclass = mmap[mode]() + if mclass.available(): + resize_class = mclass + + if not resize_class: + raise ValueError("mode %s not available" % mode) + + return resize_class + + +class ResizeFailedException(Exception): + pass + + +class ResizeParted(object): + def available(self): + myenv = os.environ.copy() + myenv['LANG'] = 'C' + + try: + (out, _err) = util.subp(["parted", "--help"], env=myenv) + if re.search("COMMAND.*resize\s+", out, re.DOTALL): + return True + + except util.ProcessExecutionError: + pass + return False + + def resize(self, blockdev, part): + try: + util.subp(["parted", "resizepart", blockdev, part]) + except util.ProcessExecutionError as e: + raise ResizeFailedException(e) + + +class ResizeGrowPart(object): + def available(self): + myenv = os.environ.copy() + myenv['LANG'] = 'C' + + try: + (out, _err) = util.subp(["growpart", "--help"], env=myenv) + if re.search("--update\s+", out, re.DOTALL): + return True + + except util.ProcessExecutionError: + pass + return False + + def resize(self, blockdev, part): + try: + util.subp(["growpart", blockdev, part]) + except util.ProcessExecutionError as e: + raise ResizeFailedException(e) + + +def device_part_info(devpath): # convert an entry in /dev/ to parent disk and partition number # input of /dev/vdb or /dev/disk/by-label/foo @@ -36,13 +111,11 @@ def device_part_info(devpath, log): syspath = "/sys/class/block/%s" % bname if not os.path.exists(syspath): - log.debug("%s had no syspath (%s)" % (devpath, syspath)) - return None + raise ValueError("%s had no syspath (%s)" % (devpath, syspath)) ptpath = 
os.path.join(syspath, "partition") if not os.path.exists(ptpath): - log.debug("%s not a partition" % devpath) - return None + raise TypeError("%s not a partition" % devpath) ptnum = util.load_file(ptpath).rstrip() @@ -59,40 +132,76 @@ def device_part_info(devpath, log): return (diskdevpath, ptnum) -def handle(name, cfg, _cloud, log, args): - if len(args) != 0: - growroot = args[0] +def devent2dev(devent): + if devent.startswith("/dev/"): + return devent else: - growroot = util.get_cfg_option_bool(cfg, "growroot", True) + result = util.get_mount_info(devent) + if not result: + raise ValueError("Could not determine device of '%s' % dev_ent") + return result[0] + + +def resize(resizer, devices, log): + resized = [] + for devent in devices: + try: + blockdev = devent2dev(devent) + except ValueError as e: + log.debug("unable to turn %s into device: %s" % (devent, e)) + continue + + if not stat.S_ISBLK(os.stat(blockdev).st_mode): + log.debug("device '%s' for '%s' is not a block device" % + (devent, blockdev)) + continue + + try: + (disk, ptnum) = device_part_info(blockdev) + except (TypeError, ValueError) as e: + log.debug("failed to get part_info for (%s, %s): %s" % + (devent, blockdev, e)) + continue + + try: + resizer.resize(disk, ptnum) + except ResizeFailedException as e: + log.warn("failed to resize: devent=%s, disk=%s, ptnum=%s: %s", + devent, disk, ptnum, e) + + resized.append(devent) + + return resized + + +def handle(name, cfg, _cloud, log, _args): + if 'growpart' not in cfg: + log.debug("Skipping module named %s, no growpart entry", name) + return - if not growroot: - log.debug("Skipping module named %s, growroot disabled", name) + mycfg = cfg.get('growpart') + if not isinstance(mycfg, dict): + log.warn("'growpart' in config was not a dict") return - resize_what = "/" - result = util.get_mount_info(resize_what, log) - if not result: - log.warn("Could not determine filesystem type of %s" % resize_what) + mode = mycfg.get('mode') + if util.is_false(mode): + log.debug("growpart disabled: mode=%s" % mode) return - (devpth, _fs_type, mount_point) = result + try: + resizer = resizer_factory(mode) + except (ValueError, TypeError) as e: + log.debug("growpart unable to find resizer for '%s': %s" % (mode, e)) - # Ensure the path is a block device. 
- if not stat.S_ISBLK(os.stat(devpth).st_mode): - log.debug("The %s device which was found for mount point %s for %s " - "is not a block device" % (devpth, mount_point, resize_what)) + devices = util.get_cfg_option_list(cfg, "devices", ["/"]) + if not len(devices): + log.debug("growpart: empty device list") return - result = device_part_info(devpth, log) - if not result: - log.debug("%s did not look like a partition" % devpth) + resized = resize(resizer, devices, log) + log.debug("resized: %s" % resized) - (disk, ptnum) = result +RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) - try: - (out, _err) = util.subp(["growpart", disk, ptnum], rcs=[0, 1]) - except util.ProcessExecutionError as e: - log.warn("growpart failed: %s/%s" % (e.stdout, e.stderr)) - return - log.debug("growpart said: %s" % out) -- cgit v1.2.3 From 1a3e0fa19b6c3224ea986141b37f74d5464b7c82 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 4 Mar 2013 09:56:24 -0500 Subject: if sigterm received, and networking is up, exit 0 LP: #1015223 --- upstart/cloud-init-nonet.conf | 53 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 11 deletions(-) diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf index 118ffc1c..bdeb5a2e 100644 --- a/upstart/cloud-init-nonet.conf +++ b/upstart/cloud-init-nonet.conf @@ -10,19 +10,50 @@ task console output script - # /run/network/static-network-up-emitted is written by - # upstart (via /etc/network/if-up.d/upstart). its presense would - # indicate that static-network-up has already fired. - EMITTED="/run/network/static-network-up-emitted" - [ -e "$EMITTED" -o -e "/var/$EMITTED" ] && exit 0 + set +e # you cannot trap TERM reliably with 'set -e' + SLEEP_CHILD="" + static_network_up() { + local emitted="/run/network/static-network-up-emitted" + # /run/network/static-network-up-emitted is written by + # upstart (via /etc/network/if-up.d/upstart). its presense would + # indicate that static-network-up has already fired. + [ -e "$emitted" -o -e "/var/$emitted" ] + } + msg() { + echo "$UPSTART_JOB:" "$1" + } + + handle_sigterm() { + # if we received sigterm and static networking is up then it probably + # came from upstart as a result of 'stop on static-network-up' + [ -z "$SLEEP_CHILD" ] || kill $SLEEP_CHILD + if static_network_up; then + msg "static networking is now up" + exit 0 + fi + exit 2 + } + + dowait() { + msg "waiting $1 seconds for network device" + sleep "$1" & + SLEEP_CHILD=$! + wait $SLEEP_CHILD + SLEEP_CHILD="" + } + + trap handle_sigterm TERM + + # static_network_up already occurred + static_network_up && exit 0 + + # obj.pkl comes from cloud-init-local (or previous boot and + # manual_cache_clean) [ -f /var/lib/cloud/instance/obj.pkl ] && exit 0 - short=10; long=120; - sleep ${short} - echo $UPSTART_JOB "waiting ${long} seconds for a network device." - sleep ${long} - echo $UPSTART_JOB "gave up waiting for a network device." + dowait 10 + dowait 120 + msg "gave up waiting for a network device." 
: > /var/lib/cloud/data/no-net end script -# EOF -- cgit v1.2.3 From bcf04395be323c60b013e531ef3bf32b722a780a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 4 Mar 2013 14:28:34 -0500 Subject: add uptime to msg output --- upstart/cloud-init-nonet.conf | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/upstart/cloud-init-nonet.conf b/upstart/cloud-init-nonet.conf index bdeb5a2e..36b99fb5 100644 --- a/upstart/cloud-init-nonet.conf +++ b/upstart/cloud-init-nonet.conf @@ -21,7 +21,11 @@ script [ -e "$emitted" -o -e "/var/$emitted" ] } msg() { - echo "$UPSTART_JOB:" "$1" + local uptime="" idle="" + if [ -r /proc/uptime ]; then + read uptime idle < /proc/uptime + fi + echo "$UPSTART_JOB${uptime:+[${uptime}]}:" "$1" } handle_sigterm() { @@ -32,6 +36,7 @@ script msg "static networking is now up" exit 0 fi + msg "recieved SIGTERM, networking not up" exit 2 } -- cgit v1.2.3 From ab73a5a7befb9583a9b2cee35fa99e363793c116 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 4 Mar 2013 16:59:57 -0500 Subject: add the unit test, fix a few issues --- cloudinit/config/cc_growpart.py | 6 +- .../test_handler/test_handler_growpart.py | 156 +++++++++++++++++++++ 2 files changed, 161 insertions(+), 1 deletion(-) create mode 100644 tests/unittests/test_handler/test_handler_growpart.py diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 206cfc94..d49159ed 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -32,6 +32,7 @@ def resizer_factory(mode): cur = resizer() if cur.available(): resize_class = cur + break if not resize_class: raise ValueError("No resizers available") @@ -65,7 +66,7 @@ class ResizeParted(object): try: (out, _err) = util.subp(["parted", "--help"], env=myenv) - if re.search("COMMAND.*resize\s+", out, re.DOTALL): + if re.search("COMMAND.*resizepart\s+", out, re.DOTALL): return True except util.ProcessExecutionError: @@ -193,6 +194,9 @@ def handle(name, cfg, _cloud, log, _args): resizer = resizer_factory(mode) except (ValueError, TypeError) as e: log.debug("growpart unable to find resizer for '%s': %s" % (mode, e)) + if mode != "auto": + raise e + return devices = util.get_cfg_option_list(cfg, "devices", ["/"]) if not len(devices): diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py new file mode 100644 index 00000000..9d2a8dae --- /dev/null +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -0,0 +1,156 @@ +from mocker import MockerTestCase + +from cloudinit import cloud +from cloudinit import helpers +from cloudinit import util + +from cloudinit.config import cc_growpart + +import logging +import os +import mocker + +# growpart: +# mode: auto # off, on, auto, 'growpart', 'parted' +# devices: ['root'] + +HELP_PARTED_NO_RESIZE = """ +Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...] +Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in +interactive mode. + +OPTIONs: + + +COMMANDs: + + quit exit program + rescue START END rescue a lost partition near START + and END + resize NUMBER START END resize partition NUMBER and its file + system + rm NUMBER delete partition NUMBER + +Report bugs to bug-parted@gnu.org +""" + +HELP_PARTED_RESIZE = """ +Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...] +Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in +interactive mode. 
+ +OPTIONs: + + +COMMANDs: + + quit exit program + rescue START END rescue a lost partition near START + and END + resize NUMBER START END resize partition NUMBER and its file + system + resizepart NUMBER END resize partition NUMBER + rm NUMBER delete partition NUMBER + +Report bugs to bug-parted@gnu.org +""" + +HELP_GROWPART_RESIZE = """ +growpart disk partition + rewrite partition table so that partition takes up all the space it can + options: + -h | --help print Usage and exit + + -u | --update R update the the kernel partition table info after growing + this requires kernel support and 'partx --update' + R is one of: + - 'auto' : [default] update partition if possible + + Example: + - growpart /dev/sda 1 + Resize partition 1 on /dev/sda +""" + +HELP_GROWPART_NO_RESIZE = """ +growpart disk partition + rewrite partition table so that partition takes up all the space it can + options: + -h | --help print Usage and exit + + Example: + - growpart /dev/sda 1 + Resize partition 1 on /dev/sda +""" + +class TestDisabled(MockerTestCase): + def setUp(self): + super(TestDisabled, self).setUp() + self.name = "growpart" + self.cloud_init = None + self.log = logging.getLogger("TestDisabled") + self.args = [] + + self.handle = cc_growpart.handle + + def test_mode_off(self): + #Test that nothing is done if mode is off. + config = {'growpart': {'mode': 'off'}} + self.mocker.replay() + + self.handle(self.name, config, self.cloud_init, self.log, self.args) + + def test_no_config(self): + #Test that nothing is done if no 'growpart' config + config = { } + self.mocker.replay() + + self.handle(self.name, config, self.cloud_init, self.log, self.args) + + +class TestConfig(MockerTestCase): + def setUp(self): + super(TestConfig, self).setUp() + self.name = "growpart" + self.paths = None + self.cloud = cloud.Cloud(None, self.paths, None, None, None) + self.log = logging.getLogger("TestConfig") + self.args = [] + os.environ = {} + + self.cloud_init = None + self.handle = cc_growpart.handle + + # Order must be correct + self.mocker.order() + + def test_no_resizers_auto_is_fine(self): + subp = self.mocker.replace(util.subp, passthrough=False) + subp(['parted', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_PARTED_NO_RESIZE,"")) + subp(['growpart', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.replay() + + config = {'growpart': {'mode': 'auto'}} + self.handle(self.name, config, self.cloud_init, self.log, self.args) + + def test_no_resizers_mode_growpart_is_exception(self): + subp = self.mocker.replace(util.subp, passthrough=False) + subp(['growpart', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.replay() + + config = {'growpart': {'mode': "growpart"}} + self.assertRaises(ValueError, self.handle, self.name, config, + self.cloud_init, self.log, self.args) + + def test_mode_auto_prefers_parted(self): + subp = self.mocker.replace(util.subp, passthrough=False) + subp(['parted', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_PARTED_RESIZE,"")) + self.mocker.replay() + + ret = cc_growpart.resizer_factory(mode="auto") + self.assertTrue(isinstance(ret, cc_growpart.ResizeParted)) + +# vi: ts=4 expandtab -- cgit v1.2.3 From b25c943ac22d457891cd6cfa240ca83aa03e4542 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 4 Mar 2013 23:18:37 -0500 Subject: test of resize, a couple small fixes --- cloudinit/config/cc_growpart.py | 12 +++- .../test_handler/test_handler_growpart.py | 71 ++++++++++++++++++++++ 2 files 
changed, 81 insertions(+), 2 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index d49159ed..96e72350 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -17,6 +17,7 @@ # along with this program. If not, see . import os.path +import os import re import stat @@ -152,9 +153,16 @@ def resize(resizer, devices, log): log.debug("unable to turn %s into device: %s" % (devent, e)) continue - if not stat.S_ISBLK(os.stat(blockdev).st_mode): + try: + statret = os.stat(blockdev) + except OSError as e: + log.debug("device '%s' for '%s' failed stat" % + (blockdev, devent)) + continue + + if not stat.S_ISBLK(statret.st_mode): log.debug("device '%s' for '%s' is not a block device" % - (devent, blockdev)) + (blockdev, devent)) continue try: diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 9d2a8dae..7fb58a06 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -6,9 +6,12 @@ from cloudinit import util from cloudinit.config import cc_growpart +import errno import logging import os import mocker +import re +import stat # growpart: # mode: auto # off, on, auto, 'growpart', 'parted' @@ -94,7 +97,11 @@ class TestDisabled(MockerTestCase): def test_mode_off(self): #Test that nothing is done if mode is off. + + # this really only verifies that resizer_factory isn't called config = {'growpart': {'mode': 'off'}} + self.mocker.replace(cc_growpart.resizer_factory, + passthrough=False) self.mocker.replay() self.handle(self.name, config, self.cloud_init, self.log, self.args) @@ -102,6 +109,8 @@ class TestDisabled(MockerTestCase): def test_no_config(self): #Test that nothing is done if no 'growpart' config config = { } + self.mocker.replace(cc_growpart.resizer_factory, + passthrough=False) self.mocker.replay() self.handle(self.name, config, self.cloud_init, self.log, self.args) @@ -153,4 +162,66 @@ class TestConfig(MockerTestCase): ret = cc_growpart.resizer_factory(mode="auto") self.assertTrue(isinstance(ret, cc_growpart.ResizeParted)) + +class TestResize(MockerTestCase): + def setUp(self): + super(TestResize, self).setUp() + self.name = "growpart" + self.log = logging.getLogger("TestResize") + + # Order must be correct + self.mocker.order() + + def test_simple_devices(self): + #test simple device list + # this patches out devent2dev, os.stat, and device_part_info + # so in the end, doesn't test a lot + devs = ["/dev/XXda1", "/dev/YYda2"] + devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5L, + st_nlink=1, st_uid=0, st_gid=6, st_size=0, + st_atime=0, st_mtime=0, st_ctime=0) + enoent = ["/dev/NOENT"] + real_stat = os.stat + resize_calls = [] + + class myresizer(): + def resize(self, dev, part): + resize_calls.append((dev, part,)) + return + + def mystat(path): + if path in devs: + return devstat_ret + if path in enoent: + e = OSError("%s: does not exist" % path) + e.errno = errno.ENOENT + raise e + return real_stat(path) + + try: + opinfo = cc_growpart.device_part_info + cc_growpart.device_part_info = simple_device_part_info + os.stat = mystat + + resized = cc_growpart.resize(myresizer(), devs + enoent, self.log) + + self.assertEqual(devs, resized) + self.assertEqual(resize_calls, + [("/dev/XXda", "1",), ("/dev/YYda", "2",)]) + finally: + cc_growpart.device_part_info = opinfo + os.stat = real_stat + + +def simple_device_part_info(devpath): + # simple stupid return (/dev/vda, 1) for 
/dev/vda + ret = re.search("([^0-9]*)([0-9]*)$", devpath) + x = (ret.group(1), ret.group(2)) + return x + +class Bunch: + def __init__(self, **kwds): + self.__dict__.update(kwds) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 34a76e2be8a3eb4f8490183f12a67a01276575ff Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 16:50:34 +0000 Subject: change default (no 'growpart' in config) to use 'auto' and '/' --- cloudinit/config/cc_growpart.py | 26 +++++++++------- .../test_handler/test_handler_growpart.py | 35 +++++++++++++++------- 2 files changed, 40 insertions(+), 21 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 96e72350..6d647be1 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -26,6 +26,12 @@ from cloudinit import util frequency = PER_ALWAYS +DEFAULT_CONFIG = { + 'mode': 'auto', + 'devices': ['/'], +} + + def resizer_factory(mode): resize_class = None if mode == "auto": @@ -144,7 +150,7 @@ def devent2dev(devent): return result[0] -def resize(resizer, devices, log): +def resize_devices(resizer, devices, log): resized = [] for devent in devices: try: @@ -185,8 +191,9 @@ def resize(resizer, devices, log): def handle(name, cfg, _cloud, log, _args): if 'growpart' not in cfg: - log.debug("Skipping module named %s, no growpart entry", name) - return + log.debug("No 'growpart' entry in cfg. Using default: %s" % + DEFAULT_CONFIG) + cfg['growpart'] = DEFAULT_CONFIG mycfg = cfg.get('growpart') if not isinstance(mycfg, dict): @@ -198,6 +205,11 @@ def handle(name, cfg, _cloud, log, _args): log.debug("growpart disabled: mode=%s" % mode) return + devices = util.get_cfg_option_list(cfg, "devices", ["/"]) + if not len(devices): + log.debug("growpart: empty device list") + return + try: resizer = resizer_factory(mode) except (ValueError, TypeError) as e: @@ -206,14 +218,8 @@ def handle(name, cfg, _cloud, log, _args): raise e return - devices = util.get_cfg_option_list(cfg, "devices", ["/"]) - if not len(devices): - log.debug("growpart: empty device list") - return - - resized = resize(resizer, devices, log) + resized = resize_devices(resizer, devices, log) log.debug("resized: %s" % resized) RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) - diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 7fb58a06..9a033d6b 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -106,16 +106,6 @@ class TestDisabled(MockerTestCase): self.handle(self.name, config, self.cloud_init, self.log, self.args) - def test_no_config(self): - #Test that nothing is done if no 'growpart' config - config = { } - self.mocker.replace(cc_growpart.resizer_factory, - passthrough=False) - self.mocker.replay() - - self.handle(self.name, config, self.cloud_init, self.log, self.args) - - class TestConfig(MockerTestCase): def setUp(self): super(TestConfig, self).setUp() @@ -162,6 +152,28 @@ class TestConfig(MockerTestCase): ret = cc_growpart.resizer_factory(mode="auto") self.assertTrue(isinstance(ret, cc_growpart.ResizeParted)) + def test_handle_with_no_growpart_entry(self): + #if no 'growpart' entry in config, then mode=auto should be used + + myresizer = object() + + factory = self.mocker.replace(cc_growpart.resizer_factory, + passthrough=False) + rsdevs = self.mocker.replace(cc_growpart.resize_devices, + passthrough=False) + factory("auto") + self.mocker.result(myresizer) + 
rsdevs(myresizer, ["/"], self.log) + self.mocker.result(["/"]) + self.mocker.replay() + + try: + orig_resizers = cc_growpart.RESIZERS + cc_growpart.RESIZERS = (('mysizer', object),) + self.handle(self.name, {}, self.cloud_init, self.log, self.args) + finally: + cc_growpart.RESIZERS = orig_resizers + class TestResize(MockerTestCase): def setUp(self): @@ -203,7 +215,8 @@ class TestResize(MockerTestCase): cc_growpart.device_part_info = simple_device_part_info os.stat = mystat - resized = cc_growpart.resize(myresizer(), devs + enoent, self.log) + resized = cc_growpart.resize_devices(myresizer(), devs + enoent, + self.log) self.assertEqual(devs, resized) self.assertEqual(resize_calls, -- cgit v1.2.3 From 00416dd5cd8f0fb507f64a7f0035fbe706c3f279 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 18:32:24 +0000 Subject: remove 'log' passing. call growpart with --dry-run first. growrun --dry-run will exit 1 if it wouldn't do anything. so call it, check for '1' and if no change, then just return. --- cloudinit/config/cc_growpart.py | 27 ++++++++++++++++------ .../test_handler/test_handler_growpart.py | 5 ++-- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 6d647be1..b88e6f6a 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -22,6 +22,7 @@ import re import stat from cloudinit.settings import PER_ALWAYS +from cloudinit import log as logging from cloudinit import util frequency = PER_ALWAYS @@ -31,6 +32,7 @@ DEFAULT_CONFIG = { 'devices': ['/'], } +LOG = logging.getLogger(__name__) def resizer_factory(mode): resize_class = None @@ -102,9 +104,20 @@ class ResizeGrowPart(object): return False def resize(self, blockdev, part): + try: + util.subp(["growpart", '--dry-run', blockdev, part]) + except util.ProcessExecutionError as e: + if e.exit_code != 1: + logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % + (blockdev, part))) + raise ResizeFailedException(e) + LOG.debug("no change necessary on (%s,%s)" % (blockdev, part)) + return + try: util.subp(["growpart", blockdev, part]) except util.ProcessExecutionError as e: + logexc(LOG, "Failed: growpart %s %s" % (blockdev, part)) raise ResizeFailedException(e) @@ -150,38 +163,38 @@ def devent2dev(devent): return result[0] -def resize_devices(resizer, devices, log): +def resize_devices(resizer, devices): resized = [] for devent in devices: try: blockdev = devent2dev(devent) except ValueError as e: - log.debug("unable to turn %s into device: %s" % (devent, e)) + LOG.debug("unable to turn %s into device: %s" % (devent, e)) continue try: statret = os.stat(blockdev) except OSError as e: - log.debug("device '%s' for '%s' failed stat" % + LOG.debug("device '%s' for '%s' failed stat" % (blockdev, devent)) continue if not stat.S_ISBLK(statret.st_mode): - log.debug("device '%s' for '%s' is not a block device" % + LOG.debug("device '%s' for '%s' is not a block device" % (blockdev, devent)) continue try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - log.debug("failed to get part_info for (%s, %s): %s" % + LOG.debug("failed to get part_info for (%s, %s): %s" % (devent, blockdev, e)) continue try: resizer.resize(disk, ptnum) except ResizeFailedException as e: - log.warn("failed to resize: devent=%s, disk=%s, ptnum=%s: %s", + LOG.warn("failed to resize: devent=%s, disk=%s, ptnum=%s: %s", devent, disk, ptnum, e) resized.append(devent) @@ -218,7 +231,7 @@ def handle(name, cfg, _cloud, log, _args): raise 
e return - resized = resize_devices(resizer, devices, log) + resized = resize_devices(resizer, devices) log.debug("resized: %s" % resized) RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 9a033d6b..3cf3efb7 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -163,7 +163,7 @@ class TestConfig(MockerTestCase): passthrough=False) factory("auto") self.mocker.result(myresizer) - rsdevs(myresizer, ["/"], self.log) + rsdevs(myresizer, ["/"]) self.mocker.result(["/"]) self.mocker.replay() @@ -215,8 +215,7 @@ class TestResize(MockerTestCase): cc_growpart.device_part_info = simple_device_part_info os.stat = mystat - resized = cc_growpart.resize_devices(myresizer(), devs + enoent, - self.log) + resized = cc_growpart.resize_devices(myresizer(), devs + enoent) self.assertEqual(devs, resized) self.assertEqual(resize_calls, -- cgit v1.2.3 From 6f31822b60f6878a65a4adb15c1d2b4cc4edc81d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 18:48:54 +0000 Subject: change default mode to 'auto' --- cloudinit/config/cc_growpart.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index b88e6f6a..65679242 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -213,7 +213,7 @@ def handle(name, cfg, _cloud, log, _args): log.warn("'growpart' in config was not a dict") return - mode = mycfg.get('mode') + mode = mycfg.get('mode', "auto") if util.is_false(mode): log.debug("growpart disabled: mode=%s" % mode) return -- cgit v1.2.3 From 626f49e427cdbe91285dc337d134bfe2011fd268 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 18:49:03 +0000 Subject: add doc --- doc/examples/cloud-config-growpart.txt | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 doc/examples/cloud-config-growpart.txt diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt new file mode 100644 index 00000000..705f02c2 --- /dev/null +++ b/doc/examples/cloud-config-growpart.txt @@ -0,0 +1,24 @@ +#cloud-config +# +# growpart entry is a dict, if it is not present at all +# in config, then the default is used ({'mode': 'auto', 'devices': ['/']}) +# +# mode: +# values: +# * auto: use any option possible (growpart or parted) +# if none are available, do not warn, but debug. +# * growpart: use growpart to grow partitions +# if growpart is not available, this is an error. +# * parted: use parted (parted resizepart) to resize partitions +# if parted is not available, this is an error. +# * off, false +# +# devices: +# a list of things to resize. +# items can be filesystem paths or devices (in /dev) +# examples: +# devices: [/, /dev/vdb1] +# +growpart: + mode: auto + devices: ['/'] -- cgit v1.2.3 From f9fe61cb0fff4391212c33ff5fc8af7402ad112c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 16:26:01 -0500 Subject: pep8, pylint, make resize_devices return more useful resize_devices now contains what action occurred for each entry. 
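(For illustration only; this is not code from the patch. Assuming the interface sketched in the diff below, resize_devices() now returns one (device-entry, action, message) tuple per requested device, with the action drawn from the RESIZE enum, so callers can log or react per entry. The device names and sizes shown here are hypothetical.)

    from cloudinit.config import cc_growpart

    # shape of a hypothetical return value from resize_devices()
    results = [
        ("/", cc_growpart.RESIZE.CHANGED,
         "changed (/dev/vda, 1) from 4294967296 to 21474836480"),
        ("/dev/vdb1", cc_growpart.RESIZE.NOCHANGE,
         "no change necessary (/dev/vdb, 1)"),
    ]

    for (entry, action, msg) in results:
        if action == cc_growpart.RESIZE.FAILED:
            print("resize of %s failed: %s" % (entry, msg))
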
--- cloudinit/config/cc_growpart.py | 102 ++++++++++++++------- .../test_handler/test_handler_growpart.py | 28 ++++-- 2 files changed, 89 insertions(+), 41 deletions(-) diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 65679242..b6e1fd37 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -16,24 +16,33 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -import os.path import os +import os.path import re import stat -from cloudinit.settings import PER_ALWAYS from cloudinit import log as logging +from cloudinit.settings import PER_ALWAYS from cloudinit import util frequency = PER_ALWAYS DEFAULT_CONFIG = { - 'mode': 'auto', - 'devices': ['/'], + 'mode': 'auto', + 'devices': ['/'], } + +def enum(**enums): + return type('Enum', (), enums) + + +RESIZE = enum(SKIPPED="SKIPPED", CHANGED="CHANGED", NOCHANGE="NOCHANGE", + FAILED="FAILED") + LOG = logging.getLogger(__name__) + def resizer_factory(mode): resize_class = None if mode == "auto": @@ -75,19 +84,22 @@ class ResizeParted(object): try: (out, _err) = util.subp(["parted", "--help"], env=myenv) - if re.search("COMMAND.*resizepart\s+", out, re.DOTALL): + if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL): return True except util.ProcessExecutionError: pass return False - def resize(self, blockdev, part): + def resize(self, diskdev, partnum, partdev): + before = get_size(partdev) try: - util.subp(["parted", "resizepart", blockdev, part]) + util.subp(["parted", "resizepart", diskdev, partnum]) except util.ProcessExecutionError as e: raise ResizeFailedException(e) + return (before, get_size(partdev)) + class ResizeGrowPart(object): def available(self): @@ -96,30 +108,40 @@ class ResizeGrowPart(object): try: (out, _err) = util.subp(["growpart", "--help"], env=myenv) - if re.search("--update\s+", out, re.DOTALL): + if re.search(r"--update\s+", out, re.DOTALL): return True except util.ProcessExecutionError: pass return False - def resize(self, blockdev, part): + def resize(self, diskdev, partnum, partdev): + before = get_size(partdev) try: - util.subp(["growpart", '--dry-run', blockdev, part]) + util.subp(["growpart", '--dry-run', diskdev, partnum]) except util.ProcessExecutionError as e: if e.exit_code != 1: - logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % - (blockdev, part))) + util.logexc(LOG, ("Failed growpart --dry-run for (%s, %s)" % + (diskdev, partnum))) raise ResizeFailedException(e) - LOG.debug("no change necessary on (%s,%s)" % (blockdev, part)) - return + return (before, before) try: - util.subp(["growpart", blockdev, part]) + util.subp(["growpart", diskdev, partnum]) except util.ProcessExecutionError as e: - logexc(LOG, "Failed: growpart %s %s" % (blockdev, part)) + util.logexc(LOG, "Failed: growpart %s %s" % (diskdev, partnum)) raise ResizeFailedException(e) + return (before, get_size(partdev)) + + +def get_size(filename): + fd = os.open(filename, os.O_RDONLY) + try: + return os.lseek(fd, 0, os.SEEK_END) + finally: + os.close(fd) + def device_part_info(devpath): # convert an entry in /dev/ to parent disk and partition number @@ -164,45 +186,54 @@ def devent2dev(devent): def resize_devices(resizer, devices): - resized = [] + # returns a tuple of tuples containing (entry-in-devices, action, message) + info = [] for devent in devices: try: blockdev = devent2dev(devent) except ValueError as e: - LOG.debug("unable to turn %s into device: %s" % (devent, e)) + info.append((devent, RESIZE.SKIPPED, + 
"unable to convert to device: %s" % e,)) continue try: statret = os.stat(blockdev) except OSError as e: - LOG.debug("device '%s' for '%s' failed stat" % - (blockdev, devent)) + info.append((devent, RESIZE.SKIPPED, + "stat of '%s' failed: %s" % (blockdev, e),)) continue - + if not stat.S_ISBLK(statret.st_mode): - LOG.debug("device '%s' for '%s' is not a block device" % - (blockdev, devent)) + info.append((devent, RESIZE.SKIPPED, + "device '%s' not a block device" % blockdev,)) continue try: (disk, ptnum) = device_part_info(blockdev) except (TypeError, ValueError) as e: - LOG.debug("failed to get part_info for (%s, %s): %s" % - (devent, blockdev, e)) + info.append((devent, RESIZE.SKIPPED, + "device_part_info(%s) failed: %s" % (blockdev, e),)) continue try: - resizer.resize(disk, ptnum) - except ResizeFailedException as e: - LOG.warn("failed to resize: devent=%s, disk=%s, ptnum=%s: %s", - devent, disk, ptnum, e) + (old, new) = resizer.resize(disk, ptnum, blockdev) + if old == new: + info.append((devent, RESIZE.NOCHANGE, + "no change necessary (%s, %s)" % (disk, ptnum),)) + else: + info.append((devent, RESIZE.CHANGED, + "changed (%s, %s) from %s to %s" % + (disk, ptnum, old, new),)) - resized.append(devent) + except ResizeFailedException as e: + info.append((devent, RESIZE.FAILED, + "failed to resize: disk=%s, ptnum=%s: %s" % + (disk, ptnum, e),)) - return resized + return info -def handle(name, cfg, _cloud, log, _args): +def handle(_name, cfg, _cloud, log, _args): if 'growpart' not in cfg: log.debug("No 'growpart' entry in cfg. Using default: %s" % DEFAULT_CONFIG) @@ -232,7 +263,10 @@ def handle(name, cfg, _cloud, log, _args): return resized = resize_devices(resizer, devices) - log.debug("resized: %s" % resized) + for (entry, action, msg) in resized: + if action == RESIZE.CHANGED: + log.info("'%s' resized: %s" % (entry, msg)) + else: + log.debug("'%s' %s: %s" % (entry, action, msg)) RESIZERS = (('parted', ResizeParted), ('growpart', ResizeGrowPart)) - diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 3cf3efb7..74c254e0 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -164,7 +164,7 @@ class TestConfig(MockerTestCase): factory("auto") self.mocker.result(myresizer) rsdevs(myresizer, ["/"]) - self.mocker.result(["/"]) + self.mocker.result((("/", cc_growpart.RESIZE.CHANGED, "my-message",),)) self.mocker.replay() try: @@ -197,9 +197,11 @@ class TestResize(MockerTestCase): resize_calls = [] class myresizer(): - def resize(self, dev, part): - resize_calls.append((dev, part,)) - return + def resize(self, diskdev, partnum, partdev): + resize_calls.append((diskdev, partnum, partdev)) + if partdev == "/dev/YYda2": + return (1024, 2048) + return (1024, 1024) # old size, new size def mystat(path): if path in devs: @@ -217,9 +219,21 @@ class TestResize(MockerTestCase): resized = cc_growpart.resize_devices(myresizer(), devs + enoent) - self.assertEqual(devs, resized) - self.assertEqual(resize_calls, - [("/dev/XXda", "1",), ("/dev/YYda", "2",)]) + def find(name, res): + for f in res: + if f[0] == name: + return f + return None + + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, + find("/dev/XXda1", resized)[1]) + self.assertEqual(cc_growpart.RESIZE.CHANGED, + find("/dev/YYda2", resized)[1]) + self.assertEqual(cc_growpart.RESIZE.SKIPPED, + find(enoent[0], resized)[1]) + #self.assertEqual(resize_calls, + #[("/dev/XXda", "1", "/dev/XXda1"), + #("/dev/YYda", 
"2", "/dev/YYda2")]) finally: cc_growpart.device_part_info = opinfo os.stat = real_stat -- cgit v1.2.3 From b4fa42f0cb841b1f096bd8d654eda7230053935c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 5 Mar 2013 16:38:35 -0500 Subject: fix ChangeLog entries incorrectly added as 0.6.0 --- ChangeLog | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ChangeLog b/ChangeLog index 110cdec2..ed2373b9 100644 --- a/ChangeLog +++ b/ChangeLog @@ -43,6 +43,10 @@ - improve debian support in sysvinit scripts, package build scripts, and split sources.list template to be distro specific. - support for resizing btrfs root filesystems [Blair Zajac] + - fix issue when writing ssh keys to .ssh/authorized_keys (LP: #1136343) + - upstart: cloud-init-nonet.conf trap the TERM signal, so that dmesg or other + output does not get a 'killed by TERM signal' message. + 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 - config-drive: map hostname to local-hostname (LP: #1061964) @@ -501,7 +505,4 @@ - make the message on 'disable_root' more clear (LP: #672417) - do not require public key if private is given in ssh cloud-config (LP: #648905) - - fix issue when writing ssh keys to .ssh/authorized_keys (LP: #1136343) - - upstart: cloud-init-nonet.conf trap the TERM signal, so that dmesg or other - output does not get a 'killed by TERM signal' message. # vi: syntax=text textwidth=79 -- cgit v1.2.3 From dca9b6c94e10f9f42ad0f129ae6fd38ebb44f4b5 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 14:54:25 -0500 Subject: pep8 and pylint fixes --- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/distros/__init__.py | 6 +++--- cloudinit/distros/debian.py | 5 ++++- cloudinit/distros/rhel.py | 5 ++++- cloudinit/ssh_util.py | 10 ++++------ cloudinit/util.py | 2 +- doc/rtd/conf.py | 8 ++++---- tests/unittests/helpers.py | 1 + tests/unittests/test_datasource/test_nocloud.py | 2 +- .../test_handler/test_handler_growpart.py | 22 +++++++++++----------- tests/unittests/test_sshutil.py | 5 +++-- 11 files changed, 37 insertions(+), 31 deletions(-) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index aefa3aff..de0c0bbd 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -75,7 +75,7 @@ def load_power_state(cfg): ','.join(opt_map.keys())) delay = pstate.get("delay", "now") - if delay != "now" and not re.match("\+[0-9]+", delay): + if delay != "now" and not re.match(r"\+[0-9]+", delay): raise TypeError("power_state[delay] must be 'now' or '+m' (minutes).") args = ["shutdown", opt_map[mode], delay] diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 0db4aac7..2a2d8216 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -73,7 +73,7 @@ class Distro(object): self._apply_hostname(hostname) @abc.abstractmethod - def package_command(self, cmd, args=None): + def package_command(self, cmd, args=None, pkgs=None): raise NotImplementedError() @abc.abstractmethod @@ -370,7 +370,7 @@ class Distro(object): # Import SSH keys if 'ssh_authorized_keys' in kwargs: keys = set(kwargs['ssh_authorized_keys']) or [] - ssh_util.setup_user_keys(keys, name, key_prefix=None) + ssh_util.setup_user_keys(keys, name, options=None) return True @@ -776,7 +776,7 @@ def normalize_users_groups(cfg, distro): # Just add it on at the end... 
base_users.append({'name': 'default'}) elif isinstance(base_users, (dict)): - base_users['default'] = base_users.get('default', True) + base_users['default'] = dict(base_users).get('default', True) elif isinstance(base_users, (str, basestring)): # Just append it on to be re-parsed later base_users += ",default" diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 1a8e927b..1f2848d2 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -142,7 +142,10 @@ class Distro(distros.Distro): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def package_command(self, command, args=None, pkgs=[]): + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + e = os.environ.copy() # See: http://tiny.cc/kg91fw # Or: http://tiny.cc/mh91fw diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py index 2f91e386..9fee5fd1 100644 --- a/cloudinit/distros/rhel.py +++ b/cloudinit/distros/rhel.py @@ -208,7 +208,10 @@ class Distro(distros.Distro): # This ensures that the correct tz will be used for the system util.copy(tz_file, self.tz_local_fn) - def package_command(self, command, args=None, pkgs=[]): + def package_command(self, command, args=None, pkgs=None): + if pkgs is None: + pkgs = [] + cmd = ['yum'] # If enabled, then yum will be tolerant of errors on the command line # with regard to packages. diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 65fab117..95133236 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -19,9 +19,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from StringIO import StringIO - -import csv import os import pwd @@ -42,6 +39,7 @@ VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", "ecdsa-sha2-nistp384-cert-v01@openssh.com", "ecdsa-sha2-nistp521-cert-v01@openssh.com") + class AuthKeyLine(object): def __init__(self, source, keytype=None, base64=None, comment=None, options=None): @@ -141,14 +139,14 @@ class AuthKeyLineParser(object): ent = line.strip() try: (keytype, base64, comment) = parse_ssh_key(ent) - except TypeError as e: + except TypeError: (keyopts, remain) = self._extract_options(ent) if options is None: options = keyopts - + try: (keytype, base64, comment) = parse_ssh_key(remain) - except TypeError as e: + except TypeError: return AuthKeyLine(src_line) return AuthKeyLine(src_line, keytype=keytype, base64=base64, diff --git a/cloudinit/util.py b/cloudinit/util.py index d0a6f81c..afde2066 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1530,7 +1530,7 @@ def get_proc_env(pid): fn = os.path.join("/proc/", str(pid), "environ") try: contents = load_file(fn) - toks = contents.split("\0") + toks = contents.split("\x00") for tok in toks: if tok == "": continue diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index 87fc40ab..c9ae79f4 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -17,13 +17,13 @@ from cloudinit import version # General information about the project. project = 'Cloud-Init' -# -- General configuration ----------------------------------------------------- +# -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
+# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.intersphinx', ] @@ -55,7 +55,7 @@ exclude_patterns = [] # output. They are ignored by default. show_authors = False -# -- Options for HTML output --------------------------------------------------- +# -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 91a50e18..904677f1 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -183,6 +183,7 @@ class FilesystemMockingTestCase(ResourceUsingTestCase): setattr(mod, f, trap_func) self.patched_funcs.append((mod, f, func)) + def populate_dir(path, files): os.makedirs(path) for (name, content) in files.iteritems(): diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 28e0a472..62fc5358 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -1,7 +1,7 @@ from cloudinit import helpers -from tests.unittests.helpers import populate_dir from cloudinit.sources import DataSourceNoCloud from cloudinit import util +from tests.unittests.helpers import populate_dir from mocker import MockerTestCase import os diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 74c254e0..325244f2 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -1,7 +1,6 @@ from mocker import MockerTestCase from cloudinit import cloud -from cloudinit import helpers from cloudinit import util from cloudinit.config import cc_growpart @@ -9,9 +8,7 @@ from cloudinit.config import cc_growpart import errno import logging import os -import mocker import re -import stat # growpart: # mode: auto # off, on, auto, 'growpart', 'parted' @@ -85,6 +82,7 @@ growpart disk partition Resize partition 1 on /dev/sda """ + class TestDisabled(MockerTestCase): def setUp(self): super(TestDisabled, self).setUp() @@ -106,6 +104,7 @@ class TestDisabled(MockerTestCase): self.handle(self.name, config, self.cloud_init, self.log, self.args) + class TestConfig(MockerTestCase): def setUp(self): super(TestConfig, self).setUp() @@ -125,9 +124,9 @@ class TestConfig(MockerTestCase): def test_no_resizers_auto_is_fine(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_NO_RESIZE,"")) + self.mocker.result((HELP_PARTED_NO_RESIZE, "")) subp(['growpart', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.result((HELP_GROWPART_NO_RESIZE, "")) self.mocker.replay() config = {'growpart': {'mode': 'auto'}} @@ -136,7 +135,7 @@ class TestConfig(MockerTestCase): def test_no_resizers_mode_growpart_is_exception(self): subp = self.mocker.replace(util.subp, passthrough=False) subp(['growpart', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_GROWPART_NO_RESIZE,"")) + self.mocker.result((HELP_GROWPART_NO_RESIZE, "")) self.mocker.replay() config = {'growpart': {'mode': "growpart"}} @@ -146,7 +145,7 @@ class TestConfig(MockerTestCase): def test_mode_auto_prefers_parted(self): subp = self.mocker.replace(util.subp, passthrough=False) 
subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_RESIZE,"")) + self.mocker.result((HELP_PARTED_RESIZE, "")) self.mocker.replay() ret = cc_growpart.resizer_factory(mode="auto") @@ -173,7 +172,7 @@ class TestConfig(MockerTestCase): self.handle(self.name, {}, self.cloud_init, self.log, self.args) finally: cc_growpart.RESIZERS = orig_resizers - + class TestResize(MockerTestCase): def setUp(self): @@ -196,7 +195,7 @@ class TestResize(MockerTestCase): real_stat = os.stat resize_calls = [] - class myresizer(): + class myresizer(object): def resize(self, diskdev, partnum, partdev): resize_calls.append((diskdev, partnum, partdev)) if partdev == "/dev/YYda2": @@ -224,7 +223,7 @@ class TestResize(MockerTestCase): if f[0] == name: return f return None - + self.assertEqual(cc_growpart.RESIZE.NOCHANGE, find("/dev/XXda1", resized)[1]) self.assertEqual(cc_growpart.RESIZE.CHANGED, @@ -244,7 +243,8 @@ def simple_device_part_info(devpath): ret = re.search("([^0-9]*)([0-9]*)$", devpath) x = (ret.group(1), ret.group(2)) return x - + + class Bunch: def __init__(self, **kwds): self.__dict__.update(kwds) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 2415d06f..d8662cac 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -1,5 +1,5 @@ -from unittest import TestCase from cloudinit import ssh_util +from unittest import TestCase VALID_CONTENT = { @@ -34,6 +34,7 @@ TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding," 'command="echo \'Please login as the user \"ubuntu\" rather than the' 'user \"root\".\';echo;sleep 10"') + class TestAuthKeyLineParser(TestCase): def test_simple_parse(self): # test key line with common 3 fields (keytype, base64, comment) @@ -61,7 +62,7 @@ class TestAuthKeyLineParser(TestCase): self.assertFalse(key.options) self.assertFalse(key.comment) self.assertEqual(key.keytype, ktype) - + def test_parse_with_keyoptions(self): # test key line with options in it parser = ssh_util.AuthKeyLineParser() -- cgit v1.2.3 From 6586b35f348ba089bba00e6bebb4ca1b14f41a19 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 15:30:38 -0500 Subject: allow customization of apt-get command, add --force-unsafe-io This allows the customization of the apt-get command used for installing packages, and also adds '--force-unsafe-io'. Because this is spawned from cloud-init, it seems to make sense as a first boot package installation option. --- ChangeLog | 1 + cloudinit/distros/debian.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/ChangeLog b/ChangeLog index 5ff305a1..d035a7a3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -47,6 +47,7 @@ - upstart: cloud-init-nonet.conf trap the TERM signal, so that dmesg or other output does not get a 'killed by TERM signal' message. 
- support resizing partitions via growpart or parted (LP: #1136936) + - allow specifying apt-get command in distro config ('apt_get_command') 0.7.1: - sysvinit: fix missing dependency in cloud-init job for RHEL 5.6 diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 1f2848d2..4b779d57 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -33,6 +33,10 @@ from cloudinit.settings import PER_INSTANCE LOG = logging.getLogger(__name__) +APT_GET_COMMAND = ('apt-get', '--option=Dpkg::Options::=--force-confold', + '--option=Dpkg::options::=--force-unsafe-io', + '--assume-yes', '--quiet') + class Distro(distros.Distro): hostname_conf_fn = "/etc/hostname" @@ -150,8 +154,7 @@ class Distro(distros.Distro): # See: http://tiny.cc/kg91fw # Or: http://tiny.cc/mh91fw e['DEBIAN_FRONTEND'] = 'noninteractive' - cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold', - '--assume-yes', '--quiet'] + cmd = list(self.get_option("apt_get_command", APT_GET_COMMAND)) if args and isinstance(args, str): cmd.append(args) -- cgit v1.2.3 From be8953bf9a27462adb5ce0c5ef6485f0cee47b48 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 15:54:59 -0500 Subject: fix a pylint complaint in test_handler_growpart E1103: 81,44:TestWriteFile.test_basic_usage: Instance of 'Bunch' has no 'st_mode' member (but some types could not be inferred) so, if it wants st_mode, for now just give it one. --- tests/unittests/test_handler/test_handler_growpart.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 325244f2..5df93570 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -246,6 +246,7 @@ def simple_device_part_info(devpath): class Bunch: + st_mode = None # fix pylint complaint def __init__(self, **kwds): self.__dict__.update(kwds) -- cgit v1.2.3 From 8013c284e82349246b2274f5475c138323fd7c55 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 7 Mar 2013 16:00:35 -0500 Subject: pep8 --- tests/unittests/test_handler/test_handler_growpart.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index 5df93570..b1b872b0 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -247,6 +247,7 @@ def simple_device_part_info(devpath): class Bunch: st_mode = None # fix pylint complaint + def __init__(self, **kwds): self.__dict__.update(kwds) -- cgit v1.2.3
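
(A minimal sketch under stated assumptions, not code from the patch: roughly how a configured 'apt_get_command' is expected to be turned into the argv that package_command() hands off for execution. The 'install' sub-command and the package names are hypothetical; the default tuple mirrors APT_GET_COMMAND from the diff above.)

    DEFAULT_APT_GET_COMMAND = (
        'apt-get', '--option=Dpkg::Options::=--force-confold',
        '--option=Dpkg::options::=--force-unsafe-io',
        '--assume-yes', '--quiet')

    def build_apt_argv(configured=None, command='install', pkgs=None):
        # fall back to the built-in default when the distro config does
        # not provide an 'apt_get_command' override
        cmd = list(configured or DEFAULT_APT_GET_COMMAND)
        cmd.append(command)
        cmd.extend(pkgs or [])
        return cmd

    # e.g. build_apt_argv(pkgs=['htop', 'tmux']) ->
    #   ['apt-get', ..., '--assume-yes', '--quiet', 'install', 'htop', 'tmux']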