From f94b16cea578062c8abf3dd3a1ada234fa671cd3 Mon Sep 17 00:00:00 2001
From: Robert Schweikert
Date: Thu, 15 Sep 2016 14:27:55 -0400
Subject: dmidecode: run dmidecode only on i?86 or x86_64 arch.

Dmidecode is not going to run successfully on anything other than an
x86 or x86_64 system, so just avoid running it anywhere else.
---
 cloudinit/util.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 7c37eb8f..6c5cf741 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2337,7 +2337,8 @@ def read_dmi_data(key):
 
     # running dmidecode can be problematic on some arches (LP: #1243287)
     uname_arch = os.uname()[4]
-    if uname_arch.startswith("arm") or uname_arch == "aarch64":
+    if not (uname_arch == "x86_64" or
+            (uname_arch.startswith("i") and uname_arch[2:] == "86")):
         LOG.debug("dmidata is not supported on %s", uname_arch)
         return None
 
--
cgit v1.2.3


From 80f5ec4be0f781b26eca51d90d51abfab396b3f6 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 19 Sep 2016 11:48:54 -0400
Subject: Adjust mounts and disk configuration for systemd.

The end result of all of these changes is to get mounts managed by
cloud-init to occur only after cloud-init.service is done. We need to
do that so that filesystems set up by cloud-init (in disk_setup) do not
get mounted by stale entries in /etc/fstab before the setup occurs.
This can occur in 2 ways:
 a.) a new instance with an old /etc/fstab
 b.) the same instance where a disk needs adjusting (an Azure resize
     will re-format the ephemeral disk).

The list of changes here is:
 - move the mounts and disk_setup modules to cloud-init.service rather
   than config. cloud-init.service runs earlier in boot, so it can get
   those mount points done earlier.
 - on systemd, add 'x-systemd.requires=cloud-init.service' to fstab
   options
 - cloud-init-local.service: add Before=basic.target
 - cloud-init.service:
   - extend After, Before, and Wants to multiple lines rather than one
     long line.
- sort consistently with cloud-init-local.service - add DefaultDependencies=no - add Before=default.target - add Conflicts=shutdown.target LP: #1611074 --- cloudinit/config/cc_mounts.py | 4 ++-- cloudinit/sources/DataSourceAzure.py | 7 +++++-- config/cloud.cfg | 4 ++-- systemd/cloud-init-local.service | 3 ++- systemd/cloud-init.service | 16 +++++++++++++--- 5 files changed, 24 insertions(+), 10 deletions(-) diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 2b981935..4084118b 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -265,7 +265,7 @@ def handle(_name, cfg, cloud, log, _args): # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno def_mnt_opts = "defaults,nobootwait" if cloud.distro.uses_systemd(): - def_mnt_opts = "defaults,nofail" + def_mnt_opts = "defaults,nofail,x-systemd.requires=cloud-init.service" defvals = [None, None, "auto", def_mnt_opts, "0", "2"] defvals = cfg.get("mount_default_fields", defvals) @@ -401,5 +401,5 @@ def handle(_name, cfg, cloud, log, _args): try: util.subp(("mount", "-a")) - except Exception: + except util.ProcessExecutionError: util.logexc(log, "Activating mounts via 'mount -a' failed") diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index dbc2bb68..b802b03e 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -252,7 +252,7 @@ class DataSourceAzureNet(sources.DataSource): cc_modules_override = support_new_ephemeral(self.sys_cfg) if cc_modules_override: - self.cfg['cloud_config_modules'] = cc_modules_override + self.cfg['cloud_init_modules'] = cc_modules_override return True @@ -283,11 +283,14 @@ def find_fabric_formatted_ephemeral_part(): device_location = potential_location break if device_location is None: + LOG.debug("no azure resource disk partition path found") return None ntfs_devices = util.find_devs_with("TYPE=ntfs") real_device = os.path.realpath(device_location) if real_device in ntfs_devices: return device_location + LOG.debug("'%s' existed (%s) but was not ntfs formated", + device_location, real_device) return None @@ -342,7 +345,7 @@ def support_new_ephemeral(cfg): LOG.debug("cloud-init will format ephemeral0.1 this boot.") LOG.debug("setting disk_setup and mounts modules 'always' for this boot") - cc_modules = cfg.get('cloud_config_modules') + cc_modules = cfg.get('cloud_init_modules') if not cc_modules: return None diff --git a/config/cloud.cfg b/config/cloud.cfg index 7c94ec5c..3b4c5383 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -30,6 +30,8 @@ cloud_init_modules: - write-files - growpart - resizefs + - disk_setup + - mounts - set_hostname - update_hostname - update_etc_hosts @@ -43,8 +45,6 @@ cloud_config_modules: # Emit the cloud config ready event # this can be used by upstart jobs for 'start on cloud-config'. 
- emit_upstart - - disk_setup - - mounts - ntp - ssh-import-id - locale diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service index b19eeaee..bc2db60e 100644 --- a/systemd/cloud-init-local.service +++ b/systemd/cloud-init-local.service @@ -4,9 +4,10 @@ DefaultDependencies=no Wants=local-fs.target Wants=network-pre.target After=local-fs.target -Conflicts=shutdown.target +Before=basic.target Before=network-pre.target Before=shutdown.target +Conflicts=shutdown.target [Service] Type=oneshot diff --git a/systemd/cloud-init.service b/systemd/cloud-init.service index 6fb655e6..1e392a39 100644 --- a/systemd/cloud-init.service +++ b/systemd/cloud-init.service @@ -1,9 +1,19 @@ [Unit] Description=Initial cloud-init job (metadata service crawler) -After=cloud-init-local.service networking.service -Before=network-online.target sshd.service sshd-keygen.service systemd-user-sessions.service +DefaultDependencies=no +Wants=cloud-init-local.service +Wants=local-fs.target +Wants=sshd-keygen.service +Wants=sshd.service +After=cloud-init-local.service +After=networking.service Requires=networking.service -Wants=local-fs.target cloud-init-local.service sshd.service sshd-keygen.service +Before=basic.target +Before=network-online.target +Before=sshd-keygen.service +Before=sshd.service +Before=systemd-user-sessions.service +Conflicts=shutdown.target [Service] Type=oneshot -- cgit v1.2.3 From 970dbd13f5ae40b0f95ea390b72d2b3426e8e4d9 Mon Sep 17 00:00:00 2001 From: LaMont Jones Date: Wed, 21 Sep 2016 10:31:40 -0400 Subject: net: support reading ipv6 dhcp config from initramfs This adds support for understanding 'dhcp6' as a protocol that can be written into /run/net-IFACE.cfg files by the initramfs. The end result is supporting ipv6 dhcp from initramfs boot all the way into iscsi root. LP: #1621615, #1621507 --- cloudinit/net/cmdline.py | 15 ++++++++++----- tests/unittests/test_net.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 822a020b..933317d5 100644 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -66,7 +66,9 @@ def _klibc_to_config_entry(content, mac_addrs=None): provided here. There is no good documentation on this unfortunately. DEVICE= is expected/required and PROTO should indicate if - this is 'static' or 'dhcp'. + this is 'static' or 'dhcp' or 'dhcp6' (LP: #1621507). + note that IPV6PROTO is also written by newer code to address the + possibility of both ipv4 and ipv6 getting addresses. """ if mac_addrs is None: @@ -86,7 +88,7 @@ def _klibc_to_config_entry(content, mac_addrs=None): else: proto = 'static' - if proto not in ('static', 'dhcp'): + if proto not in ('static', 'dhcp', 'dhcp6'): raise ValueError("Unexpected value for PROTO: %s" % proto) iface = { @@ -98,12 +100,15 @@ def _klibc_to_config_entry(content, mac_addrs=None): if name in mac_addrs: iface['mac_address'] = mac_addrs[name] - # originally believed there might be IPV6* values - for v, pre in (('ipv4', 'IPV4'),): + # Handle both IPv4 and IPv6 values + for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')): # if no IPV4ADDR or IPV6ADDR, then go on. 
if pre + "ADDR" not in data: continue - subnet = {'type': proto, 'control': 'manual'} + + # PROTO for ipv4, IPV6PROTO for ipv6 + cur_proto = data.get(pre + 'PROTO', proto) + subnet = {'type': cur_proto, 'control': 'manual'} # these fields go right on the subnet for key in ('NETMASK', 'BROADCAST', 'GATEWAY'): diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 41b9a6d0..78c080ca 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -53,6 +53,45 @@ DHCP_EXPECTED_1 = { 'dns_nameservers': ['192.168.122.1']}], } +DHCP6_CONTENT_1 = """ +DEVICE=eno1 +HOSTNAME= +DNSDOMAIN= +reason='PREINIT' +interface='eno1' +DEVICE=eno1 +HOSTNAME= +DNSDOMAIN= +reason='FAIL' +interface='eno1' +DEVICE=eno1 +HOSTNAME= +DNSDOMAIN= +reason='PREINIT6' +interface='eno1' +DEVICE=eno1 +IPV6PROTO=dhcp6 +IPV6ADDR=2001:67c:1562:8010:0:1:: +IPV6NETMASK=64 +IPV6DNS0=2001:67c:1562:8010::2:1 +IPV6DOMAINSEARCH= +HOSTNAME= +DNSDOMAIN= +reason='BOUND6' +interface='eno1' +new_ip6_address='2001:67c:1562:8010:0:1::' +new_ip6_prefixlen='64' +new_dhcp6_name_servers='2001:67c:1562:8010::2:1' +""" + +DHCP6_EXPECTED_1 = { + 'name': 'eno1', + 'type': 'physical', + 'subnets': [{'control': 'manual', + 'dns_nameservers': ['2001:67c:1562:8010::2:1'], + 'netmask': '64', + 'type': 'dhcp6'}]} + STATIC_CONTENT_1 = """ DEVICE='eth1' @@ -590,6 +629,10 @@ class TestCmdlineConfigParsing(TestCase): found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1) self.assertEqual(found, ('eth0', DHCP_EXPECTED_1)) + def test_cmdline_convert_dhcp6(self): + found = cmdline._klibc_to_config_entry(DHCP6_CONTENT_1) + self.assertEqual(found, ('eno1', DHCP6_EXPECTED_1)) + def test_cmdline_convert_static(self): found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1) self.assertEqual(found, ('eth1', STATIC_EXPECTED_1)) -- cgit v1.2.3 From 40a400e42603aa1b80d9f623bc779799b370c091 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 21 Sep 2016 15:45:45 -0400 Subject: subp: add 'update_env' argument In order for a caller to use 'env' argument of subp, they will realistically do: env = os.environ.copy() env['FOO'] = 'BZR' subp(cmd, env=env) This shortens that to be: subp(cmd, update_env={'FOO': 'BZR'}) Add tests, and update growpart tests to use mock when playing with os.environ. 
--- cloudinit/util.py | 9 ++++++- .../test_handler/test_handler_growpart.py | 4 ++- tests/unittests/test_util.py | 30 ++++++++++++++++++++-- 3 files changed, 39 insertions(+), 4 deletions(-) diff --git a/cloudinit/util.py b/cloudinit/util.py index 6c5cf741..05cb587c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1762,7 +1762,7 @@ def delete_dir_contents(dirname): def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, - logstring=False, decode="replace", target=None): + logstring=False, decode="replace", target=None, update_env=None): # not supported in cloud-init (yet), for now kept in the call signature # to ease maintaining code shared between cloud-init and curtin @@ -1773,6 +1773,13 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, rcs = [0] devnull_fp = None + + if update_env: + if env is None: + env = os.environ + env = env.copy() + env.update(update_env) + try: if target_path(target) != "/": args = ['chroot', target] + list(args) diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index e653488a..e28067de 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -81,11 +81,11 @@ class TestConfig(TestCase): self.cloud = cloud.Cloud(None, self.paths, None, None, None) self.log = logging.getLogger("TestConfig") self.args = [] - os.environ = {} self.cloud_init = None self.handle = cc_growpart.handle + @mock.patch.dict("os.environ", clear=True) def test_no_resizers_auto_is_fine(self): with mock.patch.object( util, 'subp', @@ -98,6 +98,7 @@ class TestConfig(TestCase): mockobj.assert_called_once_with( ['growpart', '--help'], env={'LANG': 'C'}) + @mock.patch.dict("os.environ", clear=True) def test_no_resizers_mode_growpart_is_exception(self): with mock.patch.object( util, 'subp', @@ -110,6 +111,7 @@ class TestConfig(TestCase): mockobj.assert_called_once_with( ['growpart', '--help'], env={'LANG': 'C'}) + @mock.patch.dict("os.environ", clear=True) def test_mode_auto_prefers_growpart(self): with mock.patch.object( util, 'subp', diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index d2031f59..30f603cb 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -223,8 +223,10 @@ class TestKeyValStrings(helpers.TestCase): class TestGetCmdline(helpers.TestCase): def test_cmdline_reads_debug_env(self): - os.environ['DEBUG_PROC_CMDLINE'] = 'abcd 123' - self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], util.get_cmdline()) + with mock.patch.dict("os.environ", + values={'DEBUG_PROC_CMDLINE': 'abcd 123'}): + ret = util.get_cmdline() + self.assertEqual("abcd 123", ret) class TestLoadYaml(helpers.TestCase): @@ -516,6 +518,7 @@ class TestSubp(helpers.TestCase): utf8_invalid = b'ab\xaadef' utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' + printenv = ['bash', '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] def printf_cmd(self, *args): # bash's printf supports \xaa. 
So does /usr/bin/printf @@ -566,6 +569,29 @@ class TestSubp(helpers.TestCase): self.assertEqual(err, data) self.assertEqual(out, b'') + def test_subp_reads_env(self): + with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): + out, err = util.subp(self.printenv + ['FOO'], capture=True) + self.assertEqual('FOO=BAR', out.splitlines()[0]) + + def test_subp_env_and_update_env(self): + out, err = util.subp( + self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, + env={'FOO': 'BAR'}, + update_env={'HOME': '/myhome', 'K2': 'V2'}) + self.assertEqual( + ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines()) + + def test_subp_update_env(self): + extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} + with mock.patch.dict("os.environ", values=extra): + out, err = util.subp( + self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, + update_env={'HOME': '/myhome', 'K2': 'V2'}) + + self.assertEqual( + ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines()) + def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) self.assertEqual(err, None) -- cgit v1.2.3 From 30d0adb71c9adadf437b2a1c69529ad9f44e75a8 Mon Sep 17 00:00:00 2001 From: Paul Meyer Date: Thu, 25 Aug 2016 16:43:46 -0700 Subject: Allow ephemeral drive to be unpartitioned If device has no partition table, the first line of output from `sgdisk -p ` will be "Creating new GPT entries.", instead of something like "Disk /dev/sdb: 266338304 sectors, 127.0 GiB". Also, protect against localized output by adjusting subp calls that parse sgdisk output to set LANG=C. --- cloudinit/config/cc_disk_setup.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index b642f1f8..39a23688 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -33,6 +33,8 @@ BLKID_CMD = util.which("blkid") BLKDEV_CMD = util.which("blockdev") WIPEFS_CMD = util.which("wipefs") +LANG_C_ENV = {'LANG': 'C'} + LOG = logging.getLogger(__name__) @@ -355,8 +357,11 @@ def get_mbr_hdd_size(device): def get_gpt_hdd_size(device): - out, _ = util.subp([SGDISK_CMD, '-p', device]) - return out.splitlines()[0].split()[2] + out, _ = util.subp([SGDISK_CMD, '-p', device], update_env=LANG_C_ENV) + for line in out.splitlines(): + if line.startswith("Disk"): + return line.split()[2] + raise Exception("Failed to get %s size from sgdisk" % (device)) def get_hdd_size(table_type, device): @@ -408,7 +413,7 @@ def check_partition_mbr_layout(device, layout): def check_partition_gpt_layout(device, layout): prt_cmd = [SGDISK_CMD, '-p', device] try: - out, _err = util.subp(prt_cmd) + out, _err = util.subp(prt_cmd, update_env=LANG_C_ENV) except Exception as e: raise Exception("Error running partition command on %s\n%s" % ( device, e)) -- cgit v1.2.3 From 1b71b474c0fc06e67aab8676268fd83d99091910 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Tue, 20 Sep 2016 14:13:25 -0700 Subject: systemd: Ensure that cloud-init-local happens before NetworkManager --- systemd/cloud-init-local.service | 1 + 1 file changed, 1 insertion(+) diff --git a/systemd/cloud-init-local.service b/systemd/cloud-init-local.service index bc2db60e..55834ba4 100644 --- a/systemd/cloud-init-local.service +++ b/systemd/cloud-init-local.service @@ -5,6 +5,7 @@ Wants=local-fs.target Wants=network-pre.target After=local-fs.target Before=basic.target +Before=NetworkManager.service Before=network-pre.target Before=shutdown.target 
 Conflicts=shutdown.target
--
cgit v1.2.3


From 0439d8a17d181a2546f2f7cb2d71a04bbb13b186 Mon Sep 17 00:00:00 2001
From: Robert Schweikert
Date: Thu, 15 Sep 2016 12:05:15 -0400
Subject: Decode unicode types in decode_binary

The test in decode_binary for six.text_type was incorrect as that
includes the unicode type in Python 2, which should actually be
decoded. When the type is string_types we now properly check only for
basestring and str in Python 2 and Python 3 respectively, and return
the given blob without making an attempt to decode.
---
 cloudinit/util.py            | 2 +-
 tests/unittests/test_util.py | 8 ++++++++
 2 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 05cb587c..eb3e5899 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -154,7 +154,7 @@ def target_path(target, path=None):
 
 def decode_binary(blob, encoding='utf-8'):
     # Converts a binary type into a text type using given encoding.
-    if isinstance(blob, six.text_type):
+    if isinstance(blob, six.string_types):
         return blob
     return blob.decode(encoding)
 
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 30f603cb..fc6b9d40 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -603,4 +603,12 @@ class TestSubp(helpers.TestCase):
         self.assertEqual("/target/my/path/",
                          util.target_path("/target/", "///my/path/"))
 
+
+class TestEncode(helpers.TestCase):
+    """Test the encoding functions"""
+    def test_decode_binary_plain_text_with_hex(self):
+        blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd'
+        text = util.decode_binary(blob)
+        self.assertEqual(text, blob)
+
 # vi: ts=4 expandtab
--
cgit v1.2.3


From 6d1edc3f5a18b328bdd307426056539d5b9071fd Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 28 Sep 2016 12:07:26 -0400
Subject: ntp: move to run after apt configuration

Since the ntp module may try to install packages, it needs to run
after apt is configured.

LP: #1628337
---
 config/cloud.cfg | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/config/cloud.cfg b/config/cloud.cfg
index 3b4c5383..d608dc86 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -45,13 +45,13 @@ cloud_config_modules:
 # Emit the cloud config ready event
 # this can be used by upstart jobs for 'start on cloud-config'.
  - emit_upstart
- - ntp
  - ssh-import-id
  - locale
  - set-passwords
  - grub-dpkg
  - apt-pipelining
  - apt-configure
+ - ntp
  - timezone
  - disable-ec2-metadata
  - runcmd
--
cgit v1.2.3


From 9f83bb8e80806d3dd79ba426474dc3c696e19a41 Mon Sep 17 00:00:00 2001
From: Ben Howard
Date: Fri, 19 Aug 2016 16:28:26 -0600
Subject: DigitalOcean: use meta-data for network configuration

On DigitalOcean, network information is provided via meta-data. This
changes the datasource to be a local datasource, meaning it will run
before fallback networking is configured.

The advantage of that is that, before networking is configured, the
datasource can bring up a network device with an IPv4 link-local
address and hit the metadata service that lives at 169.254.169.254 to
find its networking configuration. It then takes down the link-local
address and lets cloud-init configure networking.

The configuring of a network device to go looking for a metadata
service is gated by a check of data in the SMBIOS. This guarantees
that the code will not run on another system.
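In rough terms, the bootstrap sequence described above looks like the
following (a minimal sketch, assuming Python 3 and a first physical
NIC named eth0 — both illustrative; the real logic lives in
cloudinit/sources/helpers/digitalocean.py in the diff below):

    import json
    import random
    import subprocess
    from urllib.request import urlopen

    nic = 'eth0'  # illustrative; the helper picks the first physical NIC

    # Bring the NIC up with a random ip4LL address; on DigitalOcean the
    # link-local domain is per-droplet routed, so collisions are not a
    # practical risk.
    addr = '169.254.%d.%d/16' % (random.randint(1, 168),
                                 random.randint(0, 255))
    subprocess.check_call(['ip', 'addr', 'add', addr, 'dev', nic])
    subprocess.check_call(['ip', 'link', 'set', 'dev', nic, 'up'])

    # Fetch the droplet's meta-data, including its network configuration.
    md = json.load(urlopen('http://169.254.169.254/metadata/v1.json'))

    # Drop the link-local address again; cloud-init then configures
    # networking from the 'interfaces' section of the meta-data.
    subprocess.check_call(['ip', 'addr', 'flush', 'dev', nic])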
--- cloudinit/sources/DataSourceDigitalOcean.py | 101 +++--- cloudinit/sources/helpers/digitalocean.py | 218 +++++++++++++ .../unittests/test_datasource/test_digitalocean.py | 338 +++++++++++++++++---- 3 files changed, 543 insertions(+), 114 deletions(-) create mode 100644 cloudinit/sources/helpers/digitalocean.py diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index fc596e17..c5770d5d 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -18,13 +18,12 @@ # DigitalOcean Droplet API: # https://developers.digitalocean.com/documentation/metadata/ -import json - from cloudinit import log as logging from cloudinit import sources -from cloudinit import url_helper from cloudinit import util +import cloudinit.sources.helpers.digitalocean as do_helper + LOG = logging.getLogger(__name__) BUILTIN_DS_CONFIG = { @@ -36,11 +35,13 @@ BUILTIN_DS_CONFIG = { MD_RETRIES = 30 MD_TIMEOUT = 2 MD_WAIT_RETRY = 2 +MD_USE_IPV4LL = True class DataSourceDigitalOcean(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.distro = distro self.metadata = dict() self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}), @@ -48,80 +49,72 @@ class DataSourceDigitalOcean(sources.DataSource): self.metadata_address = self.ds_cfg['metadata_url'] self.retries = self.ds_cfg.get('retries', MD_RETRIES) self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) + self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL) self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) + self._network_config = None def _get_sysinfo(self): - # DigitalOcean embeds vendor ID and instance/droplet_id in the - # SMBIOS information - - LOG.debug("checking if instance is a DigitalOcean droplet") - - # Detect if we are on DigitalOcean and return the Droplet's ID - vendor_name = util.read_dmi_data("system-manufacturer") - if vendor_name != "DigitalOcean": - return (False, None) + return do_helper.read_sysinfo() - LOG.info("running on DigitalOcean") - - droplet_id = util.read_dmi_data("system-serial-number") - if droplet_id: - LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet" - "{}").format(droplet_id)) - else: - LOG.critical(("system identified via SMBIOS as a DigitalOcean " - "Droplet, but did not provide an ID. Please file a " - "support ticket at: " - "https://cloud.digitalocean.com/support/tickets/" - "new")) - - return (True, droplet_id) - - def get_data(self, apply_filter=False): + def get_data(self): (is_do, droplet_id) = self._get_sysinfo() # only proceed if we know we are on DigitalOcean if not is_do: return False - LOG.debug("reading metadata from {}".format(self.metadata_address)) - response = url_helper.readurl(self.metadata_address, - timeout=self.timeout, - sec_between=self.wait_retry, - retries=self.retries) + LOG.info("Running on digital ocean. 
droplet_id=%s" % droplet_id) - contents = util.decode_binary(response.contents) - decoded = json.loads(contents) + ipv4LL_nic = None + if self.use_ip4LL: + ipv4LL_nic = do_helper.assign_ipv4_link_local() - self.metadata = decoded - self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id) - self.metadata['local-hostname'] = decoded.get('hostname', droplet_id) - self.vendordata_raw = decoded.get("vendor_data", None) - self.userdata_raw = decoded.get("user_data", None) - return True + md = do_helper.read_metadata( + self.metadata_address, timeout=self.timeout, + sec_between=self.wait_retry, retries=self.retries) - def get_public_ssh_keys(self): - public_keys = self.metadata.get('public_keys', []) - if isinstance(public_keys, list): - return public_keys - else: - return [public_keys] + self.metadata_full = md + self.metadata['instance-id'] = md.get('droplet_id', droplet_id) + self.metadata['local-hostname'] = md.get('hostname', droplet_id) + self.metadata['interfaces'] = md.get('interfaces') + self.metadata['public-keys'] = md.get('public_keys') + self.metadata['availability_zone'] = md.get('region', 'default') + self.vendordata_raw = md.get("vendor_data", None) + self.userdata_raw = md.get("user_data", None) - @property - def availability_zone(self): - return self.metadata.get('region', 'default') + if ipv4LL_nic: + do_helper.del_ipv4_link_local(ipv4LL_nic) - @property - def launch_index(self): - return None + return True def check_instance_id(self, sys_cfg): return sources.instance_id_matches_system_uuid( self.get_instance_id(), 'system-serial-number') + @property + def network_config(self): + """Configure the networking. This needs to be done each boot, since + the IP information may have changed due to snapshot and/or + migration. + """ + + if self._network_config: + return self._network_config + + interfaces = self.metadata.get('interfaces') + LOG.debug(interfaces) + if not interfaces: + raise Exception("Unable to get meta-data from server....") + + nameservers = self.metadata_full['dns']['nameservers'] + self._network_config = do_helper.convert_network_configuration( + interfaces, nameservers) + return self._network_config + # Used to match classes to dependencies datasources = [ - (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )), ] diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py new file mode 100644 index 00000000..b0a721c2 --- /dev/null +++ b/cloudinit/sources/helpers/digitalocean.py @@ -0,0 +1,218 @@ +# vi: ts=4 expandtab +# +# Author: Ben Howard + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import json +import random + +from cloudinit import log as logging +from cloudinit import net as cloudnet +from cloudinit import url_helper +from cloudinit import util + +NIC_MAP = {'public': 'eth0', 'private': 'eth1'} + +LOG = logging.getLogger(__name__) + + +def assign_ipv4_link_local(nic=None): + """Bring up NIC using an address using link-local (ip4LL) IPs. On + DigitalOcean, the link-local domain is per-droplet routed, so there + is no risk of collisions. However, to be more safe, the ip4LL + address is random. + """ + + if not nic: + for cdev in sorted(cloudnet.get_devicelist()): + if cloudnet.is_physical(cdev): + nic = cdev + LOG.debug("assigned nic '%s' for link-local discovery", nic) + break + + if not nic: + raise RuntimeError("unable to find interfaces to access the" + "meta-data server. This droplet is broken.") + + addr = "169.254.{0}.{1}/16".format(random.randint(1, 168), + random.randint(0, 255)) + + ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic] + ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up'] + + if not util.which('ip'): + raise RuntimeError("No 'ip' command available to configure ip4LL " + "address") + + try: + (result, _err) = util.subp(ip_addr_cmd) + LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic) + + (result, _err) = util.subp(ip_link_cmd) + LOG.debug("brought device '%s' up", nic) + except Exception: + util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed." + " Droplet networking will be broken", addr, nic) + raise + + return nic + + +def del_ipv4_link_local(nic=None): + """Remove the ip4LL address. While this is not necessary, the ip4LL + address is extraneous and confusing to users. + """ + if not nic: + LOG.debug("no link_local address interface defined, skipping link " + "local address cleanup") + return + + LOG.debug("cleaning up ipv4LL address") + + ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic] + + try: + (result, _err) = util.subp(ip_addr_cmd) + LOG.debug("removed ip4LL addresses from %s", nic) + + except Exception as e: + util.logexc(LOG, "failed to remove ip4LL address from '%s'.", nic, e) + + +def convert_network_configuration(config, dns_servers): + """Convert the DigitalOcean Network description into Cloud-init's netconfig + format. 
+ + Example JSON: + {'public': [ + {'mac': '04:01:58:27:7f:01', + 'ipv4': {'gateway': '45.55.32.1', + 'netmask': '255.255.224.0', + 'ip_address': '45.55.50.93'}, + 'anchor_ipv4': { + 'gateway': '10.17.0.1', + 'netmask': '255.255.0.0', + 'ip_address': '10.17.0.9'}, + 'type': 'public', + 'ipv6': {'gateway': '....', + 'ip_address': '....', + 'cidr': 64}} + ], + 'private': [ + {'mac': '04:01:58:27:7f:02', + 'ipv4': {'gateway': '10.132.0.1', + 'netmask': '255.255.0.0', + 'ip_address': '10.132.75.35'}, + 'type': 'private'} + ] + } + """ + + def _get_subnet_part(pcfg, nameservers=None): + subpart = {'type': 'static', + 'control': 'auto', + 'address': pcfg.get('ip_address'), + 'gateway': pcfg.get('gateway')} + + if nameservers: + subpart['dns_nameservers'] = nameservers + + if ":" in pcfg.get('ip_address'): + subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'), + pcfg.get('cidr')) + else: + subpart['netmask'] = pcfg.get('netmask') + + return subpart + + all_nics = [] + for k in ('public', 'private'): + if k in config: + all_nics.extend(config[k]) + + macs_to_nics = cloudnet.get_interfaces_by_mac() + nic_configs = [] + + for nic in all_nics: + + mac_address = nic.get('mac') + sysfs_name = macs_to_nics.get(mac_address) + nic_type = nic.get('type', 'unknown') + # Note: the entry 'public' above contains a list, but + # the list will only ever have one nic inside it per digital ocean. + # If it ever had more than one nic, then this code would + # assign all 'public' the same name. + if_name = NIC_MAP.get(nic_type, sysfs_name) + + LOG.debug("mapped %s interface to %s, assigning name of %s", + mac_address, sysfs_name, if_name) + + ncfg = {'type': 'physical', + 'mac_address': mac_address, + 'name': if_name} + + subnets = [] + for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'): + raw_subnet = nic.get(netdef, None) + if not raw_subnet: + continue + + sub_part = _get_subnet_part(raw_subnet) + if nic_type == 'public' and 'anchor' not in netdef: + # add DNS resolvers to the public interfaces only + sub_part = _get_subnet_part(raw_subnet, dns_servers) + else: + # remove the gateway any non-public interfaces + if 'gateway' in sub_part: + del sub_part['gateway'] + + subnets.append(sub_part) + + ncfg['subnets'] = subnets + nic_configs.append(ncfg) + LOG.debug("nic '%s' configuration: %s", if_name, ncfg) + + return {'version': 1, 'config': nic_configs} + + +def read_metadata(url, timeout=2, sec_between=2, retries=30): + response = url_helper.readurl(url, timeout=timeout, + sec_between=sec_between, retries=retries) + if not response.ok(): + raise RuntimeError("unable to read metadata at %s" % url) + return json.loads(response.contents.decode()) + + +def read_sysinfo(): + # DigitalOcean embeds vendor ID and instance/droplet_id in the + # SMBIOS information + + # Detect if we are on DigitalOcean and return the Droplet's ID + vendor_name = util.read_dmi_data("system-manufacturer") + if vendor_name != "DigitalOcean": + return (False, None) + + droplet_id = util.read_dmi_data("system-serial-number") + if droplet_id: + LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s", + droplet_id) + else: + msg = ("system identified via SMBIOS as a DigitalOcean " + "Droplet, but did not provide an ID. 
Please file a " + "support ticket at: " + "https://cloud.digitalocean.com/support/tickets/new") + LOG.critical(msg) + raise RuntimeError(msg) + + return (True, droplet_id) diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index f5d2ef35..bdfe0ba2 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -20,25 +20,123 @@ import json from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceDigitalOcean +from cloudinit.sources.helpers import digitalocean -from .. import helpers as test_helpers -from ..helpers import HttprettyTestCase - -httpretty = test_helpers.import_httpretty() +from ..helpers import mock, TestCase DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co" -DO_META = { - 'user_data': 'user_data_here', - 'vendor_data': 'vendor_data_here', - 'public_keys': DO_SINGLE_KEY, - 'region': 'nyc3', - 'id': '2000000', - 'hostname': 'cloudinit-test', +# the following JSON was taken from droplet (that's why its a string) +DO_META = json.loads(""" +{ + "droplet_id": "22532410", + "hostname": "utl-96268", + "vendor_data": "vendordata goes here", + "user_data": "userdata goes here", + "public_keys": "", + "auth_key": "authorization_key", + "region": "nyc3", + "interfaces": { + "private": [ + { + "ipv4": { + "ip_address": "10.132.6.205", + "netmask": "255.255.0.0", + "gateway": "10.132.0.1" + }, + "mac": "04:01:57:d1:9e:02", + "type": "private" + } + ], + "public": [ + { + "ipv4": { + "ip_address": "192.0.0.20", + "netmask": "255.255.255.0", + "gateway": "104.236.0.1" + }, + "ipv6": { + "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000", + "cidr": 64, + "gateway": "2604:A880:0800:0000:0000:0000:0000:0001" + }, + "anchor_ipv4": { + "ip_address": "10.0.0.5", + "netmask": "255.255.0.0", + "gateway": "10.0.0.1" + }, + "mac": "04:01:57:d1:9e:01", + "type": "public" + } + ] + }, + "floating_ip": { + "ipv4": { + "active": false + } + }, + "dns": { + "nameservers": [ + "2001:4860:4860::8844", + "2001:4860:4860::8888", + "8.8.8.8" + ] + } +} +""") + +# This has no private interface +DO_META_2 = { + "droplet_id": 27223699, + "hostname": "smtest1", + "vendor_data": "\n".join([ + ('"Content-Type: multipart/mixed; ' + 'boundary=\"===============8645434374073493512==\"'), + 'MIME-Version: 1.0', + '', + '--===============8645434374073493512==', + 'MIME-Version: 1.0' + 'Content-Type: text/cloud-config; charset="us-ascii"' + 'Content-Transfer-Encoding: 7bit' + 'Content-Disposition: attachment; filename="cloud-config"' + '', + '#cloud-config', + 'disable_root: false', + 'manage_etc_hosts: true', + '', + '', + '--===============8645434374073493512==' + ]), + "public_keys": [ + "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies" + ], + "auth_key": "88888888888888888888888888888888", + "region": "nyc3", + "interfaces": { + "public": [{ + "ipv4": { + "ip_address": "45.55.249.133", + "netmask": "255.255.192.0", + "gateway": "45.55.192.1" + }, + "anchor_ipv4": { + "ip_address": "10.17.0.5", + "netmask": "255.255.0.0", + "gateway": "10.17.0.1" + }, + "mac": "ae:cc:08:7c:88:00", + "type": "public" + }] + }, + "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}}, + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "tags": None, } +DO_META['public_keys'] = DO_SINGLE_KEY + MD_URL = 
'http://169.254.169.254/metadata/v1.json' @@ -46,69 +144,189 @@ def _mock_dmi(): return (True, DO_META.get('id')) -def _request_callback(method, uri, headers): - return (200, headers, json.dumps(DO_META)) - - -class TestDataSourceDigitalOcean(HttprettyTestCase): +class TestDataSourceDigitalOcean(TestCase): """ Test reading the meta-data """ - def setUp(self): - self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean( - settings.CFG_BUILTIN, None, - helpers.Paths({})) - self.ds._get_sysinfo = _mock_dmi - super(TestDataSourceDigitalOcean, self).setUp() - - @httpretty.activate - def test_connection(self): - httpretty.register_uri( - httpretty.GET, MD_URL, - body=json.dumps(DO_META)) - - success = self.ds.get_data() - self.assertTrue(success) - - @httpretty.activate - def test_metadata(self): - httpretty.register_uri( - httpretty.GET, MD_URL, - body=_request_callback) - self.ds.get_data() + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceDigitalOcean.DataSourceDigitalOcean( + settings.CFG_BUILTIN, None, helpers.Paths({})) + ds.use_ip4LL = False + if get_sysinfo is not None: + ds._get_sysinfo = get_sysinfo + return ds - self.assertEqual(DO_META.get('user_data'), - self.ds.get_userdata_raw()) + @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo') + def test_returns_false_not_on_docean(self, m_read_sysinfo): + m_read_sysinfo.return_value = (False, None) + ds = self.get_ds(get_sysinfo=None) + self.assertEqual(False, ds.get_data()) + m_read_sysinfo.assert_called() - self.assertEqual(DO_META.get('vendor_data'), - self.ds.get_vendordata_raw()) + @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + def test_metadata(self, mock_readmd): + mock_readmd.return_value = DO_META.copy() - self.assertEqual(DO_META.get('region'), - self.ds.availability_zone) + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) - self.assertEqual(DO_META.get('id'), - self.ds.get_instance_id()) + mock_readmd.assert_called() - self.assertEqual(DO_META.get('hostname'), - self.ds.get_hostname()) + self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) + self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) + self.assertEqual(DO_META.get('region'), ds.availability_zone) + self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id()) + self.assertEqual(DO_META.get('hostname'), ds.get_hostname()) # Single key self.assertEqual([DO_META.get('public_keys')], - self.ds.get_public_ssh_keys()) + ds.get_public_ssh_keys()) - self.assertIsInstance(self.ds.get_public_ssh_keys(), list) + self.assertIsInstance(ds.get_public_ssh_keys(), list) - @httpretty.activate - def test_multiple_ssh_keys(self): - DO_META['public_keys'] = DO_MULTIPLE_KEYS - httpretty.register_uri( - httpretty.GET, MD_URL, - body=_request_callback) - self.ds.get_data() + @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + def test_multiple_ssh_keys(self, mock_readmd): + metadata = DO_META.copy() + metadata['public_keys'] = DO_MULTIPLE_KEYS + mock_readmd.return_value = metadata.copy() + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + mock_readmd.assert_called() # Multiple keys - self.assertEqual(DO_META.get('public_keys'), - self.ds.get_public_ssh_keys()) + self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + +class TestNetworkConvert(TestCase): + + def _get_networking(self): + netcfg = digitalocean.convert_network_configuration( + DO_META['interfaces'], 
DO_META['dns']['nameservers']) + self.assertIn('config', netcfg) + return netcfg + + def test_networking_defined(self): + netcfg = self._get_networking() + self.assertIsNotNone(netcfg) + + for nic_def in netcfg.get('config'): + print(json.dumps(nic_def, indent=3)) + n_type = nic_def.get('type') + n_subnets = nic_def.get('type') + n_name = nic_def.get('name') + n_mac = nic_def.get('mac_address') + + self.assertIsNotNone(n_type) + self.assertIsNotNone(n_subnets) + self.assertIsNotNone(n_name) + self.assertIsNotNone(n_mac) + + def _get_nic_definition(self, int_type, expected_name): + """helper function to return if_type (i.e. public) and the expected + name used by cloud-init (i.e eth0)""" + netcfg = self._get_networking() + meta_def = (DO_META.get('interfaces')).get(int_type)[0] + + self.assertEqual(int_type, meta_def.get('type')) + + for nic_def in netcfg.get('config'): + print(nic_def) + if nic_def.get('name') == expected_name: + return nic_def, meta_def + + def _get_match_subn(self, subnets, ip_addr): + """get the matching subnet definition based on ip address""" + for subn in subnets: + address = subn.get('address') + self.assertIsNotNone(address) + + # equals won't work because of ipv6 addressing being in + # cidr notation, i.e fe00::1/64 + if ip_addr in address: + print(json.dumps(subn, indent=3)) + return subn + + def test_public_interface_defined(self): + """test that the public interface is defined as eth0""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + self.assertEqual('eth0', nic_def.get('name')) + self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) + self.assertEqual('physical', nic_def.get('type')) + + def test_private_interface_defined(self): + """test that the private interface is defined as eth1""" + (nic_def, meta_def) = self._get_nic_definition('private', 'eth1') + self.assertEqual('eth1', nic_def.get('name')) + self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) + self.assertEqual('physical', nic_def.get('type')) + + def _check_dns_nameservers(self, subn_def): + self.assertIn('dns_nameservers', subn_def) + expected_nameservers = DO_META['dns']['nameservers'] + nic_nameservers = subn_def.get('dns_nameservers') + self.assertEqual(expected_nameservers, nic_nameservers) + + def test_public_interface_ipv6(self): + """test public ipv6 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv6_def = meta_def.get('ipv6') + self.assertIsNotNone(ipv6_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv6_def.get('ip_address')) + + cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'), + ipv6_def.get('cidr')) + + self.assertEqual(cidr_notated_address, subn_def.get('address')) + self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway')) + self._check_dns_nameservers(subn_def) + + def test_public_interface_ipv4(self): + """test public ipv4 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('ipv4') + self.assertIsNotNone(ipv4_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv4_def.get('ip_address')) + + self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) + self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway')) + self._check_dns_nameservers(subn_def) + + def test_public_interface_anchor_ipv4(self): + """test public ipv4 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('anchor_ipv4') + 
self.assertIsNotNone(ipv4_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv4_def.get('ip_address')) + + self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) + self.assertNotIn('gateway', subn_def) + + def test_convert_without_private(self): + netcfg = digitalocean.convert_network_configuration( + DO_META_2['interfaces'], DO_META_2['dns']['nameservers']) - self.assertIsInstance(self.ds.get_public_ssh_keys(), list) + byname = {} + for i in netcfg['config']: + if 'name' in i: + if i['name'] in byname: + raise ValueError("name '%s' in config twice: %s" % + (i['name'], netcfg)) + byname[i['name']] = i + self.assertTrue('eth0' in byname) + self.assertTrue('subnets' in byname['eth0']) + eth0 = byname['eth0'] + self.assertEqual( + sorted(['45.55.249.133', '10.17.0.5']), + sorted([i['address'] for i in eth0['subnets']])) -- cgit v1.2.3 From 02f6c4bb8cef17b3fe04ef4dc1ef199e20aeb4d9 Mon Sep 17 00:00:00 2001 From: Stéphane Graber Date: Thu, 29 Sep 2016 01:40:32 -0400 Subject: lxd: Update network config for LXD 2.3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prior to LXD 2.3, the bridge configuration was done through distro packaging. Thus, lxd module interacted with debconf. With 2.3 and higher, this is now done inside LXD itself, so we need to use "lxc network" there. For now, this perfectly matches what we had before with debconf and doesn't cover any of the new options. We can always add those later. A set of tests similar to what we had for debconf has been added to make sure things look good. This is tested in Yakkety container running LXD 2.3 and all options seem to be passed through as expected, giving me the bridge I defined. Signed-off-by: Stéphane Graber --- cloudinit/config/cc_lxd.py | 107 +++++++++++++++++++---- tests/unittests/test_handler/test_handler_lxd.py | 51 +++++++++++ 2 files changed, 140 insertions(+), 18 deletions(-) diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 0086840f..cead2c95 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -46,6 +46,7 @@ Example config: """ from cloudinit import util +import os distros = ['ubuntu'] @@ -105,25 +106,43 @@ def handle(name, cfg, cloud, log, args): # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" - if bridge_cfg and util.which(dconf_comm): - debconf = bridge_to_debconf(bridge_cfg) + if bridge_cfg: + if os.path.exists("/etc/default/lxd-bridge") \ + and util.which(dconf_comm): + # Bridge configured through packaging + + debconf = bridge_to_debconf(bridge_cfg) + + # Update debconf database + try: + log.debug("Setting lxd debconf via " + dconf_comm) + data = "\n".join(["set %s %s" % (k, v) + for k, v in debconf.items()]) + "\n" + util.subp(['debconf-communicate'], data) + except Exception: + util.logexc(log, "Failed to run '%s' for lxd with" % + dconf_comm) + + # Remove the existing configuration file (forces re-generation) + util.del_file("/etc/default/lxd-bridge") + + # Run reconfigure + log.debug("Running dpkg-reconfigure for lxd") + util.subp(['dpkg-reconfigure', 'lxd', + '--frontend=noninteractive']) + else: + # Built-in LXD bridge support + cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) + if cmd_create: + log.debug("Creating lxd bridge: %s" % + " ".join(cmd_create)) + util.subp(cmd_create) + + if cmd_attach: + log.debug("Setting up default lxd bridge: %s" % + " ".join(cmd_create)) + util.subp(cmd_attach) - # Update debconf database - try: - log.debug("Setting lxd debconf 
via " + dconf_comm) - data = "\n".join(["set %s %s" % (k, v) - for k, v in debconf.items()]) + "\n" - util.subp(['debconf-communicate'], data) - except Exception: - util.logexc(log, "Failed to run '%s' for lxd with" % dconf_comm) - - # Remove the existing configuration file (forces re-generation) - util.del_file("/etc/default/lxd-bridge") - - # Run reconfigure - log.debug("Running dpkg-reconfigure for lxd") - util.subp(['dpkg-reconfigure', 'lxd', - '--frontend=noninteractive']) elif bridge_cfg: raise RuntimeError( "Unable to configure lxd bridge without %s." + dconf_comm) @@ -177,3 +196,55 @@ def bridge_to_debconf(bridge_cfg): raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) return debconf + + +def bridge_to_cmd(bridge_cfg): + if bridge_cfg.get("mode") == "none": + return None, None + + bridge_name = bridge_cfg.get("name", "lxdbr0") + cmd_create = [] + cmd_attach = ["lxc", "network", "attach-profile", bridge_name, + "default", "eth0", "--force-local"] + + if bridge_cfg.get("mode") == "existing": + return None, cmd_attach + + if bridge_cfg.get("mode") != "new": + raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + + cmd_create = ["lxc", "network", "create", bridge_name] + + if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): + cmd_create.append("ipv4.address=%s/%s" % + (bridge_cfg.get("ipv4_address"), + bridge_cfg.get("ipv4_netmask"))) + + if bridge_cfg.get("ipv4_nat", "true") == "true": + cmd_create.append("ipv4.nat=true") + + if bridge_cfg.get("ipv4_dhcp_first") and \ + bridge_cfg.get("ipv4_dhcp_last"): + dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"), + bridge_cfg.get("ipv4_dhcp_last")) + cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range) + else: + cmd_create.append("ipv4.address=none") + + if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"): + cmd_create.append("ipv6.address=%s/%s" % + (bridge_cfg.get("ipv6_address"), + bridge_cfg.get("ipv6_netmask"))) + + if bridge_cfg.get("ipv6_nat", "false") == "true": + cmd_create.append("ipv6.nat=true") + + else: + cmd_create.append("ipv6.address=none") + + if bridge_cfg.get("domain"): + cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) + + cmd_create.append("--force-local") + + return cmd_create, cmd_attach diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index 6f90defb..14366a10 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -132,3 +132,54 @@ class TestLxd(t_help.TestCase): cc_lxd.bridge_to_debconf(data), {"lxd/setup-bridge": "false", "lxd/bridge-name": ""}) + + def test_lxd_cmd_new_full(self): + data = {"mode": "new", + "name": "testbr0", + "ipv4_address": "10.0.8.1", + "ipv4_netmask": "24", + "ipv4_dhcp_first": "10.0.8.2", + "ipv4_dhcp_last": "10.0.8.254", + "ipv4_dhcp_leases": "250", + "ipv4_nat": "true", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": "true", + "domain": "lxd"} + self.assertEqual( + cc_lxd.bridge_to_cmd(data), + (["lxc", "network", "create", "testbr0", + "ipv4.address=10.0.8.1/24", "ipv4.nat=true", + "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254", + "ipv6.address=fd98:9e0:3744::1/64", + "ipv6.nat=true", "dns.domain=lxd", + "--force-local"], + ["lxc", "network", "attach-profile", + "testbr0", "default", "eth0", "--force-local"])) + + def test_lxd_cmd_new_partial(self): + data = {"mode": "new", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": 
"true"} + self.assertEqual( + cc_lxd.bridge_to_cmd(data), + (["lxc", "network", "create", "lxdbr0", "ipv4.address=none", + "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true", + "--force-local"], + ["lxc", "network", "attach-profile", + "lxdbr0", "default", "eth0", "--force-local"])) + + def test_lxd_cmd_existing(self): + data = {"mode": "existing", + "name": "testbr0"} + self.assertEqual( + cc_lxd.bridge_to_cmd(data), + (None, ["lxc", "network", "attach-profile", + "testbr0", "default", "eth0", "--force-local"])) + + def test_lxd_cmd_none(self): + data = {"mode": "none"} + self.assertEqual( + cc_lxd.bridge_to_cmd(data), + (None, None)) -- cgit v1.2.3 From 1071b9940b4e114cd2eabf290b739f92fbab33de Mon Sep 17 00:00:00 2001 From: Wesley Wiedenmeier Date: Sun, 28 Aug 2016 17:56:17 -0500 Subject: Improve module documentation and doc cleanup. This adds lots of config module documentation in a standard format. It will greatly improve the content at readthedocs. Additionally: * Add a 'doc' env to tox.ini * Changed default highlight language for sphinx conf from python to yaml most examples in documentation are yaml configs * Updated datasource examples to highlight sh code properly --- cloudinit/config/cc_apt_configure.py | 207 ++++++++++++++ cloudinit/config/cc_apt_pipelining.py | 25 ++ cloudinit/config/cc_bootcmd.py | 28 ++ cloudinit/config/cc_byobu.py | 33 +++ cloudinit/config/cc_ca_certs.py | 32 +++ cloudinit/config/cc_chef.py | 12 +- cloudinit/config/cc_debug.py | 26 +- cloudinit/config/cc_disable_ec2_metadata.py | 20 ++ cloudinit/config/cc_disk_setup.py | 92 ++++++- cloudinit/config/cc_emit_upstart.py | 15 ++ cloudinit/config/cc_fan.py | 49 ++-- cloudinit/config/cc_final_message.py | 25 ++ cloudinit/config/cc_foo.py | 16 +- cloudinit/config/cc_growpart.py | 57 ++++ cloudinit/config/cc_grub_dpkg.py | 34 +++ cloudinit/config/cc_keys_to_console.py | 24 ++ cloudinit/config/cc_landscape.py | 49 ++++ cloudinit/config/cc_locale.py | 20 ++ cloudinit/config/cc_lxd.py | 66 +++-- cloudinit/config/cc_mcollective.py | 41 +++ cloudinit/config/cc_migrator.py | 22 ++ cloudinit/config/cc_mounts.py | 48 ++++ cloudinit/config/cc_ntp.py | 32 +++ .../config/cc_package_update_upgrade_install.py | 35 +++ cloudinit/config/cc_phone_home.py | 34 +++ cloudinit/config/cc_power_state_change.py | 45 ++++ cloudinit/config/cc_puppet.py | 45 ++++ cloudinit/config/cc_resizefs.py | 26 ++ cloudinit/config/cc_resolv_conf.py | 69 ++--- cloudinit/config/cc_rh_subscription.py | 34 +++ cloudinit/config/cc_rightscale_userdata.py | 26 ++ cloudinit/config/cc_rsyslog.py | 249 +++++++++++------ cloudinit/config/cc_runcmd.py | 32 +++ cloudinit/config/cc_salt_minion.py | 33 +++ cloudinit/config/cc_scripts_per_boot.py | 16 ++ cloudinit/config/cc_scripts_per_instance.py | 16 ++ cloudinit/config/cc_scripts_per_once.py | 16 ++ cloudinit/config/cc_scripts_user.py | 19 ++ cloudinit/config/cc_scripts_vendor.py | 22 ++ cloudinit/config/cc_seed_random.py | 52 ++++ cloudinit/config/cc_set_hostname.py | 26 ++ cloudinit/config/cc_set_passwords.py | 46 ++++ cloudinit/config/cc_snappy.py | 113 +++++--- cloudinit/config/cc_spacewalk.py | 25 +- cloudinit/config/cc_ssh.py | 87 ++++++ cloudinit/config/cc_ssh_authkey_fingerprints.py | 21 ++ cloudinit/config/cc_ssh_import_id.py | 24 ++ cloudinit/config/cc_timezone.py | 20 ++ cloudinit/config/cc_ubuntu_init_switch.py | 32 ++- cloudinit/config/cc_update_etc_hosts.py | 43 +++ cloudinit/config/cc_update_hostname.py | 25 ++ cloudinit/config/cc_users_groups.py | 80 ++++++ cloudinit/config/cc_write_files.py | 42 
+++ cloudinit/config/cc_yum_add_repo.py | 26 ++ doc/examples/cloud-config-seed-random.txt | 2 +- doc/rtd/conf.py | 2 +- doc/rtd/topics/datasources.rst | 48 ++-- doc/rtd/topics/dir_layout.rst | 4 +- doc/rtd/topics/examples.rst | 52 ++-- doc/rtd/topics/format.rst | 12 +- doc/rtd/topics/modules.rst | 297 +-------------------- doc/rtd/topics/moreinfo.rst | 6 +- doc/sources/altcloud/README.rst | 8 +- doc/sources/configdrive/README.rst | 4 +- tox.ini | 6 + 65 files changed, 2212 insertions(+), 581 deletions(-) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index fa9505a7..6145fcd2 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -18,6 +18,213 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +""" +Apt Configure +------------- +**Summary:** configure apt + +This module handles both configuration of apt options and adding source lists. +There are configuration options such as ``apt_get_wrapper`` and +``apt_get_command`` that control how cloud-init invokes apt-get. +These configuration options are handled on a per-distro basis, so consult +documentation for cloud-init's distro support for instructions on using +these config options. + +.. note:: + To ensure that apt configuration is valid yaml, any strings containing + special characters, especially ``:`` should be quoted. + +.. note:: + For more information about apt configuration, see the + ``Additional apt configuration`` example. + +**Preserve sources.list:** + +By default, cloud-init will generate a new sources list in +``/etc/apt/sources.list.d`` based on any changes specified in cloud config. +To disable this behavior and preserve the sources list from the pristine image, +set ``preserve_sources_list`` to ``true``. + +.. note:: + The ``preserve_sources_list`` option overrides all other config keys that + would alter ``sources.list`` or ``sources.list.d``, **except** for + additional sources to be added to ``sources.list.d``. + +**Disable source suites:** + +Entries in the sources list can be disabled using ``disable_suites``, which +takes a list of suites to be disabled. If the string ``$RELEASE`` is present in +a suite in the ``disable_suites`` list, it will be replaced with the release +name. If a suite specified in ``disable_suites`` is not present in +``sources.list`` it will be ignored. For convenience, several aliases are +provided for ``disable_suites``: + + - ``updates`` => ``$RELEASE-updates`` + - ``backports`` => ``$RELEASE-backports`` + - ``security`` => ``$RELEASE-security`` + - ``proposed`` => ``$RELEASE-proposed`` + - ``release`` => ``$RELEASE`` + +.. note:: + When a suite is disabled using ``disable_suites``, its entry in + ``sources.list`` is not deleted; it is just commented out. + +**Configure primary and security mirrors:** + +The primary and security archive mirrors can be specified using the ``primary`` +and ``security`` keys, respectively. Both the ``primary`` and ``security`` keys +take a list of configs, allowing mirrors to be specified on a per-architecture +basis. Each config is a dictionary which must have an entry for ``arches``, +specifying which architectures that config entry is for. The keyword +``default`` applies to any architecture not explicitly listed. The mirror url +can be specified with the ``url`` key, or a list of mirrors to check can be +provided in order, with the first mirror that can be resolved being selected. 
+This allows the same configuration to be used in different environment, with +different hosts used for a local apt mirror. If no mirror is provided by uri or +search, ``search_dns`` may be used to search for dns names in the format +``-mirror`` in each of the following: + + - fqdn of this host per cloud metadata + - localdomain + - domains listed in ``/etc/resolv.conf`` + +If there is a dns entry for ``-mirror``, then it is assumed that there +is a distro mirror at ``http://-mirror./``. If the +``primary`` key is defined, but not the ``security`` key, then then +configuration for ``primary`` is also used for ``security``. If ``search_dns`` +is used for the ``security`` key, the search pattern will be. +``-security-mirror``. + +If no mirrors are specified, or all lookups fail, then default mirrors defined +in the datasource are used. If none are present in the datasource either the +following defaults are used: + + - primary: ``http://archive.ubuntu.com/ubuntu`` + - security: ``http://security.ubuntu.com/ubuntu`` + +**Specify sources.list template:** + +A custom template for rendering ``sources.list`` can be specefied with +``sources_list``. If no ``sources_list`` template is given, cloud-init will +use sane default. Within this template, the following strings will be replaced +with the appropriate values: + + - ``$MIRROR`` + - ``$RELEASE`` + - ``$PRIMARY`` + - ``$SECURITY`` + +**Pass configuration to apt:** + +Apt configuration can be specified using ``conf``. Configuration is specified +as a string. For multiline apt configuration, make sure to follow yaml syntax. + +**Configure apt proxy:** + +Proxy configuration for apt can be specified using ``conf``, but proxy config +keys also exist for convenience. The proxy config keys, ``http_proxy``, +``ftp_proxy``, and ``https_proxy`` may be used to specify a proxy for http, ftp +and https protocols respectively. The ``proxy`` key also exists as an alias for +``http_proxy``. Proxy url is specified in the format +``://[[user][:pass]@]host[:port]/``. + +**Add apt repos by regex:** + +All source entries in ``apt-sources`` that match regex in +``add_apt_repo_match`` will be added to the system using +``add-apt-repository``. If ``add_apt_repo_match`` is not specified, it defaults +to ``^[\w-]+:\w`` + +**Add source list entries:** + +Source list entries can be specified as a dictionary under the ``sources`` +config key, with key in the dict representing a different source file. The key +The key of each source entry will be used as an id that can be referenced in +other config entries, as well as the filename for the source's configuration +under ``/etc/apt/sources.list.d``. If the name does not end with ``.list``, +it will be appended. If there is no configuration for a key in ``sources``, no +file will be written, but the key may still be referred to as an id in other +``sources`` entries. 
+
+Each entry under ``sources`` is a dictionary which may contain any of the
+following optional keys:
+
+    - ``source``: a sources.list entry (some variable replacements apply)
+    - ``keyid``: a key to import via shortid or fingerprint
+    - ``key``: a raw PGP key
+    - ``keyserver``: alternate keyserver to pull ``keyid`` key from
+
+The ``source`` key supports variable replacements for the following strings:
+
+    - ``$MIRROR``
+    - ``$PRIMARY``
+    - ``$SECURITY``
+    - ``$RELEASE``
+
+**Internal name:** ``cc_apt_configure``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+
+    apt:
+        preserve_sources_list: <true/false>
+        disable_suites:
+            - $RELEASE-updates
+            - backports
+            - $RELEASE
+            - mysuite
+        primary:
+            - arches:
+                - amd64
+                - i386
+                - default
+              uri: "http://us.archive.ubuntu.com/ubuntu"
+              search:
+                - "http://cool.but-sometimes-unreachable.com/ubuntu"
+                - "http://us.archive.ubuntu.com/ubuntu"
+              search_dns: <true/false>
+            - arches:
+                - s390x
+                - arm64
+              uri: "http://archive-to-use-for-arm64.example.com/ubuntu"
+        security:
+            - arches:
+                - default
+              search_dns: true
+        sources_list: |
+            deb $MIRROR $RELEASE main restricted
+            deb-src $MIRROR $RELEASE main restricted
+            deb $PRIMARY $RELEASE universe restricted
+            deb $SECURITY $RELEASE-security multiverse
+        conf: |
+            APT {
+                Get {
+                    Assume-Yes "true";
+                    Fix-Broken "true";
+                }
+            }
+        proxy: "http://[[user][:pass]@]host[:port]/"
+        http_proxy: "http://[[user][:pass]@]host[:port]/"
+        ftp_proxy: "ftp://[[user][:pass]@]host[:port]/"
+        https_proxy: "https://[[user][:pass]@]host[:port]/"
+        sources:
+            source1:
+                keyid: "keyid"
+                keyserver: "keyserverurl"
+                source: "deb http://<url>/ xenial main"
+            source2:
+                source: "ppa:<ppa-name>"
+            source3:
+                source: "deb $MIRROR $RELEASE multiverse"
+                key: |
+                    ------BEGIN PGP PUBLIC KEY BLOCK-------
+                    <key data>
+                    ------END PGP PUBLIC KEY BLOCK-------
+"""
+
 import glob
 import os
 import re
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index 40c32c84..ab9d0054 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -16,6 +16,31 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+"""
+Apt Pipelining
+--------------
+**Summary:** configure apt pipelining
+
+This module configures apt's ``Acquire::http::Pipeline-Depth`` option, which
+controls how apt handles HTTP pipelining. It may be useful for pipelining to be
+disabled, because some web servers, such as S3, do not pipeline properly (LP:
+#948461). The ``apt_pipelining`` config key may be set to ``false`` to disable
+pipelining altogether. This is the default behavior. If it is set to ``none``,
+``unchanged``, or ``os``, no change will be made to apt configuration and the
+default setting for the distro will be used. The pipeline depth can also be
+manually specified by setting ``apt_pipelining`` to a number. However, this is
+not recommended.
+
+**Internal name:** ``cc_apt_pipelining``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+
+    apt_pipelining: <false/none/unchanged/os/number>
+"""
+
 from cloudinit.settings import PER_INSTANCE
 from cloudinit import util
 
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index b763a3c3..22b23f28 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -18,6 +18,34 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
+""" +Bootcmd +------- +**Summary:** run commands early in boot process + +This module runs arbitrary commands very early in the boot process, +only slightly after a boothook would run. This is very similar to a +boothook, but more user friendly. The environment variable ``INSTANCE_ID`` +will be set to the current instance id for all run commands. Commands can be +specified either as lists or strings. For invocation details, see ``runcmd``. + +.. note:: + bootcmd should only be used for things that could not be done later in the + boot process. + +**Internal name:** ``cc_bootcmd`` + +**Module frequency:** per always + +**Supported distros:** all + +**Config keys**:: + + bootcmd: + - echo 192.168.1.130 us.archive.ubuntu.com > /etc/hosts + - [ cloud-nit-per, once, mymkfs, mkfs, /dev/vdb ] +""" + import os from cloudinit.settings import PER_ALWAYS diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py index ef0ce7ab..1f00dd90 100644 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -18,6 +18,39 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +""" +Byobu +----- +**Summary:** enable/disable byobu system wide and for default user + +This module controls whether byobu is enabled or disabled system wide and for +the default system user. If byobu is to be enabled, this module will ensure it +is installed. Likewise, if it is to be disabled, it will be removed if +installed. + +Valid configuration options for this module are: + + - ``enable-system``: enable byobu system wide + - ``enable-user``: enable byobu for the default user + - ``disable-system``: disable byobu system wide + - ``disable-user``: disable byobu for the default user + - ``enable``: enable byobu both system wide and for default user + - ``disable``: disable byobu for all users + - ``user``: alias for ``enable-user`` + - ``system``: alias for ``enable-system`` + +**Internal name:** ``cc_byobu`` + +**Module frequency:** per instance + +**Supported distros:** ubuntu, debian + +**Config keys**:: + + byobu_by_default: +""" + + # Ensure this is aliased to a name not 'distros' # since the module attribute 'distros' # is a list of distros that are supported, not a sub-module diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 8248b020..53d14060 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -14,6 +14,38 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +""" +CA Certs +-------- +**Summary:** add ca certificates + +This module adds CA certificates to ``/etc/ca-certificates.conf`` and updates +the ssl cert cache using ``update-ca-certificates``. The default certificates +can be removed from the system with the configuration option +``remove-defaults``. + +.. note:: + certificates must be specified using valid yaml. 
In order to specify a
+    multiline certificate, the yaml multiline list syntax must be used.
+
+**Internal name:** ``cc_ca_certs``
+
+**Module frequency:** per instance
+
+**Supported distros:** ubuntu, debian
+
+**Config keys**::
+
+    ca-certs:
+        remove-defaults: <true/false>
+        trusted:
+            - <single line cert>
+            - |
+              -----BEGIN CERTIFICATE-----
+              YOUR-ORGS-TRUSTED-CA-CERT-HERE
+              -----END CERTIFICATE-----
+"""
+
 import os
 
 from cloudinit import util
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 4c28be6a..922fb6af 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -19,9 +19,11 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 """
+Chef
+----
 **Summary:** module that configures, starts and installs chef.
 
-**Description:** This module enables chef to be installed (from packages or
+This module enables chef to be installed (from packages or
 from gems, or from omnibus). Before this occurs chef configurations are
 written to disk (validation.pem, client.pem, firstboot.json, client.rb),
 and needed chef folders/directories are created (/etc/chef and /var/log/chef
@@ -33,7 +35,13 @@
 chef will have forked into its own process) then a post run function can
 run that can do finishing activities (such as removing the validation pem
 file).
 
-It can be configured with the following option structure::
+**Internal name:** ``cc_chef``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
 
     chef:
        directories: (defaulting to /etc/chef, /var/log/chef, /var/lib/chef,
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index bdc32fe6..5ab36469 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -15,22 +15,28 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 """
+Debug
+-----
 **Summary:** helper to debug cloud-init *internal* datastructures.
 
-**Description:** This module will enable for outputting various internal
-information that cloud-init sources provide to either a file or to the output
-console/log location that this cloud-init has been configured with when
-running.
+This module enables outputting various internal information that
+cloud-init sources provide to either a file or to the output console/log
+location that cloud-init has been configured with when running.
 
-It can be configured with the following option structure::
+.. note::
+    Log configurations are not output.
 
-    debug:
-       verbose: (defaulting to true)
-       output: (location to write output, defaulting to console + log)
+**Internal name:** ``cc_debug``
 
-.. note::
+**Module frequency:** per instance
 
-    Log configurations are not output.
+**Supported distros:** all
+
+**Config keys**::
+
+    debug:
+       verbose: true/false (defaulting to true)
+       output: (location to write output, defaulting to console + log)
 """
 
 import copy
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
index 3fd2c20f..5c54e6f4 100644
--- a/cloudinit/config/cc_disable_ec2_metadata.py
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -18,6 +18,26 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+"""
+Disable EC2 Metadata
+--------------------
+**Summary:** disable aws ec2 metadata
+
+This module can disable the ec2 datasource by rejecting the route to
+``169.254.169.254``, the usual route to the datasource. This module is disabled
+by default.
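+
+For example, a minimal sketch of user data that turns this module on (the
+value shown is illustrative; see **Config keys** below for the accepted
+values)::
+
+    disable_ec2_metadata: true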
+
+**Internal name:** ``cc_disable_ec2_metadata``
+
+**Module frequency:** per always
+
+**Supported distros:** all
+
+**Config keys**::
+
+    disable_ec2_metadata: <true/false>
+"""
+
 from cloudinit import util
 
 from cloudinit.settings import PER_ALWAYS
 
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 39a23688..efa7a226 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -16,6 +16,96 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Disk Setup
+----------
+**Summary:** configure partitions and filesystems
+
+This module is able to configure simple partition tables and filesystems.
+
+.. note::
+    For more detail about configuration options for disk setup, see the disk
+    setup example.
+
+For convenience, aliases can be specified for disks using the
+``device_aliases`` config key, which takes a dictionary of alias: path
+mappings. There are automatic aliases for ``swap`` and ``ephemeral``, where
+``swap`` will always refer to the active swap partition and ``ephemeral``
+will refer to the block device of the ephemeral image.
+
+Disk partitioning is done using the ``disk_setup`` directive. This config
+directive accepts a dictionary where each key is either a path to a block
+device or an alias specified in ``device_aliases``, and each value is the
+configuration options for the device. The ``table_type`` option specifies the
+partition table type, either ``mbr`` or ``gpt``. The ``layout`` option
+specifies how partitions on the device are to be arranged. If ``layout`` is set
+to ``true``, a single partition using all the space on the device will be
+created. If set to ``false``, no partitions will be created. Partitions can be
+specified by providing a list to ``layout``, where each entry in the list is
+either a size or a list containing a size and the numerical value for a
+partition type. The size for partitions is specified in **percentage** of disk
+space, not in bytes (e.g. a size of 33 would take up 1/3 of the disk space).
+The ``overwrite`` option controls whether this module tries to be safe about
+writing partition tables or not. If ``overwrite: false`` is set, the device
+will be checked for a partition table and for a file system and if either is
+found, the operation will be skipped. If ``overwrite: true`` is set, no checks
+will be performed.
+
+.. note::
+    Using ``overwrite: true`` is dangerous and can lead to data loss, so double
+    check that the correct device has been specified if using this option.
+
+File system configuration is done using the ``fs_setup`` directive. This config
+directive accepts a list of filesystem configs. The device to create the
+filesystem on may be specified either as a path or as an alias in the format
+``<alias name>.<y>`` where ``<y>`` denotes the partition number on the device.
+The partition can also be specified by setting ``partition`` to the desired
+partition number. The ``partition`` option may also be set to ``auto``, in
+which case this module will search for the existence of a filesystem matching
+the ``label``, ``type`` and ``device`` of the ``fs_setup`` entry and will skip
+creating the filesystem if one is found. The ``partition`` option may also be
+set to ``any``, in which case any file system that matches ``type`` and
+``device`` will cause this module to skip filesystem creation for the
+``fs_setup`` entry, regardless of ``label`` matching or not. To write a
+filesystem directly to a device, use ``partition: none``.
A label can be
+specified for the filesystem using ``label``, and the filesystem type can be
+specified using ``filesystem``.
+
+.. note::
+    If specifying device using the ``<device name>.<partition number>`` format,
+    the value of ``partition`` will be overwritten.
+
+.. note::
+    Using ``overwrite: true`` for filesystems is dangerous and can lead to data
+    loss, so double check the entry in ``fs_setup``.
+
+**Internal name:** ``cc_disk_setup``
+
+**Module frequency:** per instance
+
+**Supported distros:** all
+
+**Config keys**::
+
+    device_aliases:
+        <alias name>: <device path>
+    disk_setup:
+        <alias name/path>:
+            table_type: <'mbr'/'gpt'>
+            layout:
+                - [33,82]
+                - 66
+            overwrite: <true/false>
+    fs_setup:
+        - label: