From 970dbd13f5ae40b0f95ea390b72d2b3426e8e4d9 Mon Sep 17 00:00:00 2001 From: LaMont Jones Date: Wed, 21 Sep 2016 10:31:40 -0400 Subject: net: support reading ipv6 dhcp config from initramfs This adds support for understanding 'dhcp6' as a protocol that can be written into /run/net-IFACE.cfg files by the initramfs. The end result is supporting ipv6 dhcp from initramfs boot all the way into iscsi root. LP: #1621615, #1621507 --- tests/unittests/test_net.py | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) (limited to 'tests') diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 41b9a6d0..78c080ca 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -53,6 +53,45 @@ DHCP_EXPECTED_1 = { 'dns_nameservers': ['192.168.122.1']}], } +DHCP6_CONTENT_1 = """ +DEVICE=eno1 +HOSTNAME= +DNSDOMAIN= +reason='PREINIT' +interface='eno1' +DEVICE=eno1 +HOSTNAME= +DNSDOMAIN= +reason='FAIL' +interface='eno1' +DEVICE=eno1 +HOSTNAME= +DNSDOMAIN= +reason='PREINIT6' +interface='eno1' +DEVICE=eno1 +IPV6PROTO=dhcp6 +IPV6ADDR=2001:67c:1562:8010:0:1:: +IPV6NETMASK=64 +IPV6DNS0=2001:67c:1562:8010::2:1 +IPV6DOMAINSEARCH= +HOSTNAME= +DNSDOMAIN= +reason='BOUND6' +interface='eno1' +new_ip6_address='2001:67c:1562:8010:0:1::' +new_ip6_prefixlen='64' +new_dhcp6_name_servers='2001:67c:1562:8010::2:1' +""" + +DHCP6_EXPECTED_1 = { + 'name': 'eno1', + 'type': 'physical', + 'subnets': [{'control': 'manual', + 'dns_nameservers': ['2001:67c:1562:8010::2:1'], + 'netmask': '64', + 'type': 'dhcp6'}]} + STATIC_CONTENT_1 = """ DEVICE='eth1' @@ -590,6 +629,10 @@ class TestCmdlineConfigParsing(TestCase): found = cmdline._klibc_to_config_entry(DHCP_CONTENT_1) self.assertEqual(found, ('eth0', DHCP_EXPECTED_1)) + def test_cmdline_convert_dhcp6(self): + found = cmdline._klibc_to_config_entry(DHCP6_CONTENT_1) + self.assertEqual(found, ('eno1', DHCP6_EXPECTED_1)) + def test_cmdline_convert_static(self): found = cmdline._klibc_to_config_entry(STATIC_CONTENT_1) self.assertEqual(found, ('eth1', STATIC_EXPECTED_1)) -- cgit v1.2.3 From 40a400e42603aa1b80d9f623bc779799b370c091 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 21 Sep 2016 15:45:45 -0400 Subject: subp: add 'update_env' argument In order for a caller to use 'env' argument of subp, they will realistically do: env = os.environ.copy() env['FOO'] = 'BZR' subp(cmd, env=env) This shortens that to be: subp(cmd, update_env={'FOO': 'BZR'}) Add tests, and update growpart tests to use mock when playing with os.environ. 
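The merge logic this adds is small; a standalone sketch of the intended
semantics (illustrative only -- '_merged_env' below is not a cloud-init
function, it just mirrors the few lines the patch adds to subp):

    import os

    def _merged_env(env=None, update_env=None):
        # Start from the caller's env (or the process environment),
        # copy it so neither source is mutated, then lay update_env
        # on top.
        if update_env:
            if env is None:
                env = os.environ
            env = env.copy()
            env.update(update_env)
        return env

    # update_env wins over an inherited value; os.environ is untouched.
    assert _merged_env(update_env={'HOME': '/myhome'})['HOME'] == '/myhome'
    # With an explicit env, only its keys plus update_env are present.
    assert _merged_env(env={'FOO': 'BAR'}, update_env={'K2': 'V2'}) == \
        {'FOO': 'BAR', 'K2': 'V2'}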
--- cloudinit/util.py | 9 ++++++- .../test_handler/test_handler_growpart.py | 4 ++- tests/unittests/test_util.py | 30 ++++++++++++++++++++-- 3 files changed, 39 insertions(+), 4 deletions(-) (limited to 'tests') diff --git a/cloudinit/util.py b/cloudinit/util.py index 6c5cf741..05cb587c 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1762,7 +1762,7 @@ def delete_dir_contents(dirname): def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, - logstring=False, decode="replace", target=None): + logstring=False, decode="replace", target=None, update_env=None): # not supported in cloud-init (yet), for now kept in the call signature # to ease maintaining code shared between cloud-init and curtin @@ -1773,6 +1773,13 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, rcs = [0] devnull_fp = None + + if update_env: + if env is None: + env = os.environ + env = env.copy() + env.update(update_env) + try: if target_path(target) != "/": args = ['chroot', target] + list(args) diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index e653488a..e28067de 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -81,11 +81,11 @@ class TestConfig(TestCase): self.cloud = cloud.Cloud(None, self.paths, None, None, None) self.log = logging.getLogger("TestConfig") self.args = [] - os.environ = {} self.cloud_init = None self.handle = cc_growpart.handle + @mock.patch.dict("os.environ", clear=True) def test_no_resizers_auto_is_fine(self): with mock.patch.object( util, 'subp', @@ -98,6 +98,7 @@ class TestConfig(TestCase): mockobj.assert_called_once_with( ['growpart', '--help'], env={'LANG': 'C'}) + @mock.patch.dict("os.environ", clear=True) def test_no_resizers_mode_growpart_is_exception(self): with mock.patch.object( util, 'subp', @@ -110,6 +111,7 @@ class TestConfig(TestCase): mockobj.assert_called_once_with( ['growpart', '--help'], env={'LANG': 'C'}) + @mock.patch.dict("os.environ", clear=True) def test_mode_auto_prefers_growpart(self): with mock.patch.object( util, 'subp', diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index d2031f59..30f603cb 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -223,8 +223,10 @@ class TestKeyValStrings(helpers.TestCase): class TestGetCmdline(helpers.TestCase): def test_cmdline_reads_debug_env(self): - os.environ['DEBUG_PROC_CMDLINE'] = 'abcd 123' - self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], util.get_cmdline()) + with mock.patch.dict("os.environ", + values={'DEBUG_PROC_CMDLINE': 'abcd 123'}): + ret = util.get_cmdline() + self.assertEqual("abcd 123", ret) class TestLoadYaml(helpers.TestCase): @@ -516,6 +518,7 @@ class TestSubp(helpers.TestCase): utf8_invalid = b'ab\xaadef' utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' + printenv = ['bash', '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] def printf_cmd(self, *args): # bash's printf supports \xaa. 
So does /usr/bin/printf @@ -566,6 +569,29 @@ class TestSubp(helpers.TestCase): self.assertEqual(err, data) self.assertEqual(out, b'') + def test_subp_reads_env(self): + with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): + out, err = util.subp(self.printenv + ['FOO'], capture=True) + self.assertEqual('FOO=BAR', out.splitlines()[0]) + + def test_subp_env_and_update_env(self): + out, err = util.subp( + self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, + env={'FOO': 'BAR'}, + update_env={'HOME': '/myhome', 'K2': 'V2'}) + self.assertEqual( + ['FOO=BAR', 'HOME=/myhome', 'K1=', 'K2=V2'], out.splitlines()) + + def test_subp_update_env(self): + extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} + with mock.patch.dict("os.environ", values=extra): + out, err = util.subp( + self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, + update_env={'HOME': '/myhome', 'K2': 'V2'}) + + self.assertEqual( + ['FOO=BAR', 'HOME=/myhome', 'K1=V1', 'K2=V2'], out.splitlines()) + def test_returns_none_if_no_capture(self): (out, err) = util.subp(self.stdin2out, data=b'', capture=False) self.assertEqual(err, None) -- cgit v1.2.3 From 0439d8a17d181a2546f2f7cb2d71a04bbb13b186 Mon Sep 17 00:00:00 2001 From: Robert Schweikert Date: Thu, 15 Sep 2016 12:05:15 -0400 Subject: Decode unicode types in decode_binary The test in decode_binary for six.text_type was incorrect as that includes unicode type in Python 2 which should actually be decoded. When the type is string_types we now properly check only for basestring and str in Python 2 and Python 3 respectively and return the given blob without making an attempt to decode. --- cloudinit/util.py | 2 +- tests/unittests/test_util.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'tests') diff --git a/cloudinit/util.py b/cloudinit/util.py index 05cb587c..eb3e5899 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -154,7 +154,7 @@ def target_path(target, path=None): def decode_binary(blob, encoding='utf-8'): # Converts a binary type into a text type using given encoding. - if isinstance(blob, six.text_type): + if isinstance(blob, six.string_types): return blob return blob.decode(encoding) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 30f603cb..fc6b9d40 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -603,4 +603,12 @@ class TestSubp(helpers.TestCase): self.assertEqual("/target/my/path/", util.target_path("/target/", "///my/path/")) + +class TestEncode(helpers.TestCase): + """Test the encoding functions""" + def test_decode_binary_plain_text_with_hex(self): + blob = 'BOOTABLE_FLAG=\x80init=/bin/systemd' + text = util.decode_binary(blob) + self.assertEqual(text, blob) + # vi: ts=4 expandtab -- cgit v1.2.3 From 9f83bb8e80806d3dd79ba426474dc3c696e19a41 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Fri, 19 Aug 2016 16:28:26 -0600 Subject: DigitalOcean: use meta-data for network configruation On DigitalOcean, Network information is provided via Meta-data. It changes the datasource to be a local datasource, meaning it will run before fallback networking is configured. The advantage of that is that before networking is configured it can bring up a network device with ipv4 link-local and hit the metadata service that lives at 169.254.169.254 to find its networking configuration. It then takes down the link local address and lets cloud-init configure networking. The configuring of a network device to go looking for a metadata service is gated by a check of data in the smbios. 
This guarantees that the code will not run on another system. --- cloudinit/sources/DataSourceDigitalOcean.py | 101 +++--- cloudinit/sources/helpers/digitalocean.py | 218 +++++++++++++ .../unittests/test_datasource/test_digitalocean.py | 338 +++++++++++++++++---- 3 files changed, 543 insertions(+), 114 deletions(-) create mode 100644 cloudinit/sources/helpers/digitalocean.py (limited to 'tests') diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index fc596e17..c5770d5d 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -18,13 +18,12 @@ # DigitalOcean Droplet API: # https://developers.digitalocean.com/documentation/metadata/ -import json - from cloudinit import log as logging from cloudinit import sources -from cloudinit import url_helper from cloudinit import util +import cloudinit.sources.helpers.digitalocean as do_helper + LOG = logging.getLogger(__name__) BUILTIN_DS_CONFIG = { @@ -36,11 +35,13 @@ BUILTIN_DS_CONFIG = { MD_RETRIES = 30 MD_TIMEOUT = 2 MD_WAIT_RETRY = 2 +MD_USE_IPV4LL = True class DataSourceDigitalOcean(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.distro = distro self.metadata = dict() self.ds_cfg = util.mergemanydict([ util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}), @@ -48,80 +49,72 @@ class DataSourceDigitalOcean(sources.DataSource): self.metadata_address = self.ds_cfg['metadata_url'] self.retries = self.ds_cfg.get('retries', MD_RETRIES) self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) + self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL) self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) + self._network_config = None def _get_sysinfo(self): - # DigitalOcean embeds vendor ID and instance/droplet_id in the - # SMBIOS information - - LOG.debug("checking if instance is a DigitalOcean droplet") - - # Detect if we are on DigitalOcean and return the Droplet's ID - vendor_name = util.read_dmi_data("system-manufacturer") - if vendor_name != "DigitalOcean": - return (False, None) + return do_helper.read_sysinfo() - LOG.info("running on DigitalOcean") - - droplet_id = util.read_dmi_data("system-serial-number") - if droplet_id: - LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet" - "{}").format(droplet_id)) - else: - LOG.critical(("system identified via SMBIOS as a DigitalOcean " - "Droplet, but did not provide an ID. Please file a " - "support ticket at: " - "https://cloud.digitalocean.com/support/tickets/" - "new")) - - return (True, droplet_id) - - def get_data(self, apply_filter=False): + def get_data(self): (is_do, droplet_id) = self._get_sysinfo() # only proceed if we know we are on DigitalOcean if not is_do: return False - LOG.debug("reading metadata from {}".format(self.metadata_address)) - response = url_helper.readurl(self.metadata_address, - timeout=self.timeout, - sec_between=self.wait_retry, - retries=self.retries) + LOG.info("Running on digital ocean. 
droplet_id=%s" % droplet_id) - contents = util.decode_binary(response.contents) - decoded = json.loads(contents) + ipv4LL_nic = None + if self.use_ip4LL: + ipv4LL_nic = do_helper.assign_ipv4_link_local() - self.metadata = decoded - self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id) - self.metadata['local-hostname'] = decoded.get('hostname', droplet_id) - self.vendordata_raw = decoded.get("vendor_data", None) - self.userdata_raw = decoded.get("user_data", None) - return True + md = do_helper.read_metadata( + self.metadata_address, timeout=self.timeout, + sec_between=self.wait_retry, retries=self.retries) - def get_public_ssh_keys(self): - public_keys = self.metadata.get('public_keys', []) - if isinstance(public_keys, list): - return public_keys - else: - return [public_keys] + self.metadata_full = md + self.metadata['instance-id'] = md.get('droplet_id', droplet_id) + self.metadata['local-hostname'] = md.get('hostname', droplet_id) + self.metadata['interfaces'] = md.get('interfaces') + self.metadata['public-keys'] = md.get('public_keys') + self.metadata['availability_zone'] = md.get('region', 'default') + self.vendordata_raw = md.get("vendor_data", None) + self.userdata_raw = md.get("user_data", None) - @property - def availability_zone(self): - return self.metadata.get('region', 'default') + if ipv4LL_nic: + do_helper.del_ipv4_link_local(ipv4LL_nic) - @property - def launch_index(self): - return None + return True def check_instance_id(self, sys_cfg): return sources.instance_id_matches_system_uuid( self.get_instance_id(), 'system-serial-number') + @property + def network_config(self): + """Configure the networking. This needs to be done each boot, since + the IP information may have changed due to snapshot and/or + migration. + """ + + if self._network_config: + return self._network_config + + interfaces = self.metadata.get('interfaces') + LOG.debug(interfaces) + if not interfaces: + raise Exception("Unable to get meta-data from server....") + + nameservers = self.metadata_full['dns']['nameservers'] + self._network_config = do_helper.convert_network_configuration( + interfaces, nameservers) + return self._network_config + # Used to match classes to dependencies datasources = [ - (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )), ] diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py new file mode 100644 index 00000000..b0a721c2 --- /dev/null +++ b/cloudinit/sources/helpers/digitalocean.py @@ -0,0 +1,218 @@ +# vi: ts=4 expandtab +# +# Author: Ben Howard + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import json +import random + +from cloudinit import log as logging +from cloudinit import net as cloudnet +from cloudinit import url_helper +from cloudinit import util + +NIC_MAP = {'public': 'eth0', 'private': 'eth1'} + +LOG = logging.getLogger(__name__) + + +def assign_ipv4_link_local(nic=None): + """Bring up NIC using an address using link-local (ip4LL) IPs. On + DigitalOcean, the link-local domain is per-droplet routed, so there + is no risk of collisions. However, to be more safe, the ip4LL + address is random. + """ + + if not nic: + for cdev in sorted(cloudnet.get_devicelist()): + if cloudnet.is_physical(cdev): + nic = cdev + LOG.debug("assigned nic '%s' for link-local discovery", nic) + break + + if not nic: + raise RuntimeError("unable to find interfaces to access the" + "meta-data server. This droplet is broken.") + + addr = "169.254.{0}.{1}/16".format(random.randint(1, 168), + random.randint(0, 255)) + + ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic] + ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up'] + + if not util.which('ip'): + raise RuntimeError("No 'ip' command available to configure ip4LL " + "address") + + try: + (result, _err) = util.subp(ip_addr_cmd) + LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic) + + (result, _err) = util.subp(ip_link_cmd) + LOG.debug("brought device '%s' up", nic) + except Exception: + util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed." + " Droplet networking will be broken", addr, nic) + raise + + return nic + + +def del_ipv4_link_local(nic=None): + """Remove the ip4LL address. While this is not necessary, the ip4LL + address is extraneous and confusing to users. + """ + if not nic: + LOG.debug("no link_local address interface defined, skipping link " + "local address cleanup") + return + + LOG.debug("cleaning up ipv4LL address") + + ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic] + + try: + (result, _err) = util.subp(ip_addr_cmd) + LOG.debug("removed ip4LL addresses from %s", nic) + + except Exception as e: + util.logexc(LOG, "failed to remove ip4LL address from '%s'.", nic, e) + + +def convert_network_configuration(config, dns_servers): + """Convert the DigitalOcean Network description into Cloud-init's netconfig + format. 
+ + Example JSON: + {'public': [ + {'mac': '04:01:58:27:7f:01', + 'ipv4': {'gateway': '45.55.32.1', + 'netmask': '255.255.224.0', + 'ip_address': '45.55.50.93'}, + 'anchor_ipv4': { + 'gateway': '10.17.0.1', + 'netmask': '255.255.0.0', + 'ip_address': '10.17.0.9'}, + 'type': 'public', + 'ipv6': {'gateway': '....', + 'ip_address': '....', + 'cidr': 64}} + ], + 'private': [ + {'mac': '04:01:58:27:7f:02', + 'ipv4': {'gateway': '10.132.0.1', + 'netmask': '255.255.0.0', + 'ip_address': '10.132.75.35'}, + 'type': 'private'} + ] + } + """ + + def _get_subnet_part(pcfg, nameservers=None): + subpart = {'type': 'static', + 'control': 'auto', + 'address': pcfg.get('ip_address'), + 'gateway': pcfg.get('gateway')} + + if nameservers: + subpart['dns_nameservers'] = nameservers + + if ":" in pcfg.get('ip_address'): + subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'), + pcfg.get('cidr')) + else: + subpart['netmask'] = pcfg.get('netmask') + + return subpart + + all_nics = [] + for k in ('public', 'private'): + if k in config: + all_nics.extend(config[k]) + + macs_to_nics = cloudnet.get_interfaces_by_mac() + nic_configs = [] + + for nic in all_nics: + + mac_address = nic.get('mac') + sysfs_name = macs_to_nics.get(mac_address) + nic_type = nic.get('type', 'unknown') + # Note: the entry 'public' above contains a list, but + # the list will only ever have one nic inside it per digital ocean. + # If it ever had more than one nic, then this code would + # assign all 'public' the same name. + if_name = NIC_MAP.get(nic_type, sysfs_name) + + LOG.debug("mapped %s interface to %s, assigning name of %s", + mac_address, sysfs_name, if_name) + + ncfg = {'type': 'physical', + 'mac_address': mac_address, + 'name': if_name} + + subnets = [] + for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'): + raw_subnet = nic.get(netdef, None) + if not raw_subnet: + continue + + sub_part = _get_subnet_part(raw_subnet) + if nic_type == 'public' and 'anchor' not in netdef: + # add DNS resolvers to the public interfaces only + sub_part = _get_subnet_part(raw_subnet, dns_servers) + else: + # remove the gateway any non-public interfaces + if 'gateway' in sub_part: + del sub_part['gateway'] + + subnets.append(sub_part) + + ncfg['subnets'] = subnets + nic_configs.append(ncfg) + LOG.debug("nic '%s' configuration: %s", if_name, ncfg) + + return {'version': 1, 'config': nic_configs} + + +def read_metadata(url, timeout=2, sec_between=2, retries=30): + response = url_helper.readurl(url, timeout=timeout, + sec_between=sec_between, retries=retries) + if not response.ok(): + raise RuntimeError("unable to read metadata at %s" % url) + return json.loads(response.contents.decode()) + + +def read_sysinfo(): + # DigitalOcean embeds vendor ID and instance/droplet_id in the + # SMBIOS information + + # Detect if we are on DigitalOcean and return the Droplet's ID + vendor_name = util.read_dmi_data("system-manufacturer") + if vendor_name != "DigitalOcean": + return (False, None) + + droplet_id = util.read_dmi_data("system-serial-number") + if droplet_id: + LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s", + droplet_id) + else: + msg = ("system identified via SMBIOS as a DigitalOcean " + "Droplet, but did not provide an ID. 
Please file a " + "support ticket at: " + "https://cloud.digitalocean.com/support/tickets/new") + LOG.critical(msg) + raise RuntimeError(msg) + + return (True, droplet_id) diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index f5d2ef35..bdfe0ba2 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -20,25 +20,123 @@ import json from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceDigitalOcean +from cloudinit.sources.helpers import digitalocean -from .. import helpers as test_helpers -from ..helpers import HttprettyTestCase - -httpretty = test_helpers.import_httpretty() +from ..helpers import mock, TestCase DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co", "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"] DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co" -DO_META = { - 'user_data': 'user_data_here', - 'vendor_data': 'vendor_data_here', - 'public_keys': DO_SINGLE_KEY, - 'region': 'nyc3', - 'id': '2000000', - 'hostname': 'cloudinit-test', +# the following JSON was taken from droplet (that's why its a string) +DO_META = json.loads(""" +{ + "droplet_id": "22532410", + "hostname": "utl-96268", + "vendor_data": "vendordata goes here", + "user_data": "userdata goes here", + "public_keys": "", + "auth_key": "authorization_key", + "region": "nyc3", + "interfaces": { + "private": [ + { + "ipv4": { + "ip_address": "10.132.6.205", + "netmask": "255.255.0.0", + "gateway": "10.132.0.1" + }, + "mac": "04:01:57:d1:9e:02", + "type": "private" + } + ], + "public": [ + { + "ipv4": { + "ip_address": "192.0.0.20", + "netmask": "255.255.255.0", + "gateway": "104.236.0.1" + }, + "ipv6": { + "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000", + "cidr": 64, + "gateway": "2604:A880:0800:0000:0000:0000:0000:0001" + }, + "anchor_ipv4": { + "ip_address": "10.0.0.5", + "netmask": "255.255.0.0", + "gateway": "10.0.0.1" + }, + "mac": "04:01:57:d1:9e:01", + "type": "public" + } + ] + }, + "floating_ip": { + "ipv4": { + "active": false + } + }, + "dns": { + "nameservers": [ + "2001:4860:4860::8844", + "2001:4860:4860::8888", + "8.8.8.8" + ] + } +} +""") + +# This has no private interface +DO_META_2 = { + "droplet_id": 27223699, + "hostname": "smtest1", + "vendor_data": "\n".join([ + ('"Content-Type: multipart/mixed; ' + 'boundary=\"===============8645434374073493512==\"'), + 'MIME-Version: 1.0', + '', + '--===============8645434374073493512==', + 'MIME-Version: 1.0' + 'Content-Type: text/cloud-config; charset="us-ascii"' + 'Content-Transfer-Encoding: 7bit' + 'Content-Disposition: attachment; filename="cloud-config"' + '', + '#cloud-config', + 'disable_root: false', + 'manage_etc_hosts: true', + '', + '', + '--===============8645434374073493512==' + ]), + "public_keys": [ + "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies" + ], + "auth_key": "88888888888888888888888888888888", + "region": "nyc3", + "interfaces": { + "public": [{ + "ipv4": { + "ip_address": "45.55.249.133", + "netmask": "255.255.192.0", + "gateway": "45.55.192.1" + }, + "anchor_ipv4": { + "ip_address": "10.17.0.5", + "netmask": "255.255.0.0", + "gateway": "10.17.0.1" + }, + "mac": "ae:cc:08:7c:88:00", + "type": "public" + }] + }, + "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}}, + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "tags": None, } +DO_META['public_keys'] = DO_SINGLE_KEY + MD_URL = 
'http://169.254.169.254/metadata/v1.json' @@ -46,69 +144,189 @@ def _mock_dmi(): return (True, DO_META.get('id')) -def _request_callback(method, uri, headers): - return (200, headers, json.dumps(DO_META)) - - -class TestDataSourceDigitalOcean(HttprettyTestCase): +class TestDataSourceDigitalOcean(TestCase): """ Test reading the meta-data """ - def setUp(self): - self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean( - settings.CFG_BUILTIN, None, - helpers.Paths({})) - self.ds._get_sysinfo = _mock_dmi - super(TestDataSourceDigitalOcean, self).setUp() - - @httpretty.activate - def test_connection(self): - httpretty.register_uri( - httpretty.GET, MD_URL, - body=json.dumps(DO_META)) - - success = self.ds.get_data() - self.assertTrue(success) - - @httpretty.activate - def test_metadata(self): - httpretty.register_uri( - httpretty.GET, MD_URL, - body=_request_callback) - self.ds.get_data() + def get_ds(self, get_sysinfo=_mock_dmi): + ds = DataSourceDigitalOcean.DataSourceDigitalOcean( + settings.CFG_BUILTIN, None, helpers.Paths({})) + ds.use_ip4LL = False + if get_sysinfo is not None: + ds._get_sysinfo = get_sysinfo + return ds - self.assertEqual(DO_META.get('user_data'), - self.ds.get_userdata_raw()) + @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo') + def test_returns_false_not_on_docean(self, m_read_sysinfo): + m_read_sysinfo.return_value = (False, None) + ds = self.get_ds(get_sysinfo=None) + self.assertEqual(False, ds.get_data()) + m_read_sysinfo.assert_called() - self.assertEqual(DO_META.get('vendor_data'), - self.ds.get_vendordata_raw()) + @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + def test_metadata(self, mock_readmd): + mock_readmd.return_value = DO_META.copy() - self.assertEqual(DO_META.get('region'), - self.ds.availability_zone) + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) - self.assertEqual(DO_META.get('id'), - self.ds.get_instance_id()) + mock_readmd.assert_called() - self.assertEqual(DO_META.get('hostname'), - self.ds.get_hostname()) + self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) + self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) + self.assertEqual(DO_META.get('region'), ds.availability_zone) + self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id()) + self.assertEqual(DO_META.get('hostname'), ds.get_hostname()) # Single key self.assertEqual([DO_META.get('public_keys')], - self.ds.get_public_ssh_keys()) + ds.get_public_ssh_keys()) - self.assertIsInstance(self.ds.get_public_ssh_keys(), list) + self.assertIsInstance(ds.get_public_ssh_keys(), list) - @httpretty.activate - def test_multiple_ssh_keys(self): - DO_META['public_keys'] = DO_MULTIPLE_KEYS - httpretty.register_uri( - httpretty.GET, MD_URL, - body=_request_callback) - self.ds.get_data() + @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') + def test_multiple_ssh_keys(self, mock_readmd): + metadata = DO_META.copy() + metadata['public_keys'] = DO_MULTIPLE_KEYS + mock_readmd.return_value = metadata.copy() + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + mock_readmd.assert_called() # Multiple keys - self.assertEqual(DO_META.get('public_keys'), - self.ds.get_public_ssh_keys()) + self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) + self.assertIsInstance(ds.get_public_ssh_keys(), list) + + +class TestNetworkConvert(TestCase): + + def _get_networking(self): + netcfg = digitalocean.convert_network_configuration( + DO_META['interfaces'], 
DO_META['dns']['nameservers']) + self.assertIn('config', netcfg) + return netcfg + + def test_networking_defined(self): + netcfg = self._get_networking() + self.assertIsNotNone(netcfg) + + for nic_def in netcfg.get('config'): + print(json.dumps(nic_def, indent=3)) + n_type = nic_def.get('type') + n_subnets = nic_def.get('type') + n_name = nic_def.get('name') + n_mac = nic_def.get('mac_address') + + self.assertIsNotNone(n_type) + self.assertIsNotNone(n_subnets) + self.assertIsNotNone(n_name) + self.assertIsNotNone(n_mac) + + def _get_nic_definition(self, int_type, expected_name): + """helper function to return if_type (i.e. public) and the expected + name used by cloud-init (i.e eth0)""" + netcfg = self._get_networking() + meta_def = (DO_META.get('interfaces')).get(int_type)[0] + + self.assertEqual(int_type, meta_def.get('type')) + + for nic_def in netcfg.get('config'): + print(nic_def) + if nic_def.get('name') == expected_name: + return nic_def, meta_def + + def _get_match_subn(self, subnets, ip_addr): + """get the matching subnet definition based on ip address""" + for subn in subnets: + address = subn.get('address') + self.assertIsNotNone(address) + + # equals won't work because of ipv6 addressing being in + # cidr notation, i.e fe00::1/64 + if ip_addr in address: + print(json.dumps(subn, indent=3)) + return subn + + def test_public_interface_defined(self): + """test that the public interface is defined as eth0""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + self.assertEqual('eth0', nic_def.get('name')) + self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) + self.assertEqual('physical', nic_def.get('type')) + + def test_private_interface_defined(self): + """test that the private interface is defined as eth1""" + (nic_def, meta_def) = self._get_nic_definition('private', 'eth1') + self.assertEqual('eth1', nic_def.get('name')) + self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address')) + self.assertEqual('physical', nic_def.get('type')) + + def _check_dns_nameservers(self, subn_def): + self.assertIn('dns_nameservers', subn_def) + expected_nameservers = DO_META['dns']['nameservers'] + nic_nameservers = subn_def.get('dns_nameservers') + self.assertEqual(expected_nameservers, nic_nameservers) + + def test_public_interface_ipv6(self): + """test public ipv6 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv6_def = meta_def.get('ipv6') + self.assertIsNotNone(ipv6_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv6_def.get('ip_address')) + + cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'), + ipv6_def.get('cidr')) + + self.assertEqual(cidr_notated_address, subn_def.get('address')) + self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway')) + self._check_dns_nameservers(subn_def) + + def test_public_interface_ipv4(self): + """test public ipv4 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('ipv4') + self.assertIsNotNone(ipv4_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv4_def.get('ip_address')) + + self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) + self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway')) + self._check_dns_nameservers(subn_def) + + def test_public_interface_anchor_ipv4(self): + """test public ipv4 addressing""" + (nic_def, meta_def) = self._get_nic_definition('public', 'eth0') + ipv4_def = meta_def.get('anchor_ipv4') + 
self.assertIsNotNone(ipv4_def) + + subn_def = self._get_match_subn(nic_def.get('subnets'), + ipv4_def.get('ip_address')) + + self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask')) + self.assertNotIn('gateway', subn_def) + + def test_convert_without_private(self): + netcfg = digitalocean.convert_network_configuration( + DO_META_2['interfaces'], DO_META_2['dns']['nameservers']) - self.assertIsInstance(self.ds.get_public_ssh_keys(), list) + byname = {} + for i in netcfg['config']: + if 'name' in i: + if i['name'] in byname: + raise ValueError("name '%s' in config twice: %s" % + (i['name'], netcfg)) + byname[i['name']] = i + self.assertTrue('eth0' in byname) + self.assertTrue('subnets' in byname['eth0']) + eth0 = byname['eth0'] + self.assertEqual( + sorted(['45.55.249.133', '10.17.0.5']), + sorted([i['address'] for i in eth0['subnets']])) -- cgit v1.2.3 From 02f6c4bb8cef17b3fe04ef4dc1ef199e20aeb4d9 Mon Sep 17 00:00:00 2001 From: Stéphane Graber Date: Thu, 29 Sep 2016 01:40:32 -0400 Subject: lxd: Update network config for LXD 2.3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prior to LXD 2.3, the bridge configuration was done through distro packaging. Thus, lxd module interacted with debconf. With 2.3 and higher, this is now done inside LXD itself, so we need to use "lxc network" there. For now, this perfectly matches what we had before with debconf and doesn't cover any of the new options. We can always add those later. A set of tests similar to what we had for debconf has been added to make sure things look good. This is tested in Yakkety container running LXD 2.3 and all options seem to be passed through as expected, giving me the bridge I defined. Signed-off-by: Stéphane Graber --- cloudinit/config/cc_lxd.py | 107 +++++++++++++++++++---- tests/unittests/test_handler/test_handler_lxd.py | 51 +++++++++++ 2 files changed, 140 insertions(+), 18 deletions(-) (limited to 'tests') diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 0086840f..cead2c95 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -46,6 +46,7 @@ Example config: """ from cloudinit import util +import os distros = ['ubuntu'] @@ -105,25 +106,43 @@ def handle(name, cfg, cloud, log, args): # Set up lxd-bridge if bridge config is given dconf_comm = "debconf-communicate" - if bridge_cfg and util.which(dconf_comm): - debconf = bridge_to_debconf(bridge_cfg) + if bridge_cfg: + if os.path.exists("/etc/default/lxd-bridge") \ + and util.which(dconf_comm): + # Bridge configured through packaging + + debconf = bridge_to_debconf(bridge_cfg) + + # Update debconf database + try: + log.debug("Setting lxd debconf via " + dconf_comm) + data = "\n".join(["set %s %s" % (k, v) + for k, v in debconf.items()]) + "\n" + util.subp(['debconf-communicate'], data) + except Exception: + util.logexc(log, "Failed to run '%s' for lxd with" % + dconf_comm) + + # Remove the existing configuration file (forces re-generation) + util.del_file("/etc/default/lxd-bridge") + + # Run reconfigure + log.debug("Running dpkg-reconfigure for lxd") + util.subp(['dpkg-reconfigure', 'lxd', + '--frontend=noninteractive']) + else: + # Built-in LXD bridge support + cmd_create, cmd_attach = bridge_to_cmd(bridge_cfg) + if cmd_create: + log.debug("Creating lxd bridge: %s" % + " ".join(cmd_create)) + util.subp(cmd_create) + + if cmd_attach: + log.debug("Setting up default lxd bridge: %s" % + " ".join(cmd_create)) + util.subp(cmd_attach) - # Update debconf database - try: - 
log.debug("Setting lxd debconf via " + dconf_comm) - data = "\n".join(["set %s %s" % (k, v) - for k, v in debconf.items()]) + "\n" - util.subp(['debconf-communicate'], data) - except Exception: - util.logexc(log, "Failed to run '%s' for lxd with" % dconf_comm) - - # Remove the existing configuration file (forces re-generation) - util.del_file("/etc/default/lxd-bridge") - - # Run reconfigure - log.debug("Running dpkg-reconfigure for lxd") - util.subp(['dpkg-reconfigure', 'lxd', - '--frontend=noninteractive']) elif bridge_cfg: raise RuntimeError( "Unable to configure lxd bridge without %s." + dconf_comm) @@ -177,3 +196,55 @@ def bridge_to_debconf(bridge_cfg): raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) return debconf + + +def bridge_to_cmd(bridge_cfg): + if bridge_cfg.get("mode") == "none": + return None, None + + bridge_name = bridge_cfg.get("name", "lxdbr0") + cmd_create = [] + cmd_attach = ["lxc", "network", "attach-profile", bridge_name, + "default", "eth0", "--force-local"] + + if bridge_cfg.get("mode") == "existing": + return None, cmd_attach + + if bridge_cfg.get("mode") != "new": + raise Exception("invalid bridge mode \"%s\"" % bridge_cfg.get("mode")) + + cmd_create = ["lxc", "network", "create", bridge_name] + + if bridge_cfg.get("ipv4_address") and bridge_cfg.get("ipv4_netmask"): + cmd_create.append("ipv4.address=%s/%s" % + (bridge_cfg.get("ipv4_address"), + bridge_cfg.get("ipv4_netmask"))) + + if bridge_cfg.get("ipv4_nat", "true") == "true": + cmd_create.append("ipv4.nat=true") + + if bridge_cfg.get("ipv4_dhcp_first") and \ + bridge_cfg.get("ipv4_dhcp_last"): + dhcp_range = "%s-%s" % (bridge_cfg.get("ipv4_dhcp_first"), + bridge_cfg.get("ipv4_dhcp_last")) + cmd_create.append("ipv4.dhcp.ranges=%s" % dhcp_range) + else: + cmd_create.append("ipv4.address=none") + + if bridge_cfg.get("ipv6_address") and bridge_cfg.get("ipv6_netmask"): + cmd_create.append("ipv6.address=%s/%s" % + (bridge_cfg.get("ipv6_address"), + bridge_cfg.get("ipv6_netmask"))) + + if bridge_cfg.get("ipv6_nat", "false") == "true": + cmd_create.append("ipv6.nat=true") + + else: + cmd_create.append("ipv6.address=none") + + if bridge_cfg.get("domain"): + cmd_create.append("dns.domain=%s" % bridge_cfg.get("domain")) + + cmd_create.append("--force-local") + + return cmd_create, cmd_attach diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py index 6f90defb..14366a10 100644 --- a/tests/unittests/test_handler/test_handler_lxd.py +++ b/tests/unittests/test_handler/test_handler_lxd.py @@ -132,3 +132,54 @@ class TestLxd(t_help.TestCase): cc_lxd.bridge_to_debconf(data), {"lxd/setup-bridge": "false", "lxd/bridge-name": ""}) + + def test_lxd_cmd_new_full(self): + data = {"mode": "new", + "name": "testbr0", + "ipv4_address": "10.0.8.1", + "ipv4_netmask": "24", + "ipv4_dhcp_first": "10.0.8.2", + "ipv4_dhcp_last": "10.0.8.254", + "ipv4_dhcp_leases": "250", + "ipv4_nat": "true", + "ipv6_address": "fd98:9e0:3744::1", + "ipv6_netmask": "64", + "ipv6_nat": "true", + "domain": "lxd"} + self.assertEqual( + cc_lxd.bridge_to_cmd(data), + (["lxc", "network", "create", "testbr0", + "ipv4.address=10.0.8.1/24", "ipv4.nat=true", + "ipv4.dhcp.ranges=10.0.8.2-10.0.8.254", + "ipv6.address=fd98:9e0:3744::1/64", + "ipv6.nat=true", "dns.domain=lxd", + "--force-local"], + ["lxc", "network", "attach-profile", + "testbr0", "default", "eth0", "--force-local"])) + + def test_lxd_cmd_new_partial(self): + data = {"mode": "new", + "ipv6_address": "fd98:9e0:3744::1", + 
"ipv6_netmask": "64", + "ipv6_nat": "true"} + self.assertEqual( + cc_lxd.bridge_to_cmd(data), + (["lxc", "network", "create", "lxdbr0", "ipv4.address=none", + "ipv6.address=fd98:9e0:3744::1/64", "ipv6.nat=true", + "--force-local"], + ["lxc", "network", "attach-profile", + "lxdbr0", "default", "eth0", "--force-local"])) + + def test_lxd_cmd_existing(self): + data = {"mode": "existing", + "name": "testbr0"} + self.assertEqual( + cc_lxd.bridge_to_cmd(data), + (None, ["lxc", "network", "attach-profile", + "testbr0", "default", "eth0", "--force-local"])) + + def test_lxd_cmd_none(self): + data = {"mode": "none"} + self.assertEqual( + cc_lxd.bridge_to_cmd(data), + (None, None)) -- cgit v1.2.3 From 760a4f1852ee7241c00804ebf210b6a4b1e9063d Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Tue, 4 Oct 2016 09:56:10 -0500 Subject: unittests: fix use of mock 2.0 'assert_called' when running make check Some of the new DigitalOcean unittests were written to use 'assert_called', which is only available in mock versions 2.0. Because of this, the failure would only occur in releases less than yakkety and not in 'tox'. Add a 'xenial' entry to tox.ini with versions from xenial. --- .../unittests/test_datasource/test_digitalocean.py | 6 ++--- tox.ini | 27 +++++++++++++++++++++- 2 files changed, 29 insertions(+), 4 deletions(-) (limited to 'tests') diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index bdfe0ba2..7bde0820 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -162,7 +162,7 @@ class TestDataSourceDigitalOcean(TestCase): m_read_sysinfo.return_value = (False, None) ds = self.get_ds(get_sysinfo=None) self.assertEqual(False, ds.get_data()) - m_read_sysinfo.assert_called() + self.assertTrue(m_read_sysinfo.called) @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata') def test_metadata(self, mock_readmd): @@ -172,7 +172,7 @@ class TestDataSourceDigitalOcean(TestCase): ret = ds.get_data() self.assertTrue(ret) - mock_readmd.assert_called() + self.assertTrue(mock_readmd.called) self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw()) self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw()) @@ -196,7 +196,7 @@ class TestDataSourceDigitalOcean(TestCase): ret = ds.get_data() self.assertTrue(ret) - mock_readmd.assert_called() + self.assertTrue(mock_readmd.called) # Multiple keys self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys()) diff --git a/tox.ini b/tox.ini index 9424ae51..729de2a6 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27,py3,flake8 +envlist = py27, py3, flake8, xenial recreate = True [testenv] @@ -34,3 +34,28 @@ basepython = python3 deps = {[testenv]deps} sphinx commands = {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html} + +[testenv:xenial] +basepython = python3 +deps = + # requirements + jinja2==2.8 + pyyaml==3.11 + PrettyTable==0.7.2 + oauthlib==1.0.3 + pyserial==3.0.1 + configobj==5.0.6 + requests==2.9.1 + # jsonpatch ubuntu is 1.10, not 1.19 (#839779) + jsonpatch==1.10 + six==1.10.0 + # test-requirements + httpretty==0.8.6 + mock==1.3.0 + nose==1.3.7 + unittest2==1.1.0 + contextlib2==0.5.1 + pep8==1.7.0 + pyflakes==1.1.0 + flake8==2.5.4 + hacking==0.10.2 -- cgit v1.2.3 From c4aeba3f54eca687dd11837d1ec59de9f82c1cf6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 30 Sep 2016 11:13:51 -0400 Subject: tests: silence the Cheetah UserWarning about NameMapper C version. 
This silences a warning made by Cheetah in pip installed environments: UserWarning: You don't have the C version of NameMapper installed! I'm disabling Cheetah's useStackFrames option ... The reason for the monkey patching is that the warning goes to stderr during nose and breaks up its expected output. The side affect of it is that tests would run with Cheetah's 'useStackFrames' enabled which is "painfully slow with the Python version of NameMapper". --- tests/unittests/__init__.py | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'tests') diff --git a/tests/unittests/__init__.py b/tests/unittests/__init__.py index e69de29b..1b34b5af 100644 --- a/tests/unittests/__init__.py +++ b/tests/unittests/__init__.py @@ -0,0 +1,9 @@ +try: + # For test cases, avoid the following UserWarning to stderr: + # You don't have the C version of NameMapper installed ... + from Cheetah import NameMapper as _nm + _nm.C_VERSION = True +except ImportError: + pass + +# vi: ts=4 expandtab -- cgit v1.2.3 From f0747c4b4cf073273e11d383f0354257be7276ed Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Fri, 30 Sep 2016 22:30:48 -0700 Subject: Move user/group functions to new ug_util file The amount of code to do user and group normalization and extraction deserves its own file so move the code that does this to a new file and update references to the old location. This removes some of the funkyness done in config modules to avoid namespace and attribute clashes as well. --- cloudinit/config/cc_byobu.py | 11 +- cloudinit/config/cc_set_passwords.py | 10 +- cloudinit/config/cc_ssh.py | 10 +- cloudinit/config/cc_ssh_authkey_fingerprints.py | 8 +- cloudinit/config/cc_ssh_import_id.py | 8 +- cloudinit/distros/__init__.py | 269 ------------------ cloudinit/distros/ug_util.py | 299 +++++++++++++++++++++ .../test_distros/test_user_data_normalize.py | 5 +- 8 files changed, 315 insertions(+), 305 deletions(-) mode change 100644 => 100755 cloudinit/config/cc_byobu.py mode change 100644 => 100755 cloudinit/config/cc_set_passwords.py mode change 100644 => 100755 cloudinit/config/cc_ssh.py mode change 100644 => 100755 cloudinit/config/cc_ssh_authkey_fingerprints.py mode change 100644 => 100755 cloudinit/config/cc_ssh_import_id.py mode change 100644 => 100755 cloudinit/distros/__init__.py create mode 100755 cloudinit/distros/ug_util.py mode change 100644 => 100755 tests/unittests/test_distros/test_user_data_normalize.py (limited to 'tests') diff --git a/cloudinit/config/cc_byobu.py b/cloudinit/config/cc_byobu.py old mode 100644 new mode 100755 index 1f00dd90..4a616e26 --- a/cloudinit/config/cc_byobu.py +++ b/cloudinit/config/cc_byobu.py @@ -50,12 +50,7 @@ Valid configuration options for this module are: byobu_by_default: """ - -# Ensure this is aliased to a name not 'distros' -# since the module attribute 'distros' -# is a list of distros that are supported, not a sub-module -from cloudinit import distros as ds - +from cloudinit.distros import ug_util from cloudinit import util distros = ['ubuntu', 'debian'] @@ -94,8 +89,8 @@ def handle(name, cfg, cloud, log, args): shcmd = "" if mod_user: - (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) - (user, _user_config) = ds.extract_default(users) + (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) + (user, _user_config) = ug_util.extract_default(users) if not user: log.warn(("No default byobu user provided, " "can not launch %s for the default user"), bl_inst) diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py 
old mode 100644 new mode 100755 index 94716017..6fc00517 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -66,11 +66,7 @@ enabled, disabled, or left to system defaults using ``ssh_pwauth``. import sys -# Ensure this is aliased to a name not 'distros' -# since the module attribute 'distros' -# is a list of distros that are supported, not a sub-module -from cloudinit import distros as ds - +from cloudinit.distros import ug_util from cloudinit import ssh_util from cloudinit import util @@ -99,8 +95,8 @@ def handle(_name, cfg, cloud, log, args): expire = util.get_cfg_option_bool(chfg, 'expire', expire) if not plist and password: - (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) - (user, _user_config) = ds.extract_default(users) + (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) + (user, _user_config) = ug_util.extract_default(users) if user: plist = "%s:%s" % (user, password) else: diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py old mode 100644 new mode 100755 index 6138fb53..576fa58a --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -109,11 +109,7 @@ import glob import os import sys -# Ensure this is aliased to a name not 'distros' -# since the module attribute 'distros' -# is a list of distros that are supported, not a sub-module -from cloudinit import distros as ds - +from cloudinit.distros import ug_util from cloudinit import ssh_util from cloudinit import util @@ -197,8 +193,8 @@ def handle(_name, cfg, cloud, log, _args): "file %s", keytype, keyfile) try: - (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) - (user, _user_config) = ds.extract_default(users) + (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) + (user, _user_config) = ug_util.extract_default(users) disable_root = util.get_cfg_option_bool(cfg, "disable_root", True) disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts", DISABLE_ROOT_OPTS) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py old mode 100644 new mode 100755 index 6f3d0ee2..7eeb0f84 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -42,11 +42,7 @@ import hashlib from prettytable import PrettyTable -# Ensure this is aliased to a name not 'distros' -# since the module attribute 'distros' -# is a list of distros that are supported, not a sub-module -from cloudinit import distros as ds - +from cloudinit.distros import ug_util from cloudinit import ssh_util from cloudinit import util @@ -119,7 +115,7 @@ def handle(name, cfg, cloud, log, _args): return hash_meth = util.get_cfg_option_str(cfg, "authkey_hash", "md5") - (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) + (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) for (user_name, _cfg) in users.items(): (key_fn, key_entries) = ssh_util.extract_authorized_keys(user_name) _pprint_key_entries(user_name, key_fn, diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py old mode 100644 new mode 100755 index 99359c87..1be96dc5 --- a/cloudinit/config/cc_ssh_import_id.py +++ b/cloudinit/config/cc_ssh_import_id.py @@ -42,11 +42,7 @@ either ``lp:`` for launchpad or ``gh:`` for github to the username. 
- lp:user """ -# Ensure this is aliased to a name not 'distros' -# since the module attribute 'distros' -# is a list of distros that are supported, not a sub-module -from cloudinit import distros as ds - +from cloudinit.distros import ug_util from cloudinit import util import pwd @@ -67,7 +63,7 @@ def handle(_name, cfg, cloud, log, args): return # import for cloudinit created users - (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro) + (users, _groups) = ug_util.normalize_users_groups(cfg, cloud.distro) elist = [] for (user, user_cfg) in users.items(): import_ids = [] diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py old mode 100644 new mode 100755 index b1192e84..78adf5f9 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -685,275 +685,6 @@ def _get_arch_package_mirror_info(package_mirrors, arch): return default -# Normalizes a input group configuration -# which can be a comma seperated list of -# group names, or a list of group names -# or a python dictionary of group names -# to a list of members of that group. -# -# The output is a dictionary of group -# names => members of that group which -# is the standard form used in the rest -# of cloud-init -def _normalize_groups(grp_cfg): - if isinstance(grp_cfg, six.string_types): - grp_cfg = grp_cfg.strip().split(",") - if isinstance(grp_cfg, list): - c_grp_cfg = {} - for i in grp_cfg: - if isinstance(i, dict): - for k, v in i.items(): - if k not in c_grp_cfg: - if isinstance(v, list): - c_grp_cfg[k] = list(v) - elif isinstance(v, six.string_types): - c_grp_cfg[k] = [v] - else: - raise TypeError("Bad group member type %s" % - type_utils.obj_name(v)) - else: - if isinstance(v, list): - c_grp_cfg[k].extend(v) - elif isinstance(v, six.string_types): - c_grp_cfg[k].append(v) - else: - raise TypeError("Bad group member type %s" % - type_utils.obj_name(v)) - elif isinstance(i, six.string_types): - if i not in c_grp_cfg: - c_grp_cfg[i] = [] - else: - raise TypeError("Unknown group name type %s" % - type_utils.obj_name(i)) - grp_cfg = c_grp_cfg - groups = {} - if isinstance(grp_cfg, dict): - for (grp_name, grp_members) in grp_cfg.items(): - groups[grp_name] = util.uniq_merge_sorted(grp_members) - else: - raise TypeError(("Group config must be list, dict " - " or string types only and not %s") % - type_utils.obj_name(grp_cfg)) - return groups - - -# Normalizes a input group configuration -# which can be a comma seperated list of -# user names, or a list of string user names -# or a list of dictionaries with components -# that define the user config + 'name' (if -# a 'name' field does not exist then the -# default user is assumed to 'own' that -# configuration. -# -# The output is a dictionary of user -# names => user config which is the standard -# form used in the rest of cloud-init. Note -# the default user will have a special config -# entry 'default' which will be marked as true -# all other users will be marked as false. 
-def _normalize_users(u_cfg, def_user_cfg=None): - if isinstance(u_cfg, dict): - ad_ucfg = [] - for (k, v) in u_cfg.items(): - if isinstance(v, (bool, int, float) + six.string_types): - if util.is_true(v): - ad_ucfg.append(str(k)) - elif isinstance(v, dict): - v['name'] = k - ad_ucfg.append(v) - else: - raise TypeError(("Unmappable user value type %s" - " for key %s") % (type_utils.obj_name(v), k)) - u_cfg = ad_ucfg - elif isinstance(u_cfg, six.string_types): - u_cfg = util.uniq_merge_sorted(u_cfg) - - users = {} - for user_config in u_cfg: - if isinstance(user_config, (list,) + six.string_types): - for u in util.uniq_merge(user_config): - if u and u not in users: - users[u] = {} - elif isinstance(user_config, dict): - if 'name' in user_config: - n = user_config.pop('name') - prev_config = users.get(n) or {} - users[n] = util.mergemanydict([prev_config, - user_config]) - else: - # Assume the default user then - prev_config = users.get('default') or {} - users['default'] = util.mergemanydict([prev_config, - user_config]) - else: - raise TypeError(("User config must be dictionary/list " - " or string types only and not %s") % - type_utils.obj_name(user_config)) - - # Ensure user options are in the right python friendly format - if users: - c_users = {} - for (uname, uconfig) in users.items(): - c_uconfig = {} - for (k, v) in uconfig.items(): - k = k.replace('-', '_').strip() - if k: - c_uconfig[k] = v - c_users[uname] = c_uconfig - users = c_users - - # Fixup the default user into the real - # default user name and replace it... - def_user = None - if users and 'default' in users: - def_config = users.pop('default') - if def_user_cfg: - # Pickup what the default 'real name' is - # and any groups that are provided by the - # default config - def_user_cfg = def_user_cfg.copy() - def_user = def_user_cfg.pop('name') - def_groups = def_user_cfg.pop('groups', []) - # Pickup any config + groups for that user name - # that we may have previously extracted - parsed_config = users.pop(def_user, {}) - parsed_groups = parsed_config.get('groups', []) - # Now merge our extracted groups with - # anything the default config provided - users_groups = util.uniq_merge_sorted(parsed_groups, def_groups) - parsed_config['groups'] = ",".join(users_groups) - # The real config for the default user is the - # combination of the default user config provided - # by the distro, the default user config provided - # by the above merging for the user 'default' and - # then the parsed config from the user's 'real name' - # which does not have to be 'default' (but could be) - users[def_user] = util.mergemanydict([def_user_cfg, - def_config, - parsed_config]) - - # Ensure that only the default user that we - # found (if any) is actually marked as being - # the default user - if users: - for (uname, uconfig) in users.items(): - if def_user and uname == def_user: - uconfig['default'] = True - else: - uconfig['default'] = False - - return users - - -# Normalizes a set of user/users and group -# dictionary configuration into a useable -# format that the rest of cloud-init can -# understand using the default user -# provided by the input distrobution (if any) -# to allow for mapping of the 'default' user. 
-# -# Output is a dictionary of group names -> [member] (list) -# and a dictionary of user names -> user configuration (dict) -# -# If 'user' exists it will override -# the 'users'[0] entry (if a list) otherwise it will -# just become an entry in the returned dictionary (no override) -def normalize_users_groups(cfg, distro): - if not cfg: - cfg = {} - - users = {} - groups = {} - if 'groups' in cfg: - groups = _normalize_groups(cfg['groups']) - - # Handle the previous style of doing this where the first user - # overrides the concept of the default user if provided in the user: XYZ - # format. - old_user = {} - if 'user' in cfg and cfg['user']: - old_user = cfg['user'] - # Translate it into the format that is more useful - # going forward - if isinstance(old_user, six.string_types): - old_user = { - 'name': old_user, - } - if not isinstance(old_user, dict): - LOG.warn(("Format for 'user' key must be a string or " - "dictionary and not %s"), type_utils.obj_name(old_user)) - old_user = {} - - # If no old user format, then assume the distro - # provides what the 'default' user maps to, but notice - # that if this is provided, we won't automatically inject - # a 'default' user into the users list, while if a old user - # format is provided we will. - distro_user_config = {} - try: - distro_user_config = distro.get_default_user() - except NotImplementedError: - LOG.warn(("Distro has not implemented default user " - "access. No distribution provided default user" - " will be normalized.")) - - # Merge the old user (which may just be an empty dict when not - # present with the distro provided default user configuration so - # that the old user style picks up all the distribution specific - # attributes (if any) - default_user_config = util.mergemanydict([old_user, distro_user_config]) - - base_users = cfg.get('users', []) - if not isinstance(base_users, (list, dict) + six.string_types): - LOG.warn(("Format for 'users' key must be a comma separated string" - " or a dictionary or a list and not %s"), - type_utils.obj_name(base_users)) - base_users = [] - - if old_user: - # Ensure that when user: is provided that this user - # always gets added (as the default user) - if isinstance(base_users, list): - # Just add it on at the end... 
-            base_users.append({'name': 'default'})
-        elif isinstance(base_users, dict):
-            base_users['default'] = dict(base_users).get('default', True)
-        elif isinstance(base_users, six.string_types):
-            # Just append it on to be re-parsed later
-            base_users += ",default"
-
-    users = _normalize_users(base_users, default_user_config)
-    return (users, groups)
-
-
-# Given a user dictionary config it will
-# extract the default user name and user config
-# from that list and return that tuple or
-# return (None, None) if no default user is
-# found in the given input
-def extract_default(users, default_name=None, default_config=None):
-    if not users:
-        users = {}
-
-    def safe_find(entry):
-        config = entry[1]
-        if not config or 'default' not in config:
-            return False
-        else:
-            return config['default']
-
-    tmp_users = users.items()
-    tmp_users = dict(filter(safe_find, tmp_users))
-    if not tmp_users:
-        return (default_name, default_config)
-    else:
-        name = list(tmp_users)[0]
-        config = tmp_users[name]
-        config.pop('default', None)
-        return (name, config)
-
-
 def fetch(name):
     locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
     if not locs:
diff --git a/cloudinit/distros/ug_util.py b/cloudinit/distros/ug_util.py
new file mode 100755
index 00000000..99301530
--- /dev/null
+++ b/cloudinit/distros/ug_util.py
@@ -0,0 +1,299 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012, 2013 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser
+# Author: Juerg Haefliger
+# Author: Joshua Harlow
+# Author: Ben Howard
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import six
+
+from cloudinit import log as logging
+from cloudinit import type_utils
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+# Normalizes an input group configuration,
+# which can be a comma separated list of
+# group names, or a list of group names,
+# or a python dictionary of group names
+# to a list of members of that group.
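+# (For example, "admins,users", ["admins", "users"] and
+# {'admins': [], 'users': []} all normalize to the same two
+# groups with no extra members.)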
+#
+# The output is a dictionary of group
+# names => members of that group, which
+# is the standard form used in the rest
+# of cloud-init
+def _normalize_groups(grp_cfg):
+    if isinstance(grp_cfg, six.string_types):
+        grp_cfg = grp_cfg.strip().split(",")
+    if isinstance(grp_cfg, list):
+        c_grp_cfg = {}
+        for i in grp_cfg:
+            if isinstance(i, dict):
+                for k, v in i.items():
+                    if k not in c_grp_cfg:
+                        if isinstance(v, list):
+                            c_grp_cfg[k] = list(v)
+                        elif isinstance(v, six.string_types):
+                            c_grp_cfg[k] = [v]
+                        else:
+                            raise TypeError("Bad group member type %s" %
+                                            type_utils.obj_name(v))
+                    else:
+                        if isinstance(v, list):
+                            c_grp_cfg[k].extend(v)
+                        elif isinstance(v, six.string_types):
+                            c_grp_cfg[k].append(v)
+                        else:
+                            raise TypeError("Bad group member type %s" %
+                                            type_utils.obj_name(v))
+            elif isinstance(i, six.string_types):
+                if i not in c_grp_cfg:
+                    c_grp_cfg[i] = []
+            else:
+                raise TypeError("Unknown group name type %s" %
+                                type_utils.obj_name(i))
+        grp_cfg = c_grp_cfg
+    groups = {}
+    if isinstance(grp_cfg, dict):
+        for (grp_name, grp_members) in grp_cfg.items():
+            groups[grp_name] = util.uniq_merge_sorted(grp_members)
+    else:
+        raise TypeError(("Group config must be list, dict "
+                         " or string types only and not %s") %
+                        type_utils.obj_name(grp_cfg))
+    return groups
+
+
+# Normalizes an input user configuration,
+# which can be a comma separated list of
+# user names, or a list of string user names,
+# or a list of dictionaries with components
+# that define the user config + 'name' (if
+# a 'name' field does not exist then the
+# default user is assumed to 'own' that
+# configuration).
+#
+# The output is a dictionary of user
+# names => user config, which is the standard
+# form used in the rest of cloud-init. Note
+# the default user will have a special config
+# entry 'default' which will be marked as true;
+# all other users will be marked as false.
+def _normalize_users(u_cfg, def_user_cfg=None):
+    if isinstance(u_cfg, dict):
+        ad_ucfg = []
+        for (k, v) in u_cfg.items():
+            if isinstance(v, (bool, int, float) + six.string_types):
+                if util.is_true(v):
+                    ad_ucfg.append(str(k))
+            elif isinstance(v, dict):
+                v['name'] = k
+                ad_ucfg.append(v)
+            else:
+                raise TypeError(("Unmappable user value type %s"
+                                 " for key %s") % (type_utils.obj_name(v), k))
+        u_cfg = ad_ucfg
+    elif isinstance(u_cfg, six.string_types):
+        u_cfg = util.uniq_merge_sorted(u_cfg)
+
+    users = {}
+    for user_config in u_cfg:
+        if isinstance(user_config, (list,) + six.string_types):
+            for u in util.uniq_merge(user_config):
+                if u and u not in users:
+                    users[u] = {}
+        elif isinstance(user_config, dict):
+            if 'name' in user_config:
+                n = user_config.pop('name')
+                prev_config = users.get(n) or {}
+                users[n] = util.mergemanydict([prev_config,
+                                               user_config])
+            else:
+                # Assume the default user then
+                prev_config = users.get('default') or {}
+                users['default'] = util.mergemanydict([prev_config,
+                                                       user_config])
+        else:
+            raise TypeError(("User config must be dictionary/list "
+                             " or string types only and not %s") %
+                            type_utils.obj_name(user_config))
+
+    # Ensure user options are in the right python-friendly format
+    if users:
+        c_users = {}
+        for (uname, uconfig) in users.items():
+            c_uconfig = {}
+            for (k, v) in uconfig.items():
+                k = k.replace('-', '_').strip()
+                if k:
+                    c_uconfig[k] = v
+            c_users[uname] = c_uconfig
+        users = c_users
+
+    # Fix up the 'default' user into the real
+    # default user name and replace it...
+    def_user = None
+    if users and 'default' in users:
+        def_config = users.pop('default')
+        if def_user_cfg:
+            # Pick up what the default 'real name' is
+            # and any groups that are provided by the
+            # default config
+            def_user_cfg = def_user_cfg.copy()
+            def_user = def_user_cfg.pop('name')
+            def_groups = def_user_cfg.pop('groups', [])
+            # Pick up any config + groups for that user name
+            # that we may have previously extracted
+            parsed_config = users.pop(def_user, {})
+            parsed_groups = parsed_config.get('groups', [])
+            # Now merge our extracted groups with
+            # anything the default config provided
+            users_groups = util.uniq_merge_sorted(parsed_groups, def_groups)
+            parsed_config['groups'] = ",".join(users_groups)
+            # The real config for the default user is the
+            # combination of the default user config provided
+            # by the distro, the default user config provided
+            # by the above merging for the user 'default' and
+            # then the parsed config from the user's 'real name'
+            # which does not have to be 'default' (but could be)
+            users[def_user] = util.mergemanydict([def_user_cfg,
+                                                  def_config,
+                                                  parsed_config])
+
+    # Ensure that only the default user that we
+    # found (if any) is actually marked as being
+    # the default user
+    if users:
+        for (uname, uconfig) in users.items():
+            if def_user and uname == def_user:
+                uconfig['default'] = True
+            else:
+                uconfig['default'] = False
+
+    return users
+
+
+# Normalizes a set of user/users and group
+# dictionary configuration into a usable
+# format so that the rest of cloud-init can
+# understand it, using the default user
+# provided by the input distribution (if any)
+# to allow for mapping of the 'default' user.
+#
+# Output is a dictionary of group names -> [member] (list)
+# and a dictionary of user names -> user configuration (dict)
+#
+# If 'user' exists it will override
+# the 'users'[0] entry (if a list); otherwise it will
+# just become an entry in the returned dictionary (no override)
+def normalize_users_groups(cfg, distro):
+    if not cfg:
+        cfg = {}
+
+    users = {}
+    groups = {}
+    if 'groups' in cfg:
+        groups = _normalize_groups(cfg['groups'])
+
+    # Handle the previous style of doing this, where the first user
+    # overrides the concept of the default user if provided in the
+    # user: XYZ format.
+    old_user = {}
+    if 'user' in cfg and cfg['user']:
+        old_user = cfg['user']
+        # Translate it into the format that is more useful
+        # going forward
+        if isinstance(old_user, six.string_types):
+            old_user = {
+                'name': old_user,
+            }
+        if not isinstance(old_user, dict):
+            LOG.warn(("Format for 'user' key must be a string or "
+                      "dictionary and not %s"), type_utils.obj_name(old_user))
+            old_user = {}
+
+    # If there is no old user format, then assume the distro
+    # provides what the 'default' user maps to, but note
+    # that if this is provided, we won't automatically inject
+    # a 'default' user into the users list, while if an old user
+    # format is provided we will.
+    distro_user_config = {}
+    try:
+        distro_user_config = distro.get_default_user()
+    except NotImplementedError:
+        LOG.warn(("Distro has not implemented default user "
+                  "access.
No distribution provided default user" + " will be normalized.")) + + # Merge the old user (which may just be an empty dict when not + # present with the distro provided default user configuration so + # that the old user style picks up all the distribution specific + # attributes (if any) + default_user_config = util.mergemanydict([old_user, distro_user_config]) + + base_users = cfg.get('users', []) + if not isinstance(base_users, (list, dict) + six.string_types): + LOG.warn(("Format for 'users' key must be a comma separated string" + " or a dictionary or a list and not %s"), + type_utils.obj_name(base_users)) + base_users = [] + + if old_user: + # Ensure that when user: is provided that this user + # always gets added (as the default user) + if isinstance(base_users, list): + # Just add it on at the end... + base_users.append({'name': 'default'}) + elif isinstance(base_users, dict): + base_users['default'] = dict(base_users).get('default', True) + elif isinstance(base_users, six.string_types): + # Just append it on to be re-parsed later + base_users += ",default" + + users = _normalize_users(base_users, default_user_config) + return (users, groups) + + +# Given a user dictionary config it will +# extract the default user name and user config +# from that list and return that tuple or +# return (None, None) if no default user is +# found in the given input +def extract_default(users, default_name=None, default_config=None): + if not users: + users = {} + + def safe_find(entry): + config = entry[1] + if not config or 'default' not in config: + return False + else: + return config['default'] + + tmp_users = users.items() + tmp_users = dict(filter(safe_find, tmp_users)) + if not tmp_users: + return (default_name, default_config) + else: + name = list(tmp_users)[0] + config = tmp_users[name] + config.pop('default', None) + return (name, config) diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py old mode 100644 new mode 100755 index a887a930..b24888fc --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -1,4 +1,5 @@ from cloudinit import distros +from cloudinit.distros import ug_util from cloudinit import helpers from cloudinit import settings @@ -29,7 +30,7 @@ class TestUGNormalize(TestCase): return distro def _norm(self, cfg, distro): - return distros.normalize_users_groups(cfg, distro) + return ug_util.normalize_users_groups(cfg, distro) def test_group_dict(self): distro = self._make_distro('ubuntu') @@ -236,7 +237,7 @@ class TestUGNormalize(TestCase): } (users, _groups) = self._norm(ug_cfg, distro) self.assertIn('bob', users) - (name, config) = distros.extract_default(users) + (name, config) = ug_util.extract_default(users) self.assertEqual(name, 'bob') expected_config = {} def_config = None -- cgit v1.2.3 From e8730078df8c99696b1b684e09c803eef7c4926c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 30 Sep 2016 15:53:42 -0400 Subject: Fix python2.6 things found running in centos 6. This gets the tests running in centos 6. * ProcessExecutionError: remove setting of .message Nothing in cloud-init seems to use .message anywhere, so it does not seem necessary. The reason to change it is that on 2.6 it spits out: cloudinit/util.py:286: DeprecationWarning: BaseException.message * tox.ini: add a centos6 environment the tox versions listed here replicate a centos6 install with packages from EPEL. 
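   With a python2.6 interpreter installed, the environment can be
   exercised via `tox -e centos6`. A rough sketch (illustrative names)
   of the 2.6-safe spellings the diffs below switch to:

       '{0} {1}'.format(a, b)           # 2.6 has no auto-numbered '{}'
       set(m['uri'] for m in mirrors)   # 2.6 has no set comprehensions
       data.decode('utf-8', 'replace')  # 2.6 decode() rejects the
                                        # errors= keyword argument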
You will still need a python2.6 to run this env so we do not enable it by default. --- cloudinit/sources/DataSourceAltCloud.py | 6 ++---- cloudinit/sources/helpers/azure.py | 2 +- cloudinit/util.py | 7 ++----- tests/unittests/test_handler/test_handler_apt_conf_v1.py | 2 +- tests/unittests/test_util.py | 2 +- tox.ini | 16 ++++++++++++++++ 6 files changed, 23 insertions(+), 12 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 48136f7c..20345389 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -195,8 +195,7 @@ class DataSourceAltCloud(sources.DataSource): (cmd_out, _err) = util.subp(cmd) LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) except ProcessExecutionError as _err: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), - _err.message) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) return False except OSError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) @@ -211,8 +210,7 @@ class DataSourceAltCloud(sources.DataSource): (cmd_out, _err) = util.subp(cmd) LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) except ProcessExecutionError as _err: - util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), - _err.message) + util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err) return False except OSError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 689ed4cc..1b3e9b70 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -232,7 +232,7 @@ class WALinuxAgentShim(object): def _get_value_from_leases_file(fallback_lease_file): leases = [] content = util.load_file(fallback_lease_file) - LOG.debug("content is {}".format(content)) + LOG.debug("content is %s", content) for line in content.splitlines(): if 'unknown-245' in line: # Example line from Ubuntu diff --git a/cloudinit/util.py b/cloudinit/util.py index eb3e5899..4cff83c5 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -199,7 +199,7 @@ def fully_decoded_payload(part): encoding = charset.input_codec else: encoding = 'utf-8' - return cte_payload.decode(encoding, errors='surrogateescape') + return cte_payload.decode(encoding, 'surrogateescape') return cte_payload @@ -282,9 +282,6 @@ class ProcessExecutionError(IOError): 'reason': self.reason, } IOError.__init__(self, message) - # For backward compatibility with Python 2. 
- if not hasattr(self, 'message'): - self.message = message class SeLinuxGuard(object): @@ -1821,7 +1818,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False, def ldecode(data, m='utf-8'): if not isinstance(data, bytes): return data - return data.decode(m, errors=decode) + return data.decode(m, decode) out = ldecode(out) err = ldecode(err) diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py index 45714efd..64acc3e0 100644 --- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py +++ b/tests/unittests/test_handler/test_handler_apt_conf_v1.py @@ -118,7 +118,7 @@ class TestConversion(TestCase): def test_convert_with_apt_mirror(self): mirror = 'http://my.mirror/ubuntu' f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror}) - self.assertIn(mirror, {m['uri'] for m in f['apt']['primary']}) + self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary'])) def test_no_old_content(self): mirror = 'http://my.mirror/ubuntu' diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index fc6b9d40..881509aa 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -553,7 +553,7 @@ class TestSubp(helpers.TestCase): def test_subp_decode_invalid_utf8_replaces(self): (out, _err) = util.subp(self.stdin2out, capture=True, data=self.utf8_invalid) - expected = self.utf8_invalid.decode('utf-8', errors='replace') + expected = self.utf8_invalid.decode('utf-8', 'replace') self.assertEqual(out, expected) def test_subp_decode_strict_raises(self): diff --git a/tox.ini b/tox.ini index 729de2a6..277858ed 100644 --- a/tox.ini +++ b/tox.ini @@ -59,3 +59,19 @@ deps = pyflakes==1.1.0 flake8==2.5.4 hacking==0.10.2 + +[testenv:centos6] +basepython = python2.6 +commands = nosetests {posargs:tests} +deps = + # requirements + argparse==1.2.1 + jinja2==2.2.1 + pyyaml==3.10 + PrettyTable==0.7.2 + oauthlib==0.6.0 + configobj==4.6.0 + requests==2.6.0 + jsonpatch==1.2 + six==1.9.0 + -r{toxinidir}/test-requirements.txt -- cgit v1.2.3 From 9972d246947f1a6ec102b978b99b26acc43133ec Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 29 Sep 2016 12:45:02 -0400 Subject: OpenNebula: replace 'ip' parsing with cloudinit.net usage. Replace the parsing of 'ip' to get a link and mac address list in OpenNebula's datasource with usage of cloudinit.net. This makes test cases there not depend on 'ip' availability and also uses common code. --- cloudinit/sources/DataSourceOpenNebula.py | 34 ++++++++++------------ tests/unittests/test_datasource/test_opennebula.py | 23 +++++++-------- 2 files changed, 25 insertions(+), 32 deletions(-) (limited to 'tests') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 635a836c..ba5f3f92 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -30,6 +30,7 @@ import re import string from cloudinit import log as logging +from cloudinit import net from cloudinit import sources from cloudinit import util @@ -120,17 +121,11 @@ class BrokenContextDiskDir(Exception): class OpenNebulaNetwork(object): - REG_DEV_MAC = re.compile( - r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) 
?', - re.MULTILINE | re.DOTALL) - - def __init__(self, ip, context): - self.ip = ip + def __init__(self, context, system_nics_by_mac=None): self.context = context - self.ifaces = self.get_ifaces() - - def get_ifaces(self): - return self.REG_DEV_MAC.findall(self.ip) + if system_nics_by_mac is None: + system_nics_by_mac = get_physical_nics_by_mac() + self.ifaces = system_nics_by_mac def mac2ip(self, mac): components = mac.split(':')[2:] @@ -188,9 +183,7 @@ class OpenNebulaNetwork(object): conf.append('iface lo inet loopback') conf.append('') - for i in self.ifaces: - dev = i[0] - mac = i[1] + for mac, dev in self.ifaces.items(): ip_components = self.mac2ip(mac) conf.append('auto ' + dev) @@ -405,16 +398,19 @@ def read_context_disk_dir(source_dir, asuser=None): # generate static /etc/network/interfaces # only if there are any required context variables # http://opennebula.org/documentation:rel3.8:cong#network_configuration - for k in context: - if re.match(r'^ETH\d+_IP$', k): - (out, _) = util.subp(['ip', 'link']) - net = OpenNebulaNetwork(out, context) - results['network-interfaces'] = net.gen_conf() - break + ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)] + if ipaddr_keys: + onet = OpenNebulaNetwork(context) + results['network-interfaces'] = onet.gen_conf() return results +def get_physical_nics_by_mac(): + devs = net.get_interfaces_by_mac() + return dict([(m, n) for m, n in devs.items() if net.is_physical(n)]) + + # Legacy: Must be present in case we load an old pkl object DataSourceOpenNebulaNet = DataSourceOpenNebula diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index d796f030..ce5b5550 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -1,7 +1,7 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util -from ..helpers import TestCase, populate_dir +from ..helpers import mock, populate_dir, TestCase import os import pwd @@ -31,12 +31,7 @@ SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' HOSTNAME = 'foo.example.com' PUBLIC_IP = '10.0.0.3' -CMD_IP_OUT = '''\ -1: lo: mtu 16436 qdisc noqueue state UNKNOWN - link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 -2: eth0: mtu 1500 qdisc mq state UP qlen 1000 - link/ether 02:00:0a:12:01:01 brd ff:ff:ff:ff:ff:ff -''' +DS_PATH = "cloudinit.sources.DataSourceOpenNebula" class TestOpenNebulaDataSource(TestCase): @@ -233,18 +228,19 @@ class TestOpenNebulaDataSource(TestCase): class TestOpenNebulaNetwork(unittest.TestCase): - def setUp(self): - super(TestOpenNebulaNetwork, self).setUp() + system_nics = {'02:00:0a:12:01:01': 'eth0'} def test_lo(self): - net = ds.OpenNebulaNetwork('', {}) + net = ds.OpenNebulaNetwork(context={}, system_nics_by_mac={}) self.assertEqual(net.gen_conf(), u'''\ auto lo iface lo inet loopback ''') - def test_eth0(self): - net = ds.OpenNebulaNetwork(CMD_IP_OUT, {}) + @mock.patch(DS_PATH + ".get_physical_nics_by_mac") + def test_eth0(self, m_get_phys_by_mac): + m_get_phys_by_mac.return_value = self.system_nics + net = ds.OpenNebulaNetwork({}) self.assertEqual(net.gen_conf(), u'''\ auto lo iface lo inet loopback @@ -267,7 +263,8 @@ iface eth0 inet static 'ETH0_DNS': '1.2.3.6 1.2.3.7' } - net = ds.OpenNebulaNetwork(CMD_IP_OUT, context) + net = ds.OpenNebulaNetwork(context, + system_nics_by_mac=self.system_nics) self.assertEqual(net.gen_conf(), u'''\ auto lo iface lo inet loopback -- 
cgit v1.2.3


From d8534561ba76db25b6fc0044eb1bfda63686e859 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Thu, 1 Sep 2016 15:49:20 -0500
Subject: Add support for snap create-user on Ubuntu Core images.

Ubuntu Core images use the `snap create-user` command to add users to an
Ubuntu Core system. Add support for creating snap users by adding a key
to the users dictionary:

    users:
      - name: bob
        snapuser: bob@bobcom.io

Or via the 'snappy' dictionary:

    snappy:
      email: bob@bobcom.io

Users may also create a snap user without contacting the SSO by
providing a 'system-user' assertion and importing it into snapd.

Additionally, Ubuntu Core systems have a read-only /etc/passwd such
that the normal useradd/groupadd commands do not function without an
additional flag, '--extrausers', which redirects the passwd database to
/var/lib/extrausers.

Move the system_is_snappy() check from the cc_snappy module to util for
re-use, and then update the Distro class to append '--extrausers' if the
system is Ubuntu Core.
---
 cloudinit/config/cc_snap_config.py                 | 184 +++++++++++++
 cloudinit/config/cc_snappy.py                      |  18 +-
 cloudinit/distros/__init__.py                      |  35 +++
 cloudinit/util.py                                  |  12 +
 config/cloud.cfg                                   |   1 +
 doc/examples/cloud-config-user-groups.txt          |   8 +
 .../test_distros/test_user_data_normalize.py       |  65 +++++
 .../unittests/test_handler/test_handler_snappy.py  | 293 ++++++++++++++++++++-
 8 files changed, 601 insertions(+), 15 deletions(-)
 create mode 100644 cloudinit/config/cc_snap_config.py

(limited to 'tests')

diff --git a/cloudinit/config/cc_snap_config.py b/cloudinit/config/cc_snap_config.py
new file mode 100644
index 00000000..275a2d09
--- /dev/null
+++ b/cloudinit/config/cc_snap_config.py
@@ -0,0 +1,184 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+#
+# Author: Ryan Harper
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.

+"""
+Snappy
+------
+**Summary:** the snap_config module allows configuration of snapd.
+
+This module uses the same ``snappy`` namespace for configuration, but
+acts on only a subset of the configuration.
+
+If ``assertions`` is set, and the user has included a list of assertions,
+then cloud-init will collect the assertions into a single assertion file
+and invoke ``snap ack`` on that file, which will attempt
+to load the provided assertions into the snapd assertion database.
+
+If ``email`` is set, this value is used to create an authorized user for
+contacting and installing snaps from the Ubuntu Store. This is done by
+calling the ``snap create-user`` command.
+
+If ``known`` is set to True, then the user is expected to have also
+included an assertion of type ``system-user``. When ``snap create-user``
+is called, cloud-init will append the '--known' flag, which instructs
+snapd to look for a system-user assertion with the details. If ``known``
+is not set, then ``snap create-user`` will contact the Ubuntu SSO for
+validating and importing a system-user for the instance.
+
+.. note::
+    If the system is already managed, then cloud-init will not attempt to
+    create a system-user.
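+
+For example, creating a store-validated user can be as small as (a
+minimal sketch; the full set of keys is shown under **Config keys**)::
+
+    #cloud-config
+    snappy:
+        email: user@user.org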
+ +**Internal name:** ``cc_snap_config`` + +**Module frequency:** per instance + +**Supported distros:** any with 'snapd' available + +**Config keys**:: + + #cloud-config + snappy: + assertions: + - | + + - | + + email: user@user.org + known: true + +""" + +from cloudinit import log as logging +from cloudinit.settings import PER_INSTANCE +from cloudinit import util + +LOG = logging.getLogger(__name__) + +frequency = PER_INSTANCE +SNAPPY_CMD = "snap" +ASSERTIONS_FILE = "/var/lib/cloud/instance/snapd.assertions" + + +""" +snappy: + assertions: + - | + + - | + + email: foo@foo.io + known: true +""" + + +def add_assertions(assertions=None): + """Import list of assertions. + + Import assertions by concatenating each assertion into a + string separated by a '\n'. Write this string to a instance file and + then invoke `snap ack /path/to/file` and check for errors. + If snap exits 0, then all assertions are imported. + """ + if not assertions: + assertions = [] + + if not isinstance(assertions, list): + raise ValueError('assertion parameter was not a list: %s', assertions) + + snap_cmd = [SNAPPY_CMD, 'ack'] + combined = "\n".join(assertions) + if len(combined) == 0: + raise ValueError("Assertion list is empty") + + for asrt in assertions: + LOG.debug('Acking: %s', asrt.split('\n')[0:2]) + + util.write_file(ASSERTIONS_FILE, combined.encode('utf-8')) + util.subp(snap_cmd + [ASSERTIONS_FILE], capture=True) + + +def add_snap_user(cfg=None): + """Add a snap system-user if provided with email under snappy config. + + - Check that system is not already managed. + - Check that if using a system-user assertion, that it's + imported into snapd. + + Returns a dictionary to be passed to Distro.create_user + """ + + if not cfg: + cfg = {} + + if not isinstance(cfg, dict): + raise ValueError('configuration parameter was not a dict: %s', cfg) + + snapuser = cfg.get('email', None) + if not snapuser: + return + + usercfg = { + 'snapuser': snapuser, + 'known': cfg.get('known', False), + } + + # query if we're already registered + out, _ = util.subp([SNAPPY_CMD, 'managed'], capture=True) + if out.strip() == "true": + LOG.warning('This device is already managed. ' + 'Skipping system-user creation') + return + + if usercfg.get('known'): + # Check that we imported a system-user assertion + out, _ = util.subp([SNAPPY_CMD, 'known', 'system-user'], + capture=True) + if len(out) == 0: + LOG.error('Missing "system-user" assertion. ' + 'Check "snappy" user-data assertions.') + return + + return usercfg + + +def handle(name, cfg, cloud, log, args): + cfgin = cfg.get('snappy') + if not cfgin: + LOG.debug('No snappy config provided, skipping') + return + + if not(util.system_is_snappy()): + LOG.debug("%s: system not snappy", name) + return + + assertions = cfgin.get('assertions', []) + if len(assertions) > 0: + LOG.debug('Importing user-provided snap assertions') + add_assertions(assertions) + + # Create a snap user if requested. + # Snap systems contact the store with a user's email + # and extract information needed to create a local user. + # A user may provide a 'system-user' assertion which includes + # the required information. Using such an assertion to create + # a local user requires specifying 'known: true' in the supplied + # user-data. 
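+    # (Sketch of the resulting flow: add_snap_user returns None when no
+    # email is configured or the device is already managed, and then no
+    # user is created below.)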
+ usercfg = add_snap_user(cfg=cfgin) + if usercfg: + cloud.distro.create_user(usercfg.get('snapuser'), **usercfg) diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index 36db9e67..e03ec483 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -257,24 +257,14 @@ def disable_enable_ssh(enabled): util.write_file(not_to_be_run, "cloud-init\n") -def system_is_snappy(): - # channel.ini is configparser loadable. - # snappy will move to using /etc/system-image/config.d/*.ini - # this is certainly not a perfect test, but good enough for now. - content = util.load_file("/etc/system-image/channel.ini", quiet=True) - if 'ubuntu-core' in content.lower(): - return True - if os.path.isdir("/etc/system-image/config.d/"): - return True - return False - - def set_snappy_command(): global SNAPPY_CMD if util.which("snappy-go"): SNAPPY_CMD = "snappy-go" - else: + elif util.which("snappy"): SNAPPY_CMD = "snappy" + else: + SNAPPY_CMD = "snap" LOG.debug("snappy command is '%s'", SNAPPY_CMD) @@ -289,7 +279,7 @@ def handle(name, cfg, cloud, log, args): LOG.debug("%s: System is not snappy. disabling", name) return - if sys_snappy.lower() == "auto" and not(system_is_snappy()): + if sys_snappy.lower() == "auto" and not(util.system_is_snappy()): LOG.debug("%s: 'auto' mode, and system not snappy", name) return diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 78adf5f9..4a726430 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -367,6 +367,9 @@ class Distro(object): adduser_cmd = ['useradd', name] log_adduser_cmd = ['useradd', name] + if util.system_is_snappy(): + adduser_cmd.append('--extrausers') + log_adduser_cmd.append('--extrausers') # Since we are creating users, we want to carefully validate the # inputs. If something goes wrong, we can end up with a system @@ -445,6 +448,32 @@ class Distro(object): util.logexc(LOG, "Failed to create user %s", name) raise e + def add_snap_user(self, name, **kwargs): + """ + Add a snappy user to the system using snappy tools + """ + + snapuser = kwargs.get('snapuser') + known = kwargs.get('known', False) + adduser_cmd = ["snap", "create-user", "--sudoer", "--json"] + if known: + adduser_cmd.append("--known") + adduser_cmd.append(snapuser) + + # Run the command + LOG.debug("Adding snap user %s", name) + try: + (out, err) = util.subp(adduser_cmd, logstring=adduser_cmd, + capture=True) + LOG.debug("snap create-user returned: %s:%s", out, err) + jobj = util.load_json(out) + username = jobj.get('username', None) + except Exception as e: + util.logexc(LOG, "Failed to create snap user %s", name) + raise e + + return username + def create_user(self, name, **kwargs): """ Creates users for the system using the GNU passwd tools. This @@ -452,6 +481,10 @@ class Distro(object): distros where useradd is not desirable or not available. 
""" + # Add a snap user, if requested + if 'snapuser' in kwargs: + return self.add_snap_user(name, **kwargs) + # Add the user self.add_user(name, **kwargs) @@ -602,6 +635,8 @@ class Distro(object): def create_group(self, name, members=None): group_add_cmd = ['groupadd', name] + if util.system_is_snappy(): + group_add_cmd.append('--extrausers') if not members: members = [] diff --git a/cloudinit/util.py b/cloudinit/util.py index 4cff83c5..4b3fd0cb 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2374,3 +2374,15 @@ def get_installed_packages(target=None): pkgs_inst.add(re.sub(":.*", "", pkg)) return pkgs_inst + + +def system_is_snappy(): + # channel.ini is configparser loadable. + # snappy will move to using /etc/system-image/config.d/*.ini + # this is certainly not a perfect test, but good enough for now. + content = load_file("/etc/system-image/channel.ini", quiet=True) + if 'ubuntu-core' in content.lower(): + return True + if os.path.isdir("/etc/system-image/config.d/"): + return True + return False diff --git a/config/cloud.cfg b/config/cloud.cfg index d608dc86..1b93e7f9 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -45,6 +45,7 @@ cloud_config_modules: # Emit the cloud config ready event # this can be used by upstart jobs for 'start on cloud-config'. - emit_upstart + - snap_config - ssh-import-id - locale - set-passwords diff --git a/doc/examples/cloud-config-user-groups.txt b/doc/examples/cloud-config-user-groups.txt index 0e8ed243..9c5202f5 100644 --- a/doc/examples/cloud-config-user-groups.txt +++ b/doc/examples/cloud-config-user-groups.txt @@ -30,6 +30,7 @@ users: gecos: Magic Cloud App Daemon User inactive: true system: true + - snapuser: joe@joeuser.io # Valid Values: # name: The user's login name @@ -80,6 +81,13 @@ users: # cloud-init does not parse/check the syntax of the sudo # directive. # system: Create the user as a system user. This means no home directory. +# snapuser: Create a Snappy (Ubuntu-Core) user via the snap create-user +# command available on Ubuntu systems. If the user has an account +# on the Ubuntu SSO, specifying the email will allow snap to +# request a username and any public ssh keys and will import +# these into the system with username specifed by SSO account. +# If 'username' is not set in SSO, then username will be the +# shortname before the email domain. 
# # Default user creation: diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index b24888fc..33bf922d 100755 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -4,6 +4,7 @@ from cloudinit import helpers from cloudinit import settings from ..helpers import TestCase +import mock bcfg = { @@ -296,3 +297,67 @@ class TestUGNormalize(TestCase): self.assertIn('bob', users) self.assertEqual({'default': False}, users['joe']) self.assertEqual({'default': False}, users['bob']) + + @mock.patch('cloudinit.util.subp') + def test_create_snap_user(self, mock_subp): + mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', + '')] + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe', 'snapuser': 'joe@joe.com'}, + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + for (user, config) in users.items(): + print('user=%s config=%s' % (user, config)) + username = distro.create_user(user, **config) + + snapcmd = ['snap', 'create-user', '--sudoer', '--json', 'joe@joe.com'] + mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) + self.assertEqual(username, 'joe') + + @mock.patch('cloudinit.util.subp') + def test_create_snap_user_known(self, mock_subp): + mock_subp.side_effect = [('{"username": "joe", "ssh-key-count": 1}\n', + '')] + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe', 'snapuser': 'joe@joe.com', 'known': True}, + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + for (user, config) in users.items(): + print('user=%s config=%s' % (user, config)) + username = distro.create_user(user, **config) + + snapcmd = ['snap', 'create-user', '--sudoer', '--json', '--known', + 'joe@joe.com'] + mock_subp.assert_called_with(snapcmd, capture=True, logstring=snapcmd) + self.assertEqual(username, 'joe') + + @mock.patch('cloudinit.util.system_is_snappy') + @mock.patch('cloudinit.util.is_group') + @mock.patch('cloudinit.util.subp') + def test_add_user_on_snappy_system(self, mock_subp, mock_isgrp, + mock_snappy): + mock_isgrp.return_value = False + mock_subp.return_value = True + mock_snappy.return_value = True + distro = self._make_distro('ubuntu') + ug_cfg = { + 'users': [ + {'name': 'joe', 'groups': 'users', 'create_groups': True}, + ], + } + (users, _groups) = self._norm(ug_cfg, distro) + for (user, config) in users.items(): + print('user=%s config=%s' % (user, config)) + distro.add_user(user, **config) + + groupcmd = ['groupadd', 'users', '--extrausers'] + addcmd = ['useradd', 'joe', '--extrausers', '--groups', 'users', '-m'] + + mock_subp.assert_any_call(groupcmd) + mock_subp.assert_any_call(addcmd, logstring=addcmd) diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py index 57dce1bc..e320dd82 100644 --- a/tests/unittests/test_handler/test_handler_snappy.py +++ b/tests/unittests/test_handler/test_handler_snappy.py @@ -1,14 +1,22 @@ from cloudinit.config.cc_snappy import ( makeop, get_package_ops, render_snap_op) -from cloudinit import util +from cloudinit.config.cc_snap_config import ( + add_assertions, add_snap_user, ASSERTIONS_FILE) +from cloudinit import (distros, helpers, cloud, util) +from cloudinit.config.cc_snap_config import handle as snap_handle +from cloudinit.sources import DataSourceNone +from ..helpers import FilesystemMockingTestCase, mock from .. 
import helpers as t_help +import logging import os import shutil import tempfile +import textwrap import yaml +LOG = logging.getLogger(__name__) ALLOWED = (dict, list, int, str) @@ -287,6 +295,289 @@ class TestInstallPackages(t_help.TestCase): self.assertEqual(yaml.safe_load(mydata), data_found) +class TestSnapConfig(FilesystemMockingTestCase): + + SYSTEM_USER_ASSERTION = textwrap.dedent(""" + type: system-user + authority-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp + brand-id: LqvZQdfyfGlYvtep4W6Oj6pFXP9t1Ksp + email: foo@bar.com + password: $6$E5YiAuMIPAwX58jG$miomhVNui/vf7f/3ctB/f0RWSKFxG0YXzrJ9rtJ1ikvzt + series: + - 16 + since: 2016-09-10T16:34:00+03:00 + until: 2017-11-10T16:34:00+03:00 + username: baz + sign-key-sha3-384: RuVvnp4n52GilycjfbbTCI3_L8Y6QlIE75wxMc0KzGV3AUQqVd9GuXoj + + AcLBXAQAAQoABgUCV/UU1wAKCRBKnlMoJQLkZVeLD/9/+hIeVywtzsDA3oxl+P+u9D13y9s6svP + Jd6Wnf4FTw6sq1GjBE4ZA7lrwSaRCUJ9Vcsvf2q9OGPY7mOb2TBxaDe0PbUMjrSrqllSSQwhpNI + zG+NxkkKuxsUmLzFa+k9m6cyojNbw5LFhQZBQCGlr3JYqC0tIREq/UsZxj+90TUC87lDJwkU8GF + s4CR+rejZj4itIcDcVxCSnJH6hv6j2JrJskJmvObqTnoOlcab+JXdamXqbldSP3UIhWoyVjqzkj + +to7mXgx+cCUA9+ngNCcfUG+1huGGTWXPCYkZ78HvErcRlIdeo4d3xwtz1cl/w3vYnq9og1XwsP + Yfetr3boig2qs1Y+j/LpsfYBYncgWjeDfAB9ZZaqQz/oc8n87tIPZDJHrusTlBfop8CqcM4xsKS + d+wnEY8e/F24mdSOYmS1vQCIDiRU3MKb6x138Ud6oHXFlRBbBJqMMctPqWDunWzb5QJ7YR0I39q + BrnEqv5NE0G7w6HOJ1LSPG5Hae3P4T2ea+ATgkb03RPr3KnXnzXg4TtBbW1nytdlgoNc/BafE1H + f3NThcq9gwX4xWZ2PAWnqVPYdDMyCtzW3Ck+o6sIzx+dh4gDLPHIi/6TPe/pUuMop9CBpWwez7V + v1z+1+URx6Xlq3Jq18y5pZ6fY3IDJ6km2nQPMzcm4Q==""") + + ACCOUNT_ASSERTION = textwrap.dedent(""" + type: account-key + authority-id: canonical + revision: 2 + public-key-sha3-384: BWDEoaqyr25nF5SNCvEv2v7QnM9QsfCc0PBMYD_i2NGSQ32EF2d4D0 + account-id: canonical + name: store + since: 2016-04-01T00:00:00.0Z + body-length: 717 + sign-key-sha3-384: -CvQKAwRQ5h3Ffn10FILJoEZUXOv6km9FwA80-Rcj-f-6jadQ89VRswH + + AcbBTQRWhcGAARAA0KKYYQWuHOrsFVi4p4l7ZzSvX7kLgJFFeFgOkzdWKBTHEnsMKjl5mefFe9j + qe8NlmJdfY7BenP7XeBtwKp700H/t9lLrZbpTNAPHXYxEWFJp5bPqIcJYBZ+29oLVLN1Tc5X482 + vCiDqL8+pPYqBrK2fNlyPlNNSum9wI70rDDL4r6FVvr+osTnGejibdV8JphWX+lrSQDnRSdM8KJ + UM43vTgLGTi9W54oRhsA2OFexRfRksTrnqGoonCjqX5wO3OFSaMDzMsO2MJ/hPfLgDqw53qjzuK + Iec9OL3k5basvu2cj5u9tKwVFDsCKK2GbKUsWWpx2KTpOifmhmiAbzkTHbH9KaoMS7p0kJwhTQG + o9aJ9VMTWHJc/NCBx7eu451u6d46sBPCXS/OMUh2766fQmoRtO1OwCTxsRKG2kkjbMn54UdFULl + VfzvyghMNRKIezsEkmM8wueTqGUGZWa6CEZqZKwhe/PROxOPYzqtDH18XZknbU1n5lNb7vNfem9 + 2ai+3+JyFnW9UhfvpVF7gzAgdyCqNli4C6BIN43uwoS8HkykocZS/+Gv52aUQ/NZ8BKOHLw+7an + Q0o8W9ltSLZbEMxFIPSN0stiZlkXAp6DLyvh1Y4wXSynDjUondTpej2fSvSlCz/W5v5V7qA4nIc + vUvV7RjVzv17ut0AEQEAAQ== + + AcLDXAQAAQoABgUCV83k9QAKCRDUpVvql9g3IBT8IACKZ7XpiBZ3W4lqbPssY6On81WmxQLtvsM + WTp6zZpl/wWOSt2vMNUk9pvcmrNq1jG9CuhDfWFLGXEjcrrmVkN3YuCOajMSPFCGrxsIBLSRt/b + nrKykdLAAzMfG8rP1d82bjFFiIieE+urQ0Kcv09Jtdvavq3JT1Tek5mFyyfhHNlQEKOzWqmRWiL + 3c3VOZUs1ZD8TSlnuq/x+5T0X0YtOyGjSlVxk7UybbyMNd6MZfNaMpIG4x+mxD3KHFtBAC7O6kL + eX3i6j5nCY5UABfA3DZEAkWP4zlmdBEOvZ9t293NaDdOpzsUHRkoi0Zez/9BHQ/kwx/uNc2WqrY + inCmu16JGNeXqsyinnLl7Ghn2RwhvDMlLxF6RTx8xdx1yk6p3PBTwhZMUvuZGjUtN/AG8BmVJQ1 + rsGSRkkSywvnhVJRB2sudnrMBmNS2goJbzSbmJnOlBrd2WsV0T9SgNMWZBiov3LvU4o2SmAb6b+ + rYwh8H5QHcuuYJuxDjFhPswIp6Wes5T6hUicf3SWtObcDS4HSkVS4ImBjjX9YgCuFy7QdnooOWE + aPvkRw3XCVeYq0K6w9GRsk1YFErD4XmXXZjDYY650MX9v42Sz5MmphHV8jdIY5ssbadwFSe2rCQ + 6UX08zy7RsIb19hTndE6ncvSNDChUR9eEnCm73eYaWTWTnq1cxdVP/s52r8uss++OYOkPWqh5nO + haRn7INjH/yZX4qXjNXlTjo0PnHH0q08vNKDwLhxS+D9du+70FeacXFyLIbcWllSbJ7DmbumGpF + 
yYbtj3FDDPzachFQdIG3lSt+cSUGeyfSs6wVtc3cIPka/2Urx7RprfmoWSI6+a5NcLdj0u2z8O9 + HxeIgxDpg/3gT8ZIuFKePMcLDM19Fh/p0ysCsX+84B9chNWtsMSmIaE57V+959MVtsLu7SLb9gi + skrju0pQCwsu2wHMLTNd1f3PTHmrr49hxetTus07HSQUApMtAGKzQilF5zqFjbyaTd4xgQbd+PK + CjFyzQTDOcUhXpuUGt/IzlqiFfsCsmbj2K4KdSNYMlqIgZ3Azu8KvZLIhsyN7v5vNIZSPfEbjde + ClU9r0VRiJmtYBUjcSghD9LWn+yRLwOxhfQVjm0cBwIt5R/yPF/qC76yIVuWUtM5Y2/zJR1J8OF + qWchvlImHtvDzS9FQeLyzJAOjvZ2CnWp2gILgUz0WQdOk1Dq8ax7KS9BQ42zxw9EZAEPw3PEFqR + IQsRTONp+iVS8YxSmoYZjDlCgRMWUmawez/Fv5b9Fb/XkO5Eq4e+KfrpUujXItaipb+tV8h5v3t + oG3Ie3WOHrVjCLXIdYslpL1O4nadqR6Xv58pHj6k""") + + test_assertions = [ACCOUNT_ASSERTION, SYSTEM_USER_ASSERTION] + + def setUp(self): + super(TestSnapConfig, self).setUp() + self.subp = util.subp + self.new_root = tempfile.mkdtemp() + self.addCleanup(shutil.rmtree, self.new_root) + + def _get_cloud(self, distro, metadata=None): + self.patchUtils(self.new_root) + paths = helpers.Paths({}) + cls = distros.fetch(distro) + mydist = cls(distro, {}, paths) + myds = DataSourceNone.DataSourceNone({}, mydist, paths) + if metadata: + myds.metadata.update(metadata) + return cloud.Cloud(myds, paths, {}, mydist, None) + + @mock.patch('cloudinit.util.write_file') + @mock.patch('cloudinit.util.subp') + def test_snap_config_add_assertions(self, msubp, mwrite): + add_assertions(self.test_assertions) + + combined = "\n".join(self.test_assertions) + mwrite.assert_any_call(ASSERTIONS_FILE, combined.encode('utf-8')) + msubp.assert_called_with(['snap', 'ack', ASSERTIONS_FILE], + capture=True) + + def test_snap_config_add_assertions_empty(self): + self.assertRaises(ValueError, add_assertions, []) + + def test_add_assertions_nonlist(self): + self.assertRaises(ValueError, add_assertions, {}) + + @mock.patch('cloudinit.util.write_file') + @mock.patch('cloudinit.util.subp') + def test_snap_config_add_assertions_ack_fails(self, msubp, mwrite): + msubp.side_effect = [util.ProcessExecutionError("Invalid assertion")] + self.assertRaises(util.ProcessExecutionError, add_assertions, + self.test_assertions) + + @mock.patch('cloudinit.config.cc_snap_config.add_assertions') + @mock.patch('cloudinit.config.cc_snap_config.util') + def test_snap_config_handle_no_config(self, mock_util, mock_add): + cfg = {} + cc = self._get_cloud('ubuntu') + cc.distro = mock.MagicMock() + cc.distro.name = 'ubuntu' + mock_util.which.return_value = None + snap_handle('snap_config', cfg, cc, LOG, None) + mock_add.assert_not_called() + + def test_snap_config_add_snap_user_no_config(self): + usercfg = add_snap_user(cfg=None) + self.assertEqual(usercfg, None) + + def test_snap_config_add_snap_user_not_dict(self): + cfg = ['foobar'] + self.assertRaises(ValueError, add_snap_user, cfg) + + def test_snap_config_add_snap_user_no_email(self): + cfg = {'assertions': [], 'known': True} + usercfg = add_snap_user(cfg=cfg) + self.assertEqual(usercfg, None) + + @mock.patch('cloudinit.config.cc_snap_config.util') + def test_snap_config_add_snap_user_email_only(self, mock_util): + email = 'janet@planetjanet.org' + cfg = {'email': email} + mock_util.which.return_value = None + mock_util.system_is_snappy.return_value = True + mock_util.subp.side_effect = [ + ("false\n", ""), # snap managed + ] + + usercfg = add_snap_user(cfg=cfg) + + self.assertEqual(usercfg, {'snapuser': email, 'known': False}) + + @mock.patch('cloudinit.config.cc_snap_config.util') + def test_snap_config_add_snap_user_email_known(self, mock_util): + email = 'janet@planetjanet.org' + known = True + cfg = {'email': email, 'known': known} + mock_util.which.return_value 
= None + mock_util.system_is_snappy.return_value = True + mock_util.subp.side_effect = [ + ("false\n", ""), # snap managed + (self.SYSTEM_USER_ASSERTION, ""), # snap known system-user + ] + + usercfg = add_snap_user(cfg=cfg) + + self.assertEqual(usercfg, {'snapuser': email, 'known': known}) + + @mock.patch('cloudinit.config.cc_snap_config.add_assertions') + @mock.patch('cloudinit.config.cc_snap_config.util') + def test_snap_config_handle_system_not_snappy(self, mock_util, mock_add): + cfg = {'snappy': {'assertions': self.test_assertions}} + cc = self._get_cloud('ubuntu') + cc.distro = mock.MagicMock() + cc.distro.name = 'ubuntu' + mock_util.which.return_value = None + mock_util.system_is_snappy.return_value = False + + snap_handle('snap_config', cfg, cc, LOG, None) + + mock_add.assert_not_called() + + @mock.patch('cloudinit.config.cc_snap_config.add_assertions') + @mock.patch('cloudinit.config.cc_snap_config.util') + def test_snap_config_handle_snapuser(self, mock_util, mock_add): + email = 'janet@planetjanet.org' + cfg = { + 'snappy': { + 'assertions': self.test_assertions, + 'email': email, + } + } + cc = self._get_cloud('ubuntu') + cc.distro = mock.MagicMock() + cc.distro.name = 'ubuntu' + mock_util.which.return_value = None + mock_util.system_is_snappy.return_value = True + mock_util.subp.side_effect = [ + ("false\n", ""), # snap managed + ] + + snap_handle('snap_config', cfg, cc, LOG, None) + + mock_add.assert_called_with(self.test_assertions) + usercfg = {'snapuser': email, 'known': False} + cc.distro.create_user.assert_called_with(email, **usercfg) + + @mock.patch('cloudinit.config.cc_snap_config.add_assertions') + @mock.patch('cloudinit.config.cc_snap_config.util') + def test_snap_config_handle_snapuser_known(self, mock_util, mock_add): + email = 'janet@planetjanet.org' + cfg = { + 'snappy': { + 'assertions': self.test_assertions, + 'email': email, + 'known': True, + } + } + cc = self._get_cloud('ubuntu') + cc.distro = mock.MagicMock() + cc.distro.name = 'ubuntu' + mock_util.which.return_value = None + mock_util.system_is_snappy.return_value = True + mock_util.subp.side_effect = [ + ("false\n", ""), # snap managed + (self.SYSTEM_USER_ASSERTION, ""), # snap known system-user + ] + + snap_handle('snap_config', cfg, cc, LOG, None) + + mock_add.assert_called_with(self.test_assertions) + usercfg = {'snapuser': email, 'known': True} + cc.distro.create_user.assert_called_with(email, **usercfg) + + @mock.patch('cloudinit.config.cc_snap_config.add_assertions') + @mock.patch('cloudinit.config.cc_snap_config.util') + def test_snap_config_handle_snapuser_known_managed(self, mock_util, + mock_add): + email = 'janet@planetjanet.org' + cfg = { + 'snappy': { + 'assertions': self.test_assertions, + 'email': email, + 'known': True, + } + } + cc = self._get_cloud('ubuntu') + cc.distro = mock.MagicMock() + cc.distro.name = 'ubuntu' + mock_util.which.return_value = None + mock_util.system_is_snappy.return_value = True + mock_util.subp.side_effect = [ + ("true\n", ""), # snap managed + ] + + snap_handle('snap_config', cfg, cc, LOG, None) + + mock_add.assert_called_with(self.test_assertions) + cc.distro.create_user.assert_not_called() + + @mock.patch('cloudinit.config.cc_snap_config.add_assertions') + @mock.patch('cloudinit.config.cc_snap_config.util') + def test_snap_config_handle_snapuser_known_no_assertion(self, mock_util, + mock_add): + email = 'janet@planetjanet.org' + cfg = { + 'snappy': { + 'assertions': [self.ACCOUNT_ASSERTION], + 'email': email, + 'known': True, + } + } + cc = 
self._get_cloud('ubuntu')
+        cc.distro = mock.MagicMock()
+        cc.distro.name = 'ubuntu'
+        mock_util.which.return_value = None
+        mock_util.system_is_snappy.return_value = True
+        mock_util.subp.side_effect = [
+            ("true\n", ""),  # snap managed
+            ("", ""),  # snap known system-user
+        ]
+
+        snap_handle('snap_config', cfg, cc, LOG, None)
+
+        mock_add.assert_called_with([self.ACCOUNT_ASSERTION])
+        cc.distro.create_user.assert_not_called()
+
+
 def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
     if cfgfile:
         cfgfile = os.path.sep.join([tmpd, cfgfile])
-- 
cgit v1.2.3


From 1e55f4127f356b930e2c1ad36dcb6bed24f3beb2 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 24 Oct 2016 10:31:21 -0400
Subject: unittests: do not read system /etc/cloud/cloud.cfg.d

Many of the unit tests in test_data would inadvertently read the
system's /etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d. This was
first noticed on a system deployed by MAAS, where files in
/etc/cloud/cloud.cfg.d/ are root read-only.

This changes those tests to actually make use of
FilesystemMockingTestCase functionality and adds 'reRoot()' to that
class, which is easier to use for at least this use case.

LP: #1635350
---
 tests/unittests/helpers.py   |  8 ++++++++
 tests/unittests/test_data.py | 45 +++++++++++++++----------------------------
 2 files changed, 23 insertions(+), 30 deletions(-)

(limited to 'tests')

diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 1cdc05a1..a2355a79 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -205,6 +205,14 @@ class FilesystemMockingTestCase(ResourceUsingTestCase):
         self.patched_funcs.enter_context(
             mock.patch.object(sys, 'stderr', stderr))
 
+    def reRoot(self, root=None):
+        if root is None:
+            root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, root)
+        self.patchUtils(root)
+        self.patchOS(root)
+        return root
+
 
 def import_httpretty():
     """Import HTTPretty and monkey patch Python 3.4 issue.
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index 13db8a4c..55d9b93f 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -3,8 +3,6 @@ import gzip import logging import os -import shutil -import tempfile try: from unittest import mock @@ -98,10 +96,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): ci = stages.Init() ci.datasource = FakeDataSource(blob) - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self.patchUtils(new_root) - self.patchOS(new_root) + self.reRoot() ci.fetch() ci.consume_data() cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) @@ -127,9 +122,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): { "op": "add", "path": "/foo", "value": "quxC" } ] ''' - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self._patchIn(new_root) + self.reRoot() initer = stages.Init() initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) initer.read_cfg() @@ -167,9 +160,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase): { "op": "add", "path": "/foo", "value": "quxC" } ] ''' - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self._patchIn(new_root) + self.reRoot() initer = stages.Init() initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) initer.read_cfg() @@ -212,12 +203,9 @@ c: d message.attach(message_cc) message.attach(message_jp) + self.reRoot() ci = stages.Init() ci.datasource = FakeDataSource(str(message)) - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self.patchUtils(new_root) - self.patchOS(new_root) ci.fetch() ci.consume_data() cc_contents = util.load_file(ci.paths.get_ipath("cloud_config")) @@ -245,9 +233,7 @@ name: user run: - z ''' - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self._patchIn(new_root) + self.reRoot() initer = stages.Init() initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) initer.read_cfg() @@ -281,9 +267,7 @@ vendor_data: enabled: True prefix: /bin/true ''' - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self._patchIn(new_root) + new_root = self.reRoot() initer = stages.Init() initer.datasource = FakeDataSource(user_blob, vendordata=vendor_blob) initer.read_cfg() @@ -342,10 +326,7 @@ p: 1 paths = c_helpers.Paths({}, ds=FakeDataSource('')) cloud_cfg = handlers.cloud_config.CloudConfigPartHandler(paths) - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self.patchUtils(new_root) - self.patchOS(new_root) + self.reRoot() cloud_cfg.handle_part(None, handlers.CONTENT_START, None, None, None, None) for i, m in enumerate(messages): @@ -365,6 +346,7 @@ p: 1 def test_unhandled_type_warning(self): """Raw text without magic is ignored but shows warning.""" + self.reRoot() ci = stages.Init() data = "arbitrary text\n" ci.datasource = FakeDataSource(data) @@ -402,10 +384,7 @@ c: 4 message.attach(gzip_part(base_content2)) ci = stages.Init() ci.datasource = FakeDataSource(str(message)) - new_root = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, new_root) - self.patchUtils(new_root) - self.patchOS(new_root) + self.reRoot() ci.fetch() ci.consume_data() contents = util.load_file(ci.paths.get_ipath("cloud_config")) @@ -418,6 +397,7 @@ c: 4 def test_mime_text_plain(self): """Mime message of type text/plain is ignored but shows warning.""" + self.reRoot() ci = stages.Init() message = MIMEBase("text", "plain") 
        message.set_payload("Just text")
@@ -435,6 +415,7 @@ c: 4
 
     def test_shellscript(self):
         """Raw text starting #!/bin/sh is treated as script."""
+        self.reRoot()
         ci = stages.Init()
         script = "#!/bin/sh\necho hello\n"
         ci.datasource = FakeDataSource(script)
@@ -453,6 +434,7 @@ c: 4
 
     def test_mime_text_x_shellscript(self):
         """Mime message of type text/x-shellscript is treated as script."""
+        self.reRoot()
         ci = stages.Init()
         script = "#!/bin/sh\necho hello\n"
         message = MIMEBase("text", "x-shellscript")
@@ -473,6 +455,7 @@ c: 4
 
     def test_mime_text_plain_shell(self):
         """Mime type text/plain starting #!/bin/sh is treated as script."""
+        self.reRoot()
         ci = stages.Init()
         script = "#!/bin/sh\necho hello\n"
         message = MIMEBase("text", "plain")
@@ -493,6 +476,7 @@ c: 4
 
     def test_mime_application_octet_stream(self):
         """Mime type application/octet-stream is ignored but shows warning."""
+        self.reRoot()
         ci = stages.Init()
         message = MIMEBase("application", "octet-stream")
         message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc')
@@ -516,6 +500,7 @@ c: 4
             {'content': non_decodable}]
         message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode()
 
+        self.reRoot()
         ci = stages.Init()
         ci.datasource = FakeDataSource(message)
-- cgit v1.2.3

From 4f8ceffb2e3a9feefcb718bda7a7f0f21ef7ab7c Mon Sep 17 00:00:00 2001
From: "kaihuan.pkh"
Date: Thu, 13 Oct 2016 20:31:49 +0800
Subject: AliYun: Add new datasource for Ali-Cloud ECS

Support AliYun (Ali-Cloud ECS). This datasource inherits from EC2; the
main difference is that the metadata server address is changed to
100.100.100.200. The datasource behaves similarly to EC2 and relies on
network polling. As such, it is not enabled by default.

---
 cloudinit/sources/DataSourceAliYun.py          |  49 ++++++++
 cloudinit/sources/DataSourceEc2.py             |  18 ++-
 tests/unittests/test_datasource/test_aliyun.py | 148 +++++++++++++++++++++++++
 3 files changed, 205 insertions(+), 10 deletions(-)
 create mode 100644 cloudinit/sources/DataSourceAliYun.py
 create mode 100644 tests/unittests/test_datasource/test_aliyun.py
(limited to 'tests')

diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
new file mode 100644
index 00000000..19957212
--- /dev/null
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -0,0 +1,49 @@
+# vi: ts=4 expandtab
+
+import os
+
+from cloudinit import sources
+from cloudinit.sources import DataSourceEc2 as EC2
+
+DEF_MD_VERSION = "2016-01-01"
+
+
+class DataSourceAliYun(EC2.DataSourceEc2):
+    metadata_urls = ["http://100.100.100.200"]
+
+    def __init__(self, sys_cfg, distro, paths):
+        super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths)
+        self.seed_dir = os.path.join(paths.seed_dir, "AliYun")
+        self.api_ver = DEF_MD_VERSION
+
+    def get_hostname(self, fqdn=False, _resolve_ip=False):
+        return self.metadata.get('hostname', 'localhost.localdomain')
+
+    def get_public_ssh_keys(self):
+        return parse_public_keys(self.metadata.get('public-keys', {}))
+
+
+def parse_public_keys(public_keys):
+    keys = []
+    for key_id, key_body in public_keys.items():
+        if isinstance(key_body, str):
+            keys.append(key_body.strip())
+        elif isinstance(key_body, list):
+            keys.extend(key_body)
+        elif isinstance(key_body, dict):
+            key = key_body.get('openssh-key', [])
+            if isinstance(key, str):
+                keys.append(key.strip())
+            elif isinstance(key, list):
+                keys.extend(key)
+    return keys
+
+# Used to match classes to dependencies
+datasources = [
+  (DataSourceAliYun, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 6fe2a0bb..bc84ef5d 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -31,21 +31,19 @@ from cloudinit import util
 
 LOG = logging.getLogger(__name__)
 
-DEF_MD_URL = "http://169.254.169.254"
-
 # Which version we are requesting of the ec2 metadata apis
 DEF_MD_VERSION = '2009-04-04'
 
-# Default metadata urls that will be used if none are provided
-# They will be checked for 'resolveability' and some of the
-# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
-
 
 class DataSourceEc2(sources.DataSource):
+    # Default metadata urls that will be used if none are provided
+    # They will be checked for 'resolveability' and some of the
+    # following may be discarded if they do not resolve
+    metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
-        self.metadata_address = DEF_MD_URL
+        self.metadata_address = None
         self.seed_dir = os.path.join(paths.seed_dir, "ec2")
         self.api_ver = DEF_MD_VERSION
 
@@ -106,7 +104,7 @@ class DataSourceEc2(sources.DataSource):
             return False
 
         # Remove addresses from the list that wont resolve.
-        mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
+        mdurls = mcfg.get("metadata_urls", self.metadata_urls)
         filtered = [x for x in mdurls if util.is_resolvable_url(x)]
 
         if set(filtered) != set(mdurls):
@@ -117,7 +115,7 @@ class DataSourceEc2(sources.DataSource):
             mdurls = filtered
         else:
             LOG.warn("Empty metadata url list! using default list")
-            mdurls = DEF_MD_URLS
+            mdurls = self.metadata_urls
 
         urls = []
         url2base = {}
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
new file mode 100644
index 00000000..6f1de072
--- /dev/null
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -0,0 +1,148 @@
+import functools
+import httpretty
+import os
+
+from .. import helpers as test_helpers
+from cloudinit import helpers
+from cloudinit.sources import DataSourceAliYun as ay
+
+DEFAULT_METADATA = {
+    'instance-id': 'aliyun-test-vm-00',
+    'eipv4': '10.0.0.1',
+    'hostname': 'test-hostname',
+    'image-id': 'm-test',
+    'launch-index': '0',
+    'mac': '00:16:3e:00:00:00',
+    'network-type': 'vpc',
+    'private-ipv4': '192.168.0.1',
+    'serial-number': 'test-string',
+    'vpc-cidr-block': '192.168.0.0/16',
+    'vpc-id': 'test-vpc',
+    'vswitch-id': 'test-vpc',
+    'vswitch-cidr-block': '192.168.0.0/16',
+    'zone-id': 'test-zone-1',
+    'ntp-conf': {'ntp_servers': [
+        'ntp1.aliyun.com',
+        'ntp2.aliyun.com',
+        'ntp3.aliyun.com']},
+    'source-address': ['http://mirrors.aliyun.com',
+                       'http://mirrors.aliyuncs.com'],
+    'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'},
+                    'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}}
+}
+
+DEFAULT_USERDATA = """\
+#cloud-config
+
+hostname: localhost"""
+
+
+def register_mock_metaserver(base_url, data):
+    def register_helper(register, base_url, body):
+        if isinstance(body, str):
+            register(base_url, body)
+        elif isinstance(body, list):
+            register(base_url.rstrip('/'), '\n'.join(body) + '\n')
+        elif isinstance(body, dict):
+            vals = []
+            for k, v in body.items():
+                if isinstance(v, (str, list)):
+                    suffix = k.rstrip('/')
+                else:
+                    suffix = k.rstrip('/') + '/'
+                vals.append(suffix)
+                url = base_url.rstrip('/') + '/' + suffix
+                register_helper(register, url, v)
+            register(base_url, '\n'.join(vals) + '\n')
+
+    register = functools.partial(httpretty.register_uri, httpretty.GET)
+    register_helper(register, base_url, data)
+
+
+class TestAliYunDatasource(test_helpers.HttprettyTestCase):
+    def setUp(self):
+        super(TestAliYunDatasource, self).setUp()
+        cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}}
+        distro = {}
+        paths = helpers.Paths({})
+        self.ds = ay.DataSourceAliYun(cfg, distro, paths)
+        self.metadata_address = self.ds.metadata_urls[0]
+        self.api_ver = self.ds.api_ver
+
+    @property
+    def default_metadata(self):
+        return DEFAULT_METADATA
+
+    @property
+    def default_userdata(self):
+        return DEFAULT_USERDATA
+
+    @property
+    def metadata_url(self):
+        return os.path.join(self.metadata_address,
+                            self.api_ver, 'meta-data') + '/'
+
+    @property
+    def userdata_url(self):
+        return os.path.join(self.metadata_address,
+                            self.api_ver, 'user-data')
+
+    def regist_default_server(self):
+        register_mock_metaserver(self.metadata_url, self.default_metadata)
+        register_mock_metaserver(self.userdata_url, self.default_userdata)
+
+    def _test_get_data(self):
+        self.assertEqual(self.ds.metadata, self.default_metadata)
+        self.assertEqual(self.ds.userdata_raw,
+                         self.default_userdata.encode('utf8'))
+
+    def _test_get_sshkey(self):
+        pub_keys = [v['openssh-key'] for (_, v) in
+                    self.default_metadata['public-keys'].items()]
+        self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
+
+    def _test_get_iid(self):
+        self.assertEqual(self.default_metadata['instance-id'],
+                         self.ds.get_instance_id())
+
+    def _test_host_name(self):
+        self.assertEqual(self.default_metadata['hostname'],
+                         self.ds.get_hostname())
+
+    @httpretty.activate
+    def test_with_mock_server(self):
+        self.regist_default_server()
+        self.ds.get_data()
+        self._test_get_data()
+        self._test_get_sshkey()
+        self._test_get_iid()
+        self._test_host_name()
+
+    def test_parse_public_keys(self):
+        public_keys = {}
+        self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+        public_keys = {'key-pair-0': 'ssh-key-0'}
+        self.assertEqual(ay.parse_public_keys(public_keys),
+                         [public_keys['key-pair-0']])
+
+        public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'}
+        self.assertEqual(set(ay.parse_public_keys(public_keys)),
+                         set([public_keys['key-pair-0'],
+                              public_keys['key-pair-1']]))
+
+        public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']}
+        self.assertEqual(ay.parse_public_keys(public_keys),
+                         public_keys['key-pair-0'])
+
+        public_keys = {'key-pair-0': {'openssh-key': []}}
+        self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+        public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}}
+        self.assertEqual(ay.parse_public_keys(public_keys),
+                         [public_keys['key-pair-0']['openssh-key']])
+
+        public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0',
+                                                      'ssh-key-1']}}
+        self.assertEqual(ay.parse_public_keys(public_keys),
+                         public_keys['key-pair-0']['openssh-key'])
-- cgit v1.2.3

From 3416e2ee7f65defdb15aab861a85767d13e8c34c Mon Sep 17 00:00:00 2001
From: Robert Schweikert
Date: Sat, 29 Oct 2016 09:29:53 -0400
Subject: dmidecode: Allow dmidecode to be used on aarch64

aarch64 systems have a functional dmidecode, so allow it to be used on
that architecture as well.

---
 cloudinit/util.py            | 3 ++-
 tests/unittests/test_util.py | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)
(limited to 'tests')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 4b3fd0cb..9a3d3cd7 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2342,7 +2342,8 @@ def read_dmi_data(key):
     # running dmidecode can be problematic on some arches (LP: #1243287)
     uname_arch = os.uname()[4]
     if not (uname_arch == "x86_64" or
-            (uname_arch.startswith("i") and uname_arch[2:] == "86")):
+            (uname_arch.startswith("i") and uname_arch[2:] == "86") or
+            uname_arch == 'aarch64'):
         LOG.debug("dmidata is not supported on %s", uname_arch)
         return None
 
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 881509aa..f6a8ab75 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -386,7 +386,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
         dmi_name = 'use-dmidecode'
         self._configure_dmidecode_return(dmi_name, dmi_val)
 
-        expected = {'armel': None, 'aarch64': None, 'x86_64': dmi_val}
+        expected = {'armel': None, 'aarch64': dmi_val, 'x86_64': dmi_val}
         found = {}
         # we do not run the 'dmi-decode' binary on some arches
         # verify that anything requested that is not in the sysfs dir
-- cgit v1.2.3
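
A note on the AliYun patch: hoisting the EC2 default urls into the class
attribute 'metadata_urls' is what lets the subclass swap in the
100.100.100.200 endpoint without duplicating any of the url-filtering or
polling logic. The parse_public_keys() helper it introduces accepts the
three value shapes seen in the instance metadata; below is a minimal
usage sketch, assuming a cloud-init tree containing this patch is on the
Python path (the sample key material is a placeholder):

    from cloudinit.sources.DataSourceAliYun import parse_public_keys

    # String values are stripped and collected.
    print(parse_public_keys({'key-pair-0': 'ssh-rsa AAAA... user@host\n'}))
    # -> ['ssh-rsa AAAA... user@host']

    # List values are extended into the result as-is.
    print(parse_public_keys({'key-pair-0': ['ssh-key-0', 'ssh-key-1']}))
    # -> ['ssh-key-0', 'ssh-key-1']

    # Dict values contribute their 'openssh-key' entry (string or list).
    print(parse_public_keys({'key-pair-0': {'openssh-key': 'ssh-key-0'}}))
    # -> ['ssh-key-0']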
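The dmidecode change reads most easily as a predicate; here is a
standalone sketch of the arch gate as it stands after the patch (the
function name is ours for illustration, not cloud-init's):

    import os

    def dmi_arch_supported():
        # Mirrors the check in util.read_dmi_data: x86, x86_64 and, with
        # this patch, aarch64 may run the dmidecode binary.
        uname_arch = os.uname()[4]
        return (uname_arch == "x86_64" or
                (uname_arch.startswith("i") and uname_arch[2:] == "86") or
                uname_arch == "aarch64")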