From 80f5ec4be0f781b26eca51d90d51abfab396b3f6 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 19 Sep 2016 11:48:54 -0400
Subject: Adjust mounts and disk configuration for systemd.

The end result of all of these changes is to get mounts managed by
cloud-init to occur only after cloud-init.service is done. We need to
do that so that filesystems that are set up by cloud-init (in
disk_setup) do not get mounted by stale entries in /etc/fstab before
the setup occurs.

This can occur in 2 ways:
 a.) new instance with old /etc/fstab
 b.) same instance where disk needs adjusting (Azure resize will
     re-format the ephemeral disk).

The list of changes here is:
 - move mounts and disk_setup module to cloud-init.service rather than
   config. cloud-init.service runs earlier in boot so it can get those
   mount points done earlier.
 - on systemd add 'x-systemd.requires=cloud-init.service' to fstab options
 - cloud-init-local.service: add Before=basic.target
 - cloud-init.service:
   - extend After, Before, and Wants to multiple lines rather than one
     long line.
   - sort consistently with cloud-init-local.service
   - add DefaultDependencies=no
   - add Before=default.target
   - add Conflicts=shutdown.target

LP: #1611074
---
 cloudinit/sources/DataSourceAzure.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index dbc2bb68..b802b03e 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -252,7 +252,7 @@ class DataSourceAzureNet(sources.DataSource):
 
         cc_modules_override = support_new_ephemeral(self.sys_cfg)
         if cc_modules_override:
-            self.cfg['cloud_config_modules'] = cc_modules_override
+            self.cfg['cloud_init_modules'] = cc_modules_override
 
         return True
 
@@ -283,11 +283,14 @@ def find_fabric_formatted_ephemeral_part():
             device_location = potential_location
             break
     if device_location is None:
+        LOG.debug("no azure resource disk partition path found")
         return None
     ntfs_devices = util.find_devs_with("TYPE=ntfs")
     real_device = os.path.realpath(device_location)
     if real_device in ntfs_devices:
         return device_location
+    LOG.debug("'%s' existed (%s) but was not ntfs formated",
+              device_location, real_device)
     return None
 
 
@@ -342,7 +345,7 @@ def support_new_ephemeral(cfg):
    LOG.debug("cloud-init will format ephemeral0.1 this boot.")
    LOG.debug("setting disk_setup and mounts modules 'always' for this boot")
 
-    cc_modules = cfg.get('cloud_config_modules')
+    cc_modules = cfg.get('cloud_init_modules')
    if not cc_modules:
        return None
-- 
cgit v1.2.3


From 9f83bb8e80806d3dd79ba426474dc3c696e19a41 Mon Sep 17 00:00:00 2001
From: Ben Howard
Date: Fri, 19 Aug 2016 16:28:26 -0600
Subject: DigitalOcean: use meta-data for network configuration

On DigitalOcean, network information is provided via meta-data. This
change makes the datasource a local datasource, meaning it will run
before fallback networking is configured.

The advantage of that is that before networking is configured it can
bring up a network device with an IPv4 link-local address and hit the
metadata service that lives at 169.254.169.254 to find its networking
configuration. It then takes down the link-local address and lets
cloud-init configure networking.

The configuring of a network device to go looking for a metadata
service is gated by a check of data in the SMBIOS. This guarantees
that the code will not run on systems that are not DigitalOcean
droplets.
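The SMBIOS gate described above is worth a concrete illustration. A
minimal sketch of the idea, assuming a Linux sysfs DMI layout (the
series itself does this through util.read_dmi_data in the read_sysinfo
helper added below, not with this code):

    # Sketch only: gate on the DMI system vendor before doing any
    # network probing.  The sysfs path is a common Linux location for
    # SMBIOS data; the patch uses cloudinit.util.read_dmi_data instead.
    def running_on_digitalocean():
        try:
            with open('/sys/class/dmi/id/sys_vendor') as f:
                return f.read().strip() == 'DigitalOcean'
        except IOError:
            # no DMI information available; assume not a droplet
            return False

Only when a check like this passes does the datasource bring up the
link-local address and query 169.254.169.254.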
---
 cloudinit/sources/DataSourceDigitalOcean.py        | 101 +++---
 cloudinit/sources/helpers/digitalocean.py          | 218 +++++++++++++
 .../unittests/test_datasource/test_digitalocean.py | 338 +++++++++++++++++----
 3 files changed, 543 insertions(+), 114 deletions(-)
 create mode 100644 cloudinit/sources/helpers/digitalocean.py

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index fc596e17..c5770d5d 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -18,13 +18,12 @@
 # DigitalOcean Droplet API:
 # https://developers.digitalocean.com/documentation/metadata/
 
-import json
-
 from cloudinit import log as logging
 from cloudinit import sources
-from cloudinit import url_helper
 from cloudinit import util
 
+import cloudinit.sources.helpers.digitalocean as do_helper
+
 LOG = logging.getLogger(__name__)
 
 BUILTIN_DS_CONFIG = {
@@ -36,11 +35,13 @@ BUILTIN_DS_CONFIG = {
 MD_RETRIES = 30
 MD_TIMEOUT = 2
 MD_WAIT_RETRY = 2
+MD_USE_IPV4LL = True
 
 
 class DataSourceDigitalOcean(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
+        self.distro = distro
         self.metadata = dict()
         self.ds_cfg = util.mergemanydict([
             util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
@@ -48,80 +49,72 @@ class DataSourceDigitalOcean(sources.DataSource):
         self.metadata_address = self.ds_cfg['metadata_url']
         self.retries = self.ds_cfg.get('retries', MD_RETRIES)
         self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
+        self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
         self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+        self._network_config = None
 
     def _get_sysinfo(self):
-        # DigitalOcean embeds vendor ID and instance/droplet_id in the
-        # SMBIOS information
-
-        LOG.debug("checking if instance is a DigitalOcean droplet")
-
-        # Detect if we are on DigitalOcean and return the Droplet's ID
-        vendor_name = util.read_dmi_data("system-manufacturer")
-        if vendor_name != "DigitalOcean":
-            return (False, None)
+        return do_helper.read_sysinfo()
 
-        LOG.info("running on DigitalOcean")
-
-        droplet_id = util.read_dmi_data("system-serial-number")
-        if droplet_id:
-            LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet"
-                       "{}").format(droplet_id))
-        else:
-            LOG.critical(("system identified via SMBIOS as a DigitalOcean "
-                          "Droplet, but did not provide an ID. Please file a "
-                          "support ticket at: "
-                          "https://cloud.digitalocean.com/support/tickets/"
-                          "new"))
-
-        return (True, droplet_id)
-
-    def get_data(self, apply_filter=False):
+    def get_data(self):
         (is_do, droplet_id) = self._get_sysinfo()
 
         # only proceed if we know we are on DigitalOcean
         if not is_do:
             return False
 
-        LOG.debug("reading metadata from {}".format(self.metadata_address))
-        response = url_helper.readurl(self.metadata_address,
-                                      timeout=self.timeout,
-                                      sec_between=self.wait_retry,
-                                      retries=self.retries)
+        LOG.info("Running on digital ocean. droplet_id=%s" % droplet_id)
droplet_id=%s" % droplet_id) - contents = util.decode_binary(response.contents) - decoded = json.loads(contents) + ipv4LL_nic = None + if self.use_ip4LL: + ipv4LL_nic = do_helper.assign_ipv4_link_local() - self.metadata = decoded - self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id) - self.metadata['local-hostname'] = decoded.get('hostname', droplet_id) - self.vendordata_raw = decoded.get("vendor_data", None) - self.userdata_raw = decoded.get("user_data", None) - return True + md = do_helper.read_metadata( + self.metadata_address, timeout=self.timeout, + sec_between=self.wait_retry, retries=self.retries) - def get_public_ssh_keys(self): - public_keys = self.metadata.get('public_keys', []) - if isinstance(public_keys, list): - return public_keys - else: - return [public_keys] + self.metadata_full = md + self.metadata['instance-id'] = md.get('droplet_id', droplet_id) + self.metadata['local-hostname'] = md.get('hostname', droplet_id) + self.metadata['interfaces'] = md.get('interfaces') + self.metadata['public-keys'] = md.get('public_keys') + self.metadata['availability_zone'] = md.get('region', 'default') + self.vendordata_raw = md.get("vendor_data", None) + self.userdata_raw = md.get("user_data", None) - @property - def availability_zone(self): - return self.metadata.get('region', 'default') + if ipv4LL_nic: + do_helper.del_ipv4_link_local(ipv4LL_nic) - @property - def launch_index(self): - return None + return True def check_instance_id(self, sys_cfg): return sources.instance_id_matches_system_uuid( self.get_instance_id(), 'system-serial-number') + @property + def network_config(self): + """Configure the networking. This needs to be done each boot, since + the IP information may have changed due to snapshot and/or + migration. + """ + + if self._network_config: + return self._network_config + + interfaces = self.metadata.get('interfaces') + LOG.debug(interfaces) + if not interfaces: + raise Exception("Unable to get meta-data from server....") + + nameservers = self.metadata_full['dns']['nameservers'] + self._network_config = do_helper.convert_network_configuration( + interfaces, nameservers) + return self._network_config + # Used to match classes to dependencies datasources = [ - (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )), ] diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py new file mode 100644 index 00000000..b0a721c2 --- /dev/null +++ b/cloudinit/sources/helpers/digitalocean.py @@ -0,0 +1,218 @@ +# vi: ts=4 expandtab +# +# Author: Ben Howard + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+
+import json
+import random
+
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import url_helper
+from cloudinit import util
+
+NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
+
+LOG = logging.getLogger(__name__)
+
+
+def assign_ipv4_link_local(nic=None):
+    """Bring up NIC using an address using link-local (ip4LL) IPs.
+       On DigitalOcean, the link-local domain is per-droplet routed, so there
+       is no risk of collisions. However, to be more safe, the ip4LL
+       address is random.
+    """
+
+    if not nic:
+        for cdev in sorted(cloudnet.get_devicelist()):
+            if cloudnet.is_physical(cdev):
+                nic = cdev
+                LOG.debug("assigned nic '%s' for link-local discovery", nic)
+                break
+
+    if not nic:
+        raise RuntimeError("unable to find interfaces to access the"
+                           "meta-data server. This droplet is broken.")
+
+    addr = "169.254.{0}.{1}/16".format(random.randint(1, 168),
+                                       random.randint(0, 255))
+
+    ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
+    ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
+
+    if not util.which('ip'):
+        raise RuntimeError("No 'ip' command available to configure ip4LL "
+                           "address")
+
+    try:
+        (result, _err) = util.subp(ip_addr_cmd)
+        LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
+
+        (result, _err) = util.subp(ip_link_cmd)
+        LOG.debug("brought device '%s' up", nic)
+    except Exception:
+        util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
+                         " Droplet networking will be broken", addr, nic)
+        raise
+
+    return nic
+
+
+def del_ipv4_link_local(nic=None):
+    """Remove the ip4LL address. While this is not necessary, the ip4LL
+       address is extraneous and confusing to users.
+    """
+    if not nic:
+        LOG.debug("no link_local address interface defined, skipping link "
+                  "local address cleanup")
+        return
+
+    LOG.debug("cleaning up ipv4LL address")
+
+    ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
+
+    try:
+        (result, _err) = util.subp(ip_addr_cmd)
+        LOG.debug("removed ip4LL addresses from %s", nic)
+
+    except Exception as e:
+        util.logexc(LOG, "failed to remove ip4LL address from '%s'.", nic, e)
+
+
+def convert_network_configuration(config, dns_servers):
+    """Convert the DigitalOcean Network description into Cloud-init's
+       netconfig format.
+
+       Example JSON:
+        {'public': [
+              {'mac': '04:01:58:27:7f:01',
+               'ipv4': {'gateway': '45.55.32.1',
+                        'netmask': '255.255.224.0',
+                        'ip_address': '45.55.50.93'},
+               'anchor_ipv4': {
+                        'gateway': '10.17.0.1',
+                        'netmask': '255.255.0.0',
+                        'ip_address': '10.17.0.9'},
+               'type': 'public',
+               'ipv6': {'gateway': '....',
+                        'ip_address': '....',
+                        'cidr': 64}}
+           ],
+        'private': [
+              {'mac': '04:01:58:27:7f:02',
+               'ipv4': {'gateway': '10.132.0.1',
+                        'netmask': '255.255.0.0',
+                        'ip_address': '10.132.75.35'},
+               'type': 'private'}
+           ]
+        }
+    """
+
+    def _get_subnet_part(pcfg, nameservers=None):
+        subpart = {'type': 'static',
+                   'control': 'auto',
+                   'address': pcfg.get('ip_address'),
+                   'gateway': pcfg.get('gateway')}
+
+        if nameservers:
+            subpart['dns_nameservers'] = nameservers
+
+        if ":" in pcfg.get('ip_address'):
+            subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
+                                                  pcfg.get('cidr'))
+        else:
+            subpart['netmask'] = pcfg.get('netmask')
+
+        return subpart
+
+    all_nics = []
+    for k in ('public', 'private'):
+        if k in config:
+            all_nics.extend(config[k])
+
+    macs_to_nics = cloudnet.get_interfaces_by_mac()
+    nic_configs = []
+
+    for nic in all_nics:
+
+        mac_address = nic.get('mac')
+        sysfs_name = macs_to_nics.get(mac_address)
+        nic_type = nic.get('type', 'unknown')
+        # Note: the entry 'public' above contains a list, but
+        # the list will only ever have one nic inside it per digital ocean.
+        # If it ever had more than one nic, then this code would
+        # assign all 'public' the same name.
+        if_name = NIC_MAP.get(nic_type, sysfs_name)
+
+        LOG.debug("mapped %s interface to %s, assigning name of %s",
+                  mac_address, sysfs_name, if_name)
+
+        ncfg = {'type': 'physical',
+                'mac_address': mac_address,
+                'name': if_name}
+
+        subnets = []
+        for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):
+            raw_subnet = nic.get(netdef, None)
+            if not raw_subnet:
+                continue
+
+            sub_part = _get_subnet_part(raw_subnet)
+            if nic_type == 'public' and 'anchor' not in netdef:
+                # add DNS resolvers to the public interfaces only
+                sub_part = _get_subnet_part(raw_subnet, dns_servers)
+            else:
+                # remove the gateway any non-public interfaces
+                if 'gateway' in sub_part:
+                    del sub_part['gateway']
+
+            subnets.append(sub_part)
+
+        ncfg['subnets'] = subnets
+        nic_configs.append(ncfg)
+        LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
+
+    return {'version': 1, 'config': nic_configs}
+
+
+def read_metadata(url, timeout=2, sec_between=2, retries=30):
+    response = url_helper.readurl(url, timeout=timeout,
+                                  sec_between=sec_between, retries=retries)
+    if not response.ok():
+        raise RuntimeError("unable to read metadata at %s" % url)
+    return json.loads(response.contents.decode())
+
+
+def read_sysinfo():
+    # DigitalOcean embeds vendor ID and instance/droplet_id in the
+    # SMBIOS information
+
+    # Detect if we are on DigitalOcean and return the Droplet's ID
+    vendor_name = util.read_dmi_data("system-manufacturer")
+    if vendor_name != "DigitalOcean":
+        return (False, None)
+
+    droplet_id = util.read_dmi_data("system-serial-number")
+    if droplet_id:
+        LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
+                  droplet_id)
+    else:
+        msg = ("system identified via SMBIOS as a DigitalOcean "
+               "Droplet, but did not provide an ID. Please file a "
+               "support ticket at: "
+               "https://cloud.digitalocean.com/support/tickets/new")
+        LOG.critical(msg)
+        raise RuntimeError(msg)
+
+    return (True, droplet_id)
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
index f5d2ef35..bdfe0ba2 100644
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ b/tests/unittests/test_datasource/test_digitalocean.py
@@ -20,25 +20,123 @@ import json
 from cloudinit import helpers
 from cloudinit import settings
 from cloudinit.sources import DataSourceDigitalOcean
+from cloudinit.sources.helpers import digitalocean
 
-from .. import helpers as test_helpers
-from ..helpers import HttprettyTestCase
-
-httpretty = test_helpers.import_httpretty()
+from ..helpers import mock, TestCase
 
 DO_MULTIPLE_KEYS = ["ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@do.co",
                     "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@do.co"]
 DO_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@do.co"
 
-DO_META = {
-    'user_data': 'user_data_here',
-    'vendor_data': 'vendor_data_here',
-    'public_keys': DO_SINGLE_KEY,
-    'region': 'nyc3',
-    'id': '2000000',
-    'hostname': 'cloudinit-test',
+# the following JSON was taken from droplet (that's why its a string)
+DO_META = json.loads("""
+{
+  "droplet_id": "22532410",
+  "hostname": "utl-96268",
+  "vendor_data": "vendordata goes here",
+  "user_data": "userdata goes here",
+  "public_keys": "",
+  "auth_key": "authorization_key",
+  "region": "nyc3",
+  "interfaces": {
+    "private": [
+      {
+        "ipv4": {
+          "ip_address": "10.132.6.205",
+          "netmask": "255.255.0.0",
+          "gateway": "10.132.0.1"
+        },
+        "mac": "04:01:57:d1:9e:02",
+        "type": "private"
+      }
+    ],
+    "public": [
+      {
+        "ipv4": {
+          "ip_address": "192.0.0.20",
+          "netmask": "255.255.255.0",
+          "gateway": "104.236.0.1"
+        },
+        "ipv6": {
+          "ip_address": "2604:A880:0800:0000:1000:0000:0000:0000",
+          "cidr": 64,
+          "gateway": "2604:A880:0800:0000:0000:0000:0000:0001"
+        },
+        "anchor_ipv4": {
+          "ip_address": "10.0.0.5",
+          "netmask": "255.255.0.0",
+          "gateway": "10.0.0.1"
+        },
+        "mac": "04:01:57:d1:9e:01",
+        "type": "public"
+      }
+    ]
+  },
+  "floating_ip": {
+    "ipv4": {
+      "active": false
+    }
+  },
+  "dns": {
+    "nameservers": [
+      "2001:4860:4860::8844",
+      "2001:4860:4860::8888",
+      "8.8.8.8"
+    ]
+  }
+}
+""")
+
+# This has no private interface
+DO_META_2 = {
+    "droplet_id": 27223699,
+    "hostname": "smtest1",
+    "vendor_data": "\n".join([
+        ('"Content-Type: multipart/mixed; '
+         'boundary=\"===============8645434374073493512==\"'),
+        'MIME-Version: 1.0',
+        '',
+        '--===============8645434374073493512==',
+        'MIME-Version: 1.0'
+        'Content-Type: text/cloud-config; charset="us-ascii"'
+        'Content-Transfer-Encoding: 7bit'
+        'Content-Disposition: attachment; filename="cloud-config"'
+        '',
+        '#cloud-config',
+        'disable_root: false',
+        'manage_etc_hosts: true',
+        '',
+        '',
+        '--===============8645434374073493512=='
+    ]),
+    "public_keys": [
+        "ssh-rsa AAAAB3NzaN...N3NtHw== smoser@brickies"
+    ],
+    "auth_key": "88888888888888888888888888888888",
+    "region": "nyc3",
+    "interfaces": {
+        "public": [{
+            "ipv4": {
+                "ip_address": "45.55.249.133",
+                "netmask": "255.255.192.0",
+                "gateway": "45.55.192.1"
+            },
+            "anchor_ipv4": {
+                "ip_address": "10.17.0.5",
+                "netmask": "255.255.0.0",
+                "gateway": "10.17.0.1"
+            },
+            "mac": "ae:cc:08:7c:88:00",
+            "type": "public"
+        }]
+    },
+    "floating_ip": {"ipv4": {"active": True, "ip_address": "138.197.59.92"}},
+    "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]},
+    "tags": None,
 }
 
+DO_META['public_keys'] = DO_SINGLE_KEY
+
 MD_URL = 'http://169.254.169.254/metadata/v1.json'
@@ -46,69 +144,189 @@ def _mock_dmi():
     return (True, DO_META.get('id'))
 
 
-def _request_callback(method, uri, headers):
-    return (200, headers, json.dumps(DO_META))
-
-
-class TestDataSourceDigitalOcean(HttprettyTestCase):
+class TestDataSourceDigitalOcean(TestCase):
     """
     Test reading the meta-data
     """
-    def setUp(self):
-        self.ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
-            settings.CFG_BUILTIN, None,
-            helpers.Paths({}))
-        self.ds._get_sysinfo = _mock_dmi
-        super(TestDataSourceDigitalOcean, self).setUp()
-
-    @httpretty.activate
-    def test_connection(self):
-        httpretty.register_uri(
-            httpretty.GET, MD_URL,
-            body=json.dumps(DO_META))
-
-        success = self.ds.get_data()
-        self.assertTrue(success)
-
-    @httpretty.activate
-    def test_metadata(self):
-        httpretty.register_uri(
-            httpretty.GET, MD_URL,
-            body=_request_callback)
-        self.ds.get_data()
+    def get_ds(self, get_sysinfo=_mock_dmi):
+        ds = DataSourceDigitalOcean.DataSourceDigitalOcean(
+            settings.CFG_BUILTIN, None, helpers.Paths({}))
+        ds.use_ip4LL = False
+        if get_sysinfo is not None:
+            ds._get_sysinfo = get_sysinfo
+        return ds
 
-        self.assertEqual(DO_META.get('user_data'),
-                         self.ds.get_userdata_raw())
+    @mock.patch('cloudinit.sources.helpers.digitalocean.read_sysinfo')
+    def test_returns_false_not_on_docean(self, m_read_sysinfo):
+        m_read_sysinfo.return_value = (False, None)
+        ds = self.get_ds(get_sysinfo=None)
+        self.assertEqual(False, ds.get_data())
+        m_read_sysinfo.assert_called()
 
-        self.assertEqual(DO_META.get('vendor_data'),
-                         self.ds.get_vendordata_raw())
+    @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
+    def test_metadata(self, mock_readmd):
+        mock_readmd.return_value = DO_META.copy()
 
-        self.assertEqual(DO_META.get('region'),
-                         self.ds.availability_zone)
+        ds = self.get_ds()
+        ret = ds.get_data()
+        self.assertTrue(ret)
 
-        self.assertEqual(DO_META.get('id'),
-                         self.ds.get_instance_id())
+        mock_readmd.assert_called()
 
-        self.assertEqual(DO_META.get('hostname'),
-                         self.ds.get_hostname())
+        self.assertEqual(DO_META.get('user_data'), ds.get_userdata_raw())
+        self.assertEqual(DO_META.get('vendor_data'), ds.get_vendordata_raw())
+        self.assertEqual(DO_META.get('region'), ds.availability_zone)
+        self.assertEqual(DO_META.get('droplet_id'), ds.get_instance_id())
+        self.assertEqual(DO_META.get('hostname'), ds.get_hostname())
 
         # Single key
         self.assertEqual([DO_META.get('public_keys')],
-                         self.ds.get_public_ssh_keys())
+                         ds.get_public_ssh_keys())
 
-        self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
+        self.assertIsInstance(ds.get_public_ssh_keys(), list)
 
-    @httpretty.activate
-    def test_multiple_ssh_keys(self):
-        DO_META['public_keys'] = DO_MULTIPLE_KEYS
-        httpretty.register_uri(
-            httpretty.GET, MD_URL,
-            body=_request_callback)
-        self.ds.get_data()
+    @mock.patch('cloudinit.sources.helpers.digitalocean.read_metadata')
+    def test_multiple_ssh_keys(self, mock_readmd):
+        metadata = DO_META.copy()
+        metadata['public_keys'] = DO_MULTIPLE_KEYS
+        mock_readmd.return_value = metadata.copy()
+
+        ds = self.get_ds()
+        ret = ds.get_data()
+        self.assertTrue(ret)
+
+        mock_readmd.assert_called()
 
         # Multiple keys
-        self.assertEqual(DO_META.get('public_keys'),
-                         self.ds.get_public_ssh_keys())
+        self.assertEqual(metadata['public_keys'], ds.get_public_ssh_keys())
+        self.assertIsInstance(ds.get_public_ssh_keys(), list)
+
+
+class TestNetworkConvert(TestCase):
+
+    def _get_networking(self):
+        netcfg = digitalocean.convert_network_configuration(
+            DO_META['interfaces'], DO_META['dns']['nameservers'])
+        self.assertIn('config', netcfg)
+        return netcfg
+
+    def test_networking_defined(self):
+        netcfg = self._get_networking()
+        self.assertIsNotNone(netcfg)
+
+        for nic_def in netcfg.get('config'):
+            print(json.dumps(nic_def, indent=3))
+            n_type = nic_def.get('type')
+            n_subnets = nic_def.get('type')
+            n_name = nic_def.get('name')
+            n_mac = nic_def.get('mac_address')
+
+            self.assertIsNotNone(n_type)
+            self.assertIsNotNone(n_subnets)
+            self.assertIsNotNone(n_name)
+            self.assertIsNotNone(n_mac)
+
+    def _get_nic_definition(self, int_type, expected_name):
+        """helper function to return if_type (i.e. public) and the expected
+           name used by cloud-init (i.e eth0)"""
+        netcfg = self._get_networking()
+        meta_def = (DO_META.get('interfaces')).get(int_type)[0]
+
+        self.assertEqual(int_type, meta_def.get('type'))
+
+        for nic_def in netcfg.get('config'):
+            print(nic_def)
+            if nic_def.get('name') == expected_name:
+                return nic_def, meta_def
+
+    def _get_match_subn(self, subnets, ip_addr):
+        """get the matching subnet definition based on ip address"""
+        for subn in subnets:
+            address = subn.get('address')
+            self.assertIsNotNone(address)
+
+            # equals won't work because of ipv6 addressing being in
+            # cidr notation, i.e fe00::1/64
+            if ip_addr in address:
+                print(json.dumps(subn, indent=3))
+                return subn
+
+    def test_public_interface_defined(self):
+        """test that the public interface is defined as eth0"""
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        self.assertEqual('eth0', nic_def.get('name'))
+        self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
+        self.assertEqual('physical', nic_def.get('type'))
+
+    def test_private_interface_defined(self):
+        """test that the private interface is defined as eth1"""
+        (nic_def, meta_def) = self._get_nic_definition('private', 'eth1')
+        self.assertEqual('eth1', nic_def.get('name'))
+        self.assertEqual(meta_def.get('mac'), nic_def.get('mac_address'))
+        self.assertEqual('physical', nic_def.get('type'))
+
+    def _check_dns_nameservers(self, subn_def):
+        self.assertIn('dns_nameservers', subn_def)
+        expected_nameservers = DO_META['dns']['nameservers']
+        nic_nameservers = subn_def.get('dns_nameservers')
+        self.assertEqual(expected_nameservers, nic_nameservers)
+
+    def test_public_interface_ipv6(self):
+        """test public ipv6 addressing"""
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        ipv6_def = meta_def.get('ipv6')
+        self.assertIsNotNone(ipv6_def)
+
+        subn_def = self._get_match_subn(nic_def.get('subnets'),
+                                        ipv6_def.get('ip_address'))
+
+        cidr_notated_address = "{0}/{1}".format(ipv6_def.get('ip_address'),
+                                                ipv6_def.get('cidr'))
+
+        self.assertEqual(cidr_notated_address, subn_def.get('address'))
+        self.assertEqual(ipv6_def.get('gateway'), subn_def.get('gateway'))
+        self._check_dns_nameservers(subn_def)
+
+    def test_public_interface_ipv4(self):
+        """test public ipv4 addressing"""
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        ipv4_def = meta_def.get('ipv4')
+        self.assertIsNotNone(ipv4_def)
+
+        subn_def = self._get_match_subn(nic_def.get('subnets'),
+                                        ipv4_def.get('ip_address'))
+
+        self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
+        self.assertEqual(ipv4_def.get('gateway'), subn_def.get('gateway'))
+        self._check_dns_nameservers(subn_def)
+
+    def test_public_interface_anchor_ipv4(self):
+        """test public ipv4 addressing"""
+        (nic_def, meta_def) = self._get_nic_definition('public', 'eth0')
+        ipv4_def = meta_def.get('anchor_ipv4')
+        self.assertIsNotNone(ipv4_def)
+
+        subn_def = self._get_match_subn(nic_def.get('subnets'),
+                                        ipv4_def.get('ip_address'))
+
+        self.assertEqual(ipv4_def.get('netmask'), subn_def.get('netmask'))
+        self.assertNotIn('gateway', subn_def)
+
+    def test_convert_without_private(self):
+        netcfg = digitalocean.convert_network_configuration(
+            DO_META_2['interfaces'], DO_META_2['dns']['nameservers'])
 
-        self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
+        byname = {}
+        for i in netcfg['config']:
+            if 'name' in i:
+                if i['name'] in byname:
+                    raise ValueError("name '%s' in config twice: %s" %
+                                     (i['name'], netcfg))
+                byname[i['name']] = i
+        self.assertTrue('eth0' in byname)
+        self.assertTrue('subnets' in byname['eth0'])
+        eth0 = byname['eth0']
+        self.assertEqual(
+            sorted(['45.55.249.133', '10.17.0.5']),
+            sorted([i['address'] for i in eth0['subnets']]))
-- 
cgit v1.2.3


From 808edb127507d91ecee1834aaf5cf1f000cd6e28 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Tue, 20 Sep 2016 10:27:54 -0400
Subject: MAAS: improve the datasource's main() to look at kernel cmdline
 config.

This just looks in one other maas-related path for a config file. The
file '91_kernel_cmdline_url' is written by cloud-init when it gets a
cloud-config-url parameter. Also, we now read the config even if a url
is specified, to potentially fill in credentials.
---
 cloudinit/sources/DataSourceMAAS.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index ab93c0a2..81abcd47 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -310,12 +310,12 @@ if __name__ == "__main__":
             creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
                      'token_secret': args.tsec, 'consumer_secret': args.csec}
 
-            maaspkg_cfg = "/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg"
-            if (args.config is None and args.url is None and
-                    os.path.exists(maaspkg_cfg) and
-                    os.access(maaspkg_cfg, os.R_OK)):
-                sys.stderr.write("Used config in %s.\n" % maaspkg_cfg)
-                args.config = maaspkg_cfg
+            if args.config is None:
+                for fname in ('91_kernel_cmdline_url', '90_dpkg_maas'):
+                    fpath = "/etc/cloud/cloud.cfg.d/" + fname + ".cfg"
+                    if os.path.exists(fpath) and os.access(fpath, os.R_OK):
+                        sys.stderr.write("Used config in %s.\n" % fpath)
+                        args.config = fpath
 
             if args.config:
                 cfg = util.read_conf(args.config)
-- 
cgit v1.2.3


From e8730078df8c99696b1b684e09c803eef7c4926c Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 30 Sep 2016 15:53:42 -0400
Subject: Fix python2.6 things found running in centos 6.

This gets the tests running in centos 6.

 * ProcessExecutionError: remove setting of .message
   Nothing in cloud-init seems to use .message anywhere, so it does not
   seem necessary. The reason to change it is that on 2.6 it spits out:
     cloudinit/util.py:286: DeprecationWarning: BaseException.message
 * tox.ini: add a centos6 environment
   the tox versions listed here replicate a centos6 install with
   packages from EPEL. You will still need a python2.6 to run this env,
   so we do not enable it by default.
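A side effect of the same Python 2.6 work shows up in the decode()
calls in the diff below, where errors= keyword arguments become
positional. The reason, illustrated as a sketch rather than anything
taken from the patch, is that on Python 2.6 the decode() method does
not accept keyword arguments at all:

    data = b'\xff\xfeabc'

    # On Python 2.6 this raises
    # "TypeError: decode() takes no keyword arguments":
    #     data.decode('utf-8', errors='replace')

    # Passing the error handler positionally works across versions:
    text = data.decode('utf-8', 'replace')

Hence the change from decode(m, errors=decode) to decode(m, decode) in
util.subp's ldecode helper, and the analogous edits in the tests.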
---
 cloudinit/sources/DataSourceAltCloud.py                  |  6 ++----
 cloudinit/sources/helpers/azure.py                       |  2 +-
 cloudinit/util.py                                        |  7 ++-----
 tests/unittests/test_handler/test_handler_apt_conf_v1.py |  2 +-
 tests/unittests/test_util.py                             |  2 +-
 tox.ini                                                  | 16 ++++++++++++++++
 6 files changed, 23 insertions(+), 12 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 48136f7c..20345389 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -195,8 +195,7 @@ class DataSourceAltCloud(sources.DataSource):
             (cmd_out, _err) = util.subp(cmd)
             LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
         except ProcessExecutionError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
-                        _err.message)
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
             return False
         except OSError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
@@ -211,8 +210,7 @@ class DataSourceAltCloud(sources.DataSource):
             (cmd_out, _err) = util.subp(cmd)
             LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
         except ProcessExecutionError as _err:
-            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
-                        _err.message)
+            util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
             return False
         except OSError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 689ed4cc..1b3e9b70 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -232,7 +232,7 @@ class WALinuxAgentShim(object):
     def _get_value_from_leases_file(fallback_lease_file):
         leases = []
         content = util.load_file(fallback_lease_file)
-        LOG.debug("content is {}".format(content))
+        LOG.debug("content is %s", content)
         for line in content.splitlines():
             if 'unknown-245' in line:
                 # Example line from Ubuntu
diff --git a/cloudinit/util.py b/cloudinit/util.py
index eb3e5899..4cff83c5 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -199,7 +199,7 @@ def fully_decoded_payload(part):
             encoding = charset.input_codec
         else:
             encoding = 'utf-8'
-        return cte_payload.decode(encoding, errors='surrogateescape')
+        return cte_payload.decode(encoding, 'surrogateescape')
     return cte_payload
 
 
@@ -282,9 +282,6 @@ class ProcessExecutionError(IOError):
             'reason': self.reason,
         }
         IOError.__init__(self, message)
-        # For backward compatibility with Python 2.
-        if not hasattr(self, 'message'):
-            self.message = message
 
 
 class SeLinuxGuard(object):
@@ -1821,7 +1818,7 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
     def ldecode(data, m='utf-8'):
         if not isinstance(data, bytes):
             return data
-        return data.decode(m, errors=decode)
+        return data.decode(m, decode)
 
     out = ldecode(out)
     err = ldecode(err)
diff --git a/tests/unittests/test_handler/test_handler_apt_conf_v1.py b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
index 45714efd..64acc3e0 100644
--- a/tests/unittests/test_handler/test_handler_apt_conf_v1.py
+++ b/tests/unittests/test_handler/test_handler_apt_conf_v1.py
@@ -118,7 +118,7 @@ class TestConversion(TestCase):
     def test_convert_with_apt_mirror(self):
         mirror = 'http://my.mirror/ubuntu'
         f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror})
-        self.assertIn(mirror, {m['uri'] for m in f['apt']['primary']})
+        self.assertIn(mirror, set(m['uri'] for m in f['apt']['primary']))
 
     def test_no_old_content(self):
         mirror = 'http://my.mirror/ubuntu'
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index fc6b9d40..881509aa 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -553,7 +553,7 @@ class TestSubp(helpers.TestCase):
     def test_subp_decode_invalid_utf8_replaces(self):
         (out, _err) = util.subp(self.stdin2out, capture=True,
                                 data=self.utf8_invalid)
-        expected = self.utf8_invalid.decode('utf-8', errors='replace')
+        expected = self.utf8_invalid.decode('utf-8', 'replace')
         self.assertEqual(out, expected)
 
     def test_subp_decode_strict_raises(self):
diff --git a/tox.ini b/tox.ini
index 729de2a6..277858ed 100644
--- a/tox.ini
+++ b/tox.ini
@@ -59,3 +59,19 @@ deps =
     pyflakes==1.1.0
     flake8==2.5.4
     hacking==0.10.2
+
+[testenv:centos6]
+basepython = python2.6
+commands = nosetests {posargs:tests}
+deps =
+    # requirements
+    argparse==1.2.1
+    jinja2==2.2.1
+    pyyaml==3.10
+    PrettyTable==0.7.2
+    oauthlib==0.6.0
+    configobj==4.6.0
+    requests==2.6.0
+    jsonpatch==1.2
+    six==1.9.0
+    -r{toxinidir}/test-requirements.txt
-- 
cgit v1.2.3


From 9972d246947f1a6ec102b978b99b26acc43133ec Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 29 Sep 2016 12:45:02 -0400
Subject: OpenNebula: replace 'ip' parsing with cloudinit.net usage.

Replace the parsing of 'ip' to get a link and mac address list in
OpenNebula's datasource with usage of cloudinit.net. This makes test
cases there not depend on 'ip' availability and also uses common code.
---
 cloudinit/sources/DataSourceOpenNebula.py          | 34 ++++++++++------------
 tests/unittests/test_datasource/test_opennebula.py | 23 +++++++--------
 2 files changed, 25 insertions(+), 32 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 635a836c..ba5f3f92 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -30,6 +30,7 @@ import re
 import string
 
 from cloudinit import log as logging
+from cloudinit import net
 from cloudinit import sources
 from cloudinit import util
 
@@ -120,17 +121,11 @@ class BrokenContextDiskDir(Exception):
 
 
 class OpenNebulaNetwork(object):
-    REG_DEV_MAC = re.compile(
-        r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
-        re.MULTILINE | re.DOTALL)
-
-    def __init__(self, ip, context):
-        self.ip = ip
+    def __init__(self, context, system_nics_by_mac=None):
         self.context = context
-        self.ifaces = self.get_ifaces()
-
-    def get_ifaces(self):
-        return self.REG_DEV_MAC.findall(self.ip)
+        if system_nics_by_mac is None:
+            system_nics_by_mac = get_physical_nics_by_mac()
+        self.ifaces = system_nics_by_mac
 
     def mac2ip(self, mac):
         components = mac.split(':')[2:]
@@ -188,9 +183,7 @@ class OpenNebulaNetwork(object):
         conf.append('iface lo inet loopback')
         conf.append('')
 
-        for i in self.ifaces:
-            dev = i[0]
-            mac = i[1]
+        for mac, dev in self.ifaces.items():
             ip_components = self.mac2ip(mac)
 
             conf.append('auto ' + dev)
@@ -405,16 +398,19 @@ def read_context_disk_dir(source_dir, asuser=None):
     # generate static /etc/network/interfaces
     # only if there are any required context variables
     # http://opennebula.org/documentation:rel3.8:cong#network_configuration
-    for k in context:
-        if re.match(r'^ETH\d+_IP$', k):
-            (out, _) = util.subp(['ip', 'link'])
-            net = OpenNebulaNetwork(out, context)
-            results['network-interfaces'] = net.gen_conf()
-            break
+    ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)]
+    if ipaddr_keys:
+        onet = OpenNebulaNetwork(context)
+        results['network-interfaces'] = onet.gen_conf()
 
     return results
 
 
+def get_physical_nics_by_mac():
+    devs = net.get_interfaces_by_mac()
+    return dict([(m, n) for m, n in devs.items() if net.is_physical(n)])
+
+
 # Legacy: Must be present in case we load an old pkl object
 DataSourceOpenNebulaNet = DataSourceOpenNebula
 
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index d796f030..ce5b5550 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -1,7 +1,7 @@
 from cloudinit import helpers
 from cloudinit.sources import DataSourceOpenNebula as ds
 from cloudinit import util
-from ..helpers import TestCase, populate_dir
+from ..helpers import mock, populate_dir, TestCase
 
 import os
 import pwd
@@ -31,12 +31,7 @@ SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
 HOSTNAME = 'foo.example.com'
 PUBLIC_IP = '10.0.0.3'
 
-CMD_IP_OUT = '''\
-1: lo: mtu 16436 qdisc noqueue state UNKNOWN
-    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
-2: eth0: mtu 1500 qdisc mq state UP qlen 1000
-    link/ether 02:00:0a:12:01:01 brd ff:ff:ff:ff:ff:ff
-'''
+DS_PATH = "cloudinit.sources.DataSourceOpenNebula"
 
 
 class TestOpenNebulaDataSource(TestCase):
@@ -233,18 +228,19 @@ class TestOpenNebulaDataSource(TestCase):
 
 
 class TestOpenNebulaNetwork(unittest.TestCase):
 
-    def setUp(self):
-        super(TestOpenNebulaNetwork, self).setUp()
+    system_nics = {'02:00:0a:12:01:01': 'eth0'}
 
     def test_lo(self):
-        net = ds.OpenNebulaNetwork('', {})
+        net = ds.OpenNebulaNetwork(context={}, system_nics_by_mac={})
         self.assertEqual(net.gen_conf(), u'''\
 auto lo
 iface lo inet loopback
 ''')
 
-    def test_eth0(self):
+    @mock.patch(DS_PATH + ".get_physical_nics_by_mac")
+    def test_eth0(self, m_get_phys_by_mac):
+        m_get_phys_by_mac.return_value = self.system_nics
+        net = ds.OpenNebulaNetwork({})
         self.assertEqual(net.gen_conf(), u'''\
 auto lo
 iface lo inet loopback
@@ -267,7 +263,8 @@ iface eth0 inet static
             'ETH0_DNS': '1.2.3.6 1.2.3.7'
         }
 
-        net = ds.OpenNebulaNetwork(CMD_IP_OUT, context)
+        net = ds.OpenNebulaNetwork(context,
+                                   system_nics_by_mac=self.system_nics)
         self.assertEqual(net.gen_conf(), u'''\
 auto lo
 iface lo inet loopback
-- 
cgit v1.2.3


From 4f8ceffb2e3a9feefcb718bda7a7f0f21ef7ab7c Mon Sep 17 00:00:00 2001
From: "kaihuan.pkh"
Date: Thu, 13 Oct 2016 20:31:49 +0800
Subject: AliYun: Add new datasource for Ali-Cloud ECS

Support AliYun (Ali-Cloud ECS). This datasource inherits from EC2; the
main difference is that the metadata server address is changed to
100.100.100.200.

The datasource behaves similarly to EC2 and relies on network polling.
As such, it is not enabled by default.
---
 cloudinit/sources/DataSourceAliYun.py          |  49 ++++++++
 cloudinit/sources/DataSourceEc2.py             |  18 ++-
 tests/unittests/test_datasource/test_aliyun.py | 148 +++++++++++++++++++++++++
 3 files changed, 205 insertions(+), 10 deletions(-)
 create mode 100644 cloudinit/sources/DataSourceAliYun.py
 create mode 100644 tests/unittests/test_datasource/test_aliyun.py

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
new file mode 100644
index 00000000..19957212
--- /dev/null
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -0,0 +1,49 @@
+# vi: ts=4 expandtab
+
+import os
+
+from cloudinit import sources
+from cloudinit.sources import DataSourceEc2 as EC2
+
+DEF_MD_VERSION = "2016-01-01"
+
+
+class DataSourceAliYun(EC2.DataSourceEc2):
+    metadata_urls = ["http://100.100.100.200"]
+
+    def __init__(self, sys_cfg, distro, paths):
+        super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths)
+        self.seed_dir = os.path.join(paths.seed_dir, "AliYun")
+        self.api_ver = DEF_MD_VERSION
+
+    def get_hostname(self, fqdn=False, _resolve_ip=False):
+        return self.metadata.get('hostname', 'localhost.localdomain')
+
+    def get_public_ssh_keys(self):
+        return parse_public_keys(self.metadata.get('public-keys', {}))
+
+
+def parse_public_keys(public_keys):
+    keys = []
+    for key_id, key_body in public_keys.items():
+        if isinstance(key_body, str):
+            keys.append(key_body.strip())
+        elif isinstance(key_body, list):
+            keys.extend(key_body)
+        elif isinstance(key_body, dict):
+            key = key_body.get('openssh-key', [])
+            if isinstance(key, str):
+                keys.append(key.strip())
+            elif isinstance(key, list):
+                keys.extend(key)
+    return keys
+
+# Used to match classes to dependencies
+datasources = [
+    (DataSourceAliYun, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 6fe2a0bb..bc84ef5d 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -31,21 +31,19 @@ from cloudinit import util
 
 LOG = logging.getLogger(__name__)
 
-DEF_MD_URL = "http://169.254.169.254"
-
 # Which version we are requesting of the ec2 metadata apis
 DEF_MD_VERSION = '2009-04-04'
 
-# Default metadata urls that will be used if none are provided
-# They will be checked for 'resolveability' and some of the
-# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
-
 
 class DataSourceEc2(sources.DataSource):
+    # Default metadata urls that will be used if none are provided
+    # They will be checked for 'resolveability' and some of the
+    # following may be discarded if they do not resolve
+    metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
-        self.metadata_address = DEF_MD_URL
+        self.metadata_address = None
         self.seed_dir = os.path.join(paths.seed_dir, "ec2")
         self.api_ver = DEF_MD_VERSION
 
@@ -106,7 +104,7 @@ class DataSourceEc2(sources.DataSource):
             return False
 
         # Remove addresses from the list that wont resolve.
-        mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
+        mdurls = mcfg.get("metadata_urls", self.metadata_urls)
         filtered = [x for x in mdurls if util.is_resolvable_url(x)]
 
         if set(filtered) != set(mdurls):
@@ -117,7 +115,7 @@ class DataSourceEc2(sources.DataSource):
             mdurls = filtered
         else:
             LOG.warn("Empty metadata url list! using default list")
-            mdurls = DEF_MD_URLS
+            mdurls = self.metadata_urls
 
         urls = []
         url2base = {}
diff --git a/tests/unittests/test_datasource/test_aliyun.py b/tests/unittests/test_datasource/test_aliyun.py
new file mode 100644
index 00000000..6f1de072
--- /dev/null
+++ b/tests/unittests/test_datasource/test_aliyun.py
@@ -0,0 +1,148 @@
+import functools
+import httpretty
+import os
+
+from .. import helpers as test_helpers
+from cloudinit import helpers
+from cloudinit.sources import DataSourceAliYun as ay
+
+DEFAULT_METADATA = {
+    'instance-id': 'aliyun-test-vm-00',
+    'eipv4': '10.0.0.1',
+    'hostname': 'test-hostname',
+    'image-id': 'm-test',
+    'launch-index': '0',
+    'mac': '00:16:3e:00:00:00',
+    'network-type': 'vpc',
+    'private-ipv4': '192.168.0.1',
+    'serial-number': 'test-string',
+    'vpc-cidr-block': '192.168.0.0/16',
+    'vpc-id': 'test-vpc',
+    'vswitch-id': 'test-vpc',
+    'vswitch-cidr-block': '192.168.0.0/16',
+    'zone-id': 'test-zone-1',
+    'ntp-conf': {'ntp_servers': [
+        'ntp1.aliyun.com',
+        'ntp2.aliyun.com',
+        'ntp3.aliyun.com']},
+    'source-address': ['http://mirrors.aliyun.com',
+                       'http://mirrors.aliyuncs.com'],
+    'public-keys': {'key-pair-1': {'openssh-key': 'ssh-rsa AAAAB3...'},
+                    'key-pair-2': {'openssh-key': 'ssh-rsa AAAAB3...'}}
+}
+
+DEFAULT_USERDATA = """\
+#cloud-config
+
+hostname: localhost"""
+
+
+def register_mock_metaserver(base_url, data):
+    def register_helper(register, base_url, body):
+        if isinstance(body, str):
+            register(base_url, body)
+        elif isinstance(body, list):
+            register(base_url.rstrip('/'), '\n'.join(body) + '\n')
+        elif isinstance(body, dict):
+            vals = []
+            for k, v in body.items():
+                if isinstance(v, (str, list)):
+                    suffix = k.rstrip('/')
+                else:
+                    suffix = k.rstrip('/') + '/'
+                vals.append(suffix)
+                url = base_url.rstrip('/') + '/' + suffix
+                register_helper(register, url, v)
+            register(base_url, '\n'.join(vals) + '\n')
+
+    register = functools.partial(httpretty.register_uri, httpretty.GET)
+    register_helper(register, base_url, data)
+
+
+class TestAliYunDatasource(test_helpers.HttprettyTestCase):
+    def setUp(self):
+        super(TestAliYunDatasource, self).setUp()
+        cfg = {'datasource': {'AliYun': {'timeout': '1', 'max_wait': '1'}}}
+        distro = {}
+        paths = helpers.Paths({})
+        self.ds = ay.DataSourceAliYun(cfg, distro, paths)
+        self.metadata_address = self.ds.metadata_urls[0]
+        self.api_ver = self.ds.api_ver
+
+    @property
+    def default_metadata(self):
+        return DEFAULT_METADATA
+
+    @property
+    def default_userdata(self):
+        return DEFAULT_USERDATA
+
+    @property
+    def metadata_url(self):
+        return os.path.join(self.metadata_address,
+                            self.api_ver, 'meta-data') + '/'
+
+    @property
+    def userdata_url(self):
+        return os.path.join(self.metadata_address,
+                            self.api_ver, 'user-data')
+
+    def regist_default_server(self):
+        register_mock_metaserver(self.metadata_url, self.default_metadata)
+        register_mock_metaserver(self.userdata_url, self.default_userdata)
+
+    def _test_get_data(self):
+        self.assertEqual(self.ds.metadata, self.default_metadata)
+        self.assertEqual(self.ds.userdata_raw,
+                         self.default_userdata.encode('utf8'))
+
+    def _test_get_sshkey(self):
+        pub_keys = [v['openssh-key'] for (_, v) in
+                    self.default_metadata['public-keys'].items()]
+        self.assertEqual(self.ds.get_public_ssh_keys(), pub_keys)
+
+    def _test_get_iid(self):
+        self.assertEqual(self.default_metadata['instance-id'],
+                         self.ds.get_instance_id())
+
+    def _test_host_name(self):
+        self.assertEqual(self.default_metadata['hostname'],
+                         self.ds.get_hostname())
+
+    @httpretty.activate
+    def test_with_mock_server(self):
+        self.regist_default_server()
+        self.ds.get_data()
+        self._test_get_data()
+        self._test_get_sshkey()
+        self._test_get_iid()
+        self._test_host_name()
+
+    def test_parse_public_keys(self):
+        public_keys = {}
+        self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+        public_keys = {'key-pair-0': 'ssh-key-0'}
+        self.assertEqual(ay.parse_public_keys(public_keys),
+                         [public_keys['key-pair-0']])
+
+        public_keys = {'key-pair-0': 'ssh-key-0', 'key-pair-1': 'ssh-key-1'}
+        self.assertEqual(set(ay.parse_public_keys(public_keys)),
+                         set([public_keys['key-pair-0'],
+                              public_keys['key-pair-1']]))
+
+        public_keys = {'key-pair-0': ['ssh-key-0', 'ssh-key-1']}
+        self.assertEqual(ay.parse_public_keys(public_keys),
+                         public_keys['key-pair-0'])
+
+        public_keys = {'key-pair-0': {'openssh-key': []}}
+        self.assertEqual(ay.parse_public_keys(public_keys), [])
+
+        public_keys = {'key-pair-0': {'openssh-key': 'ssh-key-0'}}
+        self.assertEqual(ay.parse_public_keys(public_keys),
+                         [public_keys['key-pair-0']['openssh-key']])
+
+        public_keys = {'key-pair-0': {'openssh-key': ['ssh-key-0',
+                                                      'ssh-key-1']}}
+        self.assertEqual(ay.parse_public_keys(public_keys),
+                         public_keys['key-pair-0']['openssh-key'])
-- 
cgit v1.2.3
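As a usage note on the AliYun patch above: parse_public_keys flattens
the several shapes the metadata service may use for 'public-keys' into
one list of key strings. A quick sketch with hypothetical key material
(the function itself is shown in the diff above):

    from cloudinit.sources.DataSourceAliYun import parse_public_keys

    # strings, lists, and {'openssh-key': ...} dicts all collapse to a
    # flat list of key strings; result order follows dict iteration
    md_keys = {'key-pair-0': {'openssh-key': 'ssh-rsa AAAA... a@host'},
               'key-pair-1': 'ssh-rsa BBBB... b@host'}
    print(parse_public_keys(md_keys))
    # ['ssh-rsa AAAA... a@host', 'ssh-rsa BBBB... b@host']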