diff options
| author | Chad Smith <chad.smith@canonical.com> | 2018-03-28 12:29:04 -0600 | 
|---|---|---|
| committer | Chad Smith <chad.smith@canonical.com> | 2018-03-28 12:29:04 -0600 | 
| commit | cf3eaed2e01062f9b5d47042d7a76b092970e0cf (patch) | |
| tree | 53f7c52c5a76bb586da0483699fd6d188e72f457 /cloudinit/sources | |
| parent | 9f159f3a55a7bba7868e03d9cccd898678381f03 (diff) | |
| parent | 8caa3bcf8f2c5b3a448b9d892d4cf53ed8db9be9 (diff) | |
| download | vyos-cloud-init-cf3eaed2e01062f9b5d47042d7a76b092970e0cf.tar.gz vyos-cloud-init-cf3eaed2e01062f9b5d47042d7a76b092970e0cf.zip | |
merge from master at 18.2
Diffstat (limited to 'cloudinit/sources')
| -rw-r--r-- | cloudinit/sources/DataSourceAliYun.py | 2 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceAzure.py | 33 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceCloudSigma.py | 2 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceConfigDrive.py | 10 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceGCE.py | 17 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceHetzner.py | 106 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceIBMCloud.py | 325 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceOVF.py | 21 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceOpenNebula.py | 109 | ||||
| -rw-r--r-- | cloudinit/sources/DataSourceScaleway.py | 8 | ||||
| -rw-r--r-- | cloudinit/sources/__init__.py | 21 | ||||
| -rw-r--r-- | cloudinit/sources/helpers/hetzner.py | 26 | ||||
| -rw-r--r-- | cloudinit/sources/tests/test_init.py | 98 | 
13 files changed, 697 insertions, 81 deletions
| diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 7ac8288d..22279d09 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -22,7 +22,7 @@ class DataSourceAliYun(EC2.DataSourceEc2):          super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths)          self.seed_dir = os.path.join(paths.seed_dir, "AliYun") -    def get_hostname(self, fqdn=False, _resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          return self.metadata.get('hostname', 'localhost.localdomain')      def get_public_ssh_keys(self): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4bcbf3a4..0ee622e2 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -20,7 +20,7 @@ from cloudinit import net  from cloudinit.net.dhcp import EphemeralDHCPv4  from cloudinit import sources  from cloudinit.sources.helpers.azure import get_metadata_from_fabric -from cloudinit.url_helper import readurl, wait_for_url, UrlError +from cloudinit.url_helper import readurl, UrlError  from cloudinit import util  LOG = logging.getLogger(__name__) @@ -49,7 +49,6 @@ DEFAULT_FS = 'ext4'  AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'  REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"  IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" -IMDS_RETRIES = 5  def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -223,6 +222,8 @@ DEF_PASSWD_REDACTION = 'REDACTED'  def get_hostname(hostname_command='hostname'): +    if not isinstance(hostname_command, (list, tuple)): +        hostname_command = (hostname_command,)      return util.subp(hostname_command, capture=True)[0].strip() @@ -449,36 +450,24 @@ class DataSourceAzure(sources.DataSource):          headers = {"Metadata": "true"}          LOG.debug("Start polling IMDS") -        def sleep_cb(response, loop_n): 
-            return 1 - -        def exception_cb(msg, exception): +        def exc_cb(msg, exception):              if isinstance(exception, UrlError) and exception.code == 404: -                return -            LOG.warning("Exception during polling. Will try DHCP.", -                        exc_info=True) - +                return True              # If we get an exception while trying to call IMDS, we              # call DHCP and setup the ephemeral network to acquire the new IP. -            raise exception +            return False          need_report = report_ready -        for i in range(IMDS_RETRIES): +        while True:              try:                  with EphemeralDHCPv4() as lease:                      if need_report:                          self._report_ready(lease=lease)                          need_report = False -                    wait_for_url([url], max_wait=None, timeout=60, -                                 status_cb=LOG.info, -                                 headers_cb=lambda url: headers, sleep_time=1, -                                 exception_cb=exception_cb, -                                 sleep_time_cb=sleep_cb) -                    return str(readurl(url, headers=headers)) -            except Exception: -                LOG.debug("Exception during polling-retrying dhcp" + -                          " %d more time(s).", (IMDS_RETRIES - i), -                          exc_info=True) +                    return readurl(url, timeout=1, headers=headers, +                                   exception_cb=exc_cb, infinite=True).contents +            except UrlError: +                pass      def _report_ready(self, lease):          """Tells the fabric provisioning has completed diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 4eaad475..c816f349 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -84,7 +84,7 @@ class 
DataSourceCloudSigma(sources.DataSource):          return True -    def get_hostname(self, fqdn=False, resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          """          Cleans up and uses the server's name if the latter is set. Otherwise          the first part from uuid is being used. diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index b8db6267..c7b5fe5f 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -14,6 +14,7 @@ from cloudinit import util  from cloudinit.net import eni +from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform  from cloudinit.sources.helpers import openstack  LOG = logging.getLogger(__name__) @@ -255,6 +256,15 @@ def find_candidate_devs(probe_optical=True):      # an unpartitioned block device (ex sda, not sda1)      devices = [d for d in candidates                 if d in by_label or not util.is_partition(d)] + +    if devices: +        # IBMCloud uses config-2 label, but limited to a single UUID. +        ibm_platform, ibm_path = get_ibm_platform() +        if ibm_path in devices: +            devices.remove(ibm_path) +            LOG.debug("IBMCloud device '%s' (%s) removed from candidate list", +                      ibm_path, ibm_platform) +      return devices diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 2da34a99..d8162623 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -90,7 +90,7 @@ class DataSourceGCE(sources.DataSource):          public_keys_data = self.metadata['public-keys-data']          return _parse_public_keys(public_keys_data, self.default_user) -    def get_hostname(self, fqdn=False, resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          # GCE has long FDQN's and has asked for short hostnames.          
return self.metadata['local-hostname'].split('.')[0] @@ -213,16 +213,15 @@ def read_md(address=None, platform_check=True):      if md['availability-zone']:          md['availability-zone'] = md['availability-zone'].split('/')[-1] -    encoding = instance_data.get('user-data-encoding') -    if encoding: +    if 'user-data' in instance_data: +        # instance_data was json, so values are all utf-8 strings. +        ud = instance_data['user-data'].encode("utf-8") +        encoding = instance_data.get('user-data-encoding')          if encoding == 'base64': -            md['user-data'] = b64decode(instance_data.get('user-data')) -        else: +            ud = b64decode(ud) +        elif encoding:              LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) - -    if 'user-data' in md: -        ret['user-data'] = md['user-data'] -        del md['user-data'] +        ret['user-data'] = ud      ret['meta-data'] = md      ret['success'] = True diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py new file mode 100644 index 00000000..5c75b65b --- /dev/null +++ b/cloudinit/sources/DataSourceHetzner.py @@ -0,0 +1,106 @@ +# Author: Jonas Keidel <jonas.keidel@hetzner.com> +# Author: Markus Schade <markus.schade@hetzner.com> +# +# This file is part of cloud-init. See LICENSE file for license information. +# +"""Hetzner Cloud API Documentation. 
+   https://docs.hetzner.cloud/""" + +from cloudinit import log as logging +from cloudinit import net as cloudnet +from cloudinit import sources +from cloudinit import util + +import cloudinit.sources.helpers.hetzner as hc_helper + +LOG = logging.getLogger(__name__) + +BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1' + +BUILTIN_DS_CONFIG = { +    'metadata_url': BASE_URL_V1 + '/metadata', +    'userdata_url': BASE_URL_V1 + '/userdata', +} + +MD_RETRIES = 60 +MD_TIMEOUT = 2 +MD_WAIT_RETRY = 2 + + +class DataSourceHetzner(sources.DataSource): +    def __init__(self, sys_cfg, distro, paths): +        sources.DataSource.__init__(self, sys_cfg, distro, paths) +        self.distro = distro +        self.metadata = dict() +        self.ds_cfg = util.mergemanydict([ +            util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}), +            BUILTIN_DS_CONFIG]) +        self.metadata_address = self.ds_cfg['metadata_url'] +        self.userdata_address = self.ds_cfg['userdata_url'] +        self.retries = self.ds_cfg.get('retries', MD_RETRIES) +        self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) +        self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) +        self._network_config = None +        self.dsmode = sources.DSMODE_NETWORK + +    def get_data(self): +        if not on_hetzner(): +            return False +        nic = cloudnet.find_fallback_nic() +        with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16, +                                           "169.254.255.255"): +            md = hc_helper.read_metadata( +                self.metadata_address, timeout=self.timeout, +                sec_between=self.wait_retry, retries=self.retries) +            ud = hc_helper.read_userdata( +                self.userdata_address, timeout=self.timeout, +                sec_between=self.wait_retry, retries=self.retries) + +        self.userdata_raw = ud +        self.metadata_full = md + +        """hostname is name provided by 
user at launch.  The API enforces +        it is a valid hostname, but it is not guaranteed to be resolvable +        in dns or fully qualified.""" +        self.metadata['instance-id'] = md['instance-id'] +        self.metadata['local-hostname'] = md['hostname'] +        self.metadata['network-config'] = md.get('network-config', None) +        self.metadata['public-keys'] = md.get('public-keys', None) +        self.vendordata_raw = md.get("vendor_data", None) + +        return True + +    @property +    def network_config(self): +        """Configure the networking. This needs to be done each boot, since +           the IP information may have changed due to snapshot and/or +           migration. +        """ + +        if self._network_config: +            return self._network_config + +        _net_config = self.metadata['network-config'] +        if not _net_config: +            raise Exception("Unable to get meta-data from server....") + +        self._network_config = _net_config + +        return self._network_config + + +def on_hetzner(): +    return util.read_dmi_data('system-manufacturer') == "Hetzner" + + +# Used to match classes to dependencies +datasources = [ +    (DataSourceHetzner, (sources.DEP_FILESYSTEM, )), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): +    return sources.list_from_depends(depends, datasources) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py new file mode 100644 index 00000000..02b3d56f --- /dev/null +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -0,0 +1,325 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Datasource for IBMCloud. + +IBMCloud is also known as SoftLayer or BlueMix. +IBMCloud hypervisor is xen (2018-03-10). + +There are 2 different api exposed launch methods. + * template: This is the legacy method of launching instances. 
+   When booting from an image template, the system boots first into +   a "provisioning" mode.  There, host <-> guest mechanisms are utilized +   to execute code in the guest and provision it. + +   Cloud-init will disable itself when it detects that it is in the +   provisioning mode.  It detects this by the presence of +   a file '/root/provisioningConfiguration.cfg'. + +   When provided with user-data, the "first boot" will contain a +   ConfigDrive-like disk labeled with 'METADATA'.  If there is no user-data +   provided, then there is no data-source. + +   Cloud-init never does any network configuration in this mode. + + * os_code: Essentially "launch by OS Code" (Operating System Code). +   This is a more modern approach.  There is no specific "provisioning" boot. +   Instead, cloud-init does all the customization.  With or without +   user-data provided, an OpenStack ConfigDrive like disk is attached. + +   Only disks with label 'config-2' and UUID '9796-932E' are considered. +   This is to avoid this datasource claiming ConfigDrive.  This does +   mean that 1 in 16^8 (~4 billion) Xen ConfigDrive systems will be +   incorrectly identified as IBMCloud. + +TODO: + * is uuid (/sys/hypervisor/uuid) stable for life of an instance? +   it seems it is not the same as data's uuid in the os_code case +   but is in the template case. + +""" +import base64 +import json +import os + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit.sources.helpers import openstack +from cloudinit import util + +LOG = logging.getLogger(__name__) + +IBM_CONFIG_UUID = "9796-932E" + + +class Platforms(object): +    TEMPLATE_LIVE_METADATA = "Template/Live/Metadata" +    TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED." 
+    TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata" +    TEMPLATE_PROVISIONING_NODATA = "Template/Provisioning/No-Metadata" +    OS_CODE = "OS-Code/Live" + + +PROVISIONING = ( +    Platforms.TEMPLATE_PROVISIONING_METADATA, +    Platforms.TEMPLATE_PROVISIONING_NODATA) + + +class DataSourceIBMCloud(sources.DataSource): + +    dsname = 'IBMCloud' +    system_uuid = None + +    def __init__(self, sys_cfg, distro, paths): +        super(DataSourceIBMCloud, self).__init__(sys_cfg, distro, paths) +        self.source = None +        self._network_config = None +        self.network_json = None +        self.platform = None + +    def __str__(self): +        root = super(DataSourceIBMCloud, self).__str__() +        mstr = "%s [%s %s]" % (root, self.platform, self.source) +        return mstr + +    def _get_data(self): +        results = read_md() +        if results is None: +            return False + +        self.source = results['source'] +        self.platform = results['platform'] +        self.metadata = results['metadata'] +        self.userdata_raw = results.get('userdata') +        self.network_json = results.get('networkdata') +        vd = results.get('vendordata') +        self.vendordata_pure = vd +        self.system_uuid = results['system-uuid'] +        try: +            self.vendordata_raw = sources.convert_vendordata(vd) +        except ValueError as e: +            LOG.warning("Invalid content in vendor-data: %s", e) +            self.vendordata_raw = None + +        return True + +    def check_instance_id(self, sys_cfg): +        """quickly (local check only) if self.instance_id is still valid + +        in Template mode, the system uuid (/sys/hypervisor/uuid) is the +        same as found in the METADATA disk.  But that is not true in OS_CODE +        mode.  
So we read the system_uuid and keep that for later compare.""" +        if self.system_uuid is None: +            return False +        return self.system_uuid == _read_system_uuid() + +    @property +    def network_config(self): +        if self.platform != Platforms.OS_CODE: +            # If deployed from template, an agent in the provisioning +            # environment handles networking configuration. Not cloud-init. +            return {'config': 'disabled', 'version': 1} +        if self._network_config is None: +            if self.network_json is not None: +                LOG.debug("network config provided via network_json") +                self._network_config = openstack.convert_net_json( +                    self.network_json, known_macs=None) +            else: +                LOG.debug("no network configuration available.") +        return self._network_config + + +def _read_system_uuid(): +    uuid_path = "/sys/hypervisor/uuid" +    if not os.path.isfile(uuid_path): +        return None +    return util.load_file(uuid_path).strip().lower() + + +def _is_xen(): +    return os.path.exists("/proc/xen") + + +def _is_ibm_provisioning(): +    return os.path.exists("/root/provisioningConfiguration.cfg") + + +def get_ibm_platform(): +    """Return a tuple (Platform, path) + +    If this is Not IBM cloud, then the return value is (None, None). +    An instance in provisioning mode is considered running on IBM cloud.""" +    label_mdata = "METADATA" +    label_cfg2 = "CONFIG-2" +    not_found = (None, None) + +    if not _is_xen(): +        return not_found + +    # fslabels contains only the first entry with a given label. 
+    fslabels = {} +    try: +        devs = util.blkid() +    except util.ProcessExecutionError as e: +        LOG.warning("Failed to run blkid: %s", e) +        return (None, None) + +    for dev in sorted(devs.keys()): +        data = devs[dev] +        label = data.get("LABEL", "").upper() +        uuid = data.get("UUID", "").upper() +        if label not in (label_mdata, label_cfg2): +            continue +        if label in fslabels: +            LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s", +                        label, fslabels[label], data) +            continue +        if label == label_cfg2 and uuid != IBM_CONFIG_UUID: +            LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s", +                      dev, label, uuid, data) +            continue +        fslabels[label] = data + +    metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME') +    cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME') + +    if cfg2_path: +        return (Platforms.OS_CODE, cfg2_path) +    elif metadata_path: +        if _is_ibm_provisioning(): +            return (Platforms.TEMPLATE_PROVISIONING_METADATA, metadata_path) +        else: +            return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) +    elif _is_ibm_provisioning(): +            return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) +    return not_found + + +def read_md(): +    """Read data from IBM Cloud. + +    @return: None if not running on IBM Cloud. +             dictionary with guaranteed fields: metadata, version +             and optional fields: userdata, vendordata, networkdata. 
+             Also includes the system uuid from /sys/hypervisor/uuid.""" +    platform, path = get_ibm_platform() +    if platform is None: +        LOG.debug("This is not an IBMCloud platform.") +        return None +    elif platform in PROVISIONING: +        LOG.debug("Cloud-init is disabled during provisioning: %s.", +                  platform) +        return None + +    ret = {'platform': platform, 'source': path, +           'system-uuid': _read_system_uuid()} + +    try: +        if os.path.isdir(path): +            results = metadata_from_dir(path) +        else: +            results = util.mount_cb(path, metadata_from_dir) +    except BrokenMetadata as e: +        raise RuntimeError( +            "Failed reading IBM config disk (platform=%s path=%s): %s" % +            (platform, path, e)) + +    ret.update(results) +    return ret + + +class BrokenMetadata(IOError): +    pass + + +def metadata_from_dir(source_dir): +    """Walk source_dir extracting standardized metadata. + +    Certain metadata keys are renamed to present a standardized set of metadata +    keys. + +    This function has a lot in common with ConfigDriveReader.read_v2 but +    there are a number of inconsistencies, such as key renames and only +    presenting a 'latest' version which make it an unlikely candidate to share +    code. + +    @return: Dict containing translated metadata, userdata, vendordata, +        networkdata as present. 
+    """ + +    def opath(fname): +        return os.path.join("openstack", "latest", fname) + +    def load_json_bytes(blob): +        return json.loads(blob.decode('utf-8')) + +    files = [ +        # tuples of (results_name, path, translator) +        ('metadata_raw', opath('meta_data.json'), load_json_bytes), +        ('userdata', opath('user_data'), None), +        ('vendordata', opath('vendor_data.json'), load_json_bytes), +        ('networkdata', opath('network_data.json'), load_json_bytes), +    ] + +    results = {} +    for (name, path, transl) in files: +        fpath = os.path.join(source_dir, path) +        raw = None +        try: +            raw = util.load_file(fpath, decode=False) +        except IOError as e: +            LOG.debug("Failed reading path '%s': %s", fpath, e) + +        if raw is None or transl is None: +            data = raw +        else: +            try: +                data = transl(raw) +            except Exception as e: +                raise BrokenMetadata("Failed decoding %s: %s" % (path, e)) + +        results[name] = data + +    if results.get('metadata_raw') is None: +        raise BrokenMetadata( +            "%s missing required file 'meta_data.json'" % source_dir) + +    results['metadata'] = {} + +    md_raw = results['metadata_raw'] +    md = results['metadata'] +    if 'random_seed' in md_raw: +        try: +            md['random_seed'] = base64.b64decode(md_raw['random_seed']) +        except (ValueError, TypeError) as e: +            raise BrokenMetadata( +                "Badly formatted metadata random_seed entry: %s" % e) + +    renames = ( +        ('public_keys', 'public-keys'), ('hostname', 'local-hostname'), +        ('uuid', 'instance-id')) +    for mdname, newname in renames: +        if mdname in md_raw: +            md[newname] = md_raw[mdname] + +    return results + + +# Used to match classes to dependencies +datasources = [ +    (DataSourceIBMCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return 
a list of data sources that match this set of dependencies +def get_datasource_list(depends): +    return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": +    import argparse + +    parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata') +    args = parser.parse_args() +    data = read_md() +    print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 6e62f984..dc914a72 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -95,11 +95,20 @@ class DataSourceOVF(sources.DataSource):                            "VMware Customization support")              elif not util.get_cfg_option_bool(                      self.sys_cfg, "disable_vmware_customization", True): -                deployPkgPluginPath = search_file("/usr/lib/vmware-tools", -                                                  "libdeployPkgPlugin.so") -                if not deployPkgPluginPath: -                    deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", -                                                      "libdeployPkgPlugin.so") + +                search_paths = ( +                    "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", +                    "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") + +                plugin = "libdeployPkgPlugin.so" +                deployPkgPluginPath = None +                for path in search_paths: +                    deployPkgPluginPath = search_file(path, plugin) +                    if deployPkgPluginPath: +                        LOG.debug("Found the customization plugin at %s", +                                  deployPkgPluginPath) +                        break +                  if deployPkgPluginPath:                      # When the VM is powered on, the "VMware Tools" daemon                      # copies the customization specification file to @@ -111,6 
+120,8 @@ class DataSourceOVF(sources.DataSource):                          msg="waiting for configuration file",                          func=wait_for_imc_cfg_file,                          args=("cust.cfg", max_wait)) +                else: +                    LOG.debug("Did not find the customization plugin.")                  if vmwareImcConfigFilePath:                      LOG.debug("Found VMware Customization Config File at %s", diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ce47b6bd..d4a41116 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -20,7 +20,6 @@ import string  from cloudinit import log as logging  from cloudinit import net -from cloudinit.net import eni  from cloudinit import sources  from cloudinit import util @@ -91,19 +90,19 @@ class DataSourceOpenNebula(sources.DataSource):              return False          self.seed = seed -        self.network_eni = results.get('network-interfaces') +        self.network = results.get('network-interfaces')          self.metadata = md          self.userdata_raw = results.get('userdata')          return True      @property      def network_config(self): -        if self.network_eni is not None: -            return eni.convert_eni_data(self.network_eni) +        if self.network is not None: +            return self.network          else:              return None -    def get_hostname(self, fqdn=False, resolve_ip=None): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          if resolve_ip is None:              if self.dsmode == sources.DSMODE_NETWORK:                  resolve_ip = True @@ -143,18 +142,42 @@ class OpenNebulaNetwork(object):      def mac2network(self, mac):          return self.mac2ip(mac).rpartition(".")[0] + ".0" -    def get_dns(self, dev): -        return self.get_field(dev, "dns", "").split() +    def get_nameservers(self, dev): +        
nameservers = {} +        dns = self.get_field(dev, "dns", "").split() +        dns.extend(self.context.get('DNS', "").split()) +        if dns: +            nameservers['addresses'] = dns +        search_domain = self.get_field(dev, "search_domain", "").split() +        if search_domain: +            nameservers['search'] = search_domain +        return nameservers -    def get_domain(self, dev): -        return self.get_field(dev, "domain") +    def get_mtu(self, dev): +        return self.get_field(dev, "mtu")      def get_ip(self, dev, mac):          return self.get_field(dev, "ip", self.mac2ip(mac)) +    def get_ip6(self, dev): +        addresses6 = [] +        ip6 = self.get_field(dev, "ip6") +        if ip6: +            addresses6.append(ip6) +        ip6_ula = self.get_field(dev, "ip6_ula") +        if ip6_ula: +            addresses6.append(ip6_ula) +        return addresses6 + +    def get_ip6_prefix(self, dev): +        return self.get_field(dev, "ip6_prefix_length", "64") +      def get_gateway(self, dev):          return self.get_field(dev, "gateway") +    def get_gateway6(self, dev): +        return self.get_field(dev, "gateway6") +      def get_mask(self, dev):          return self.get_field(dev, "mask", "255.255.255.0") @@ -171,13 +194,11 @@ class OpenNebulaNetwork(object):          return default if val in (None, "") else val      def gen_conf(self): -        global_dns = self.context.get('DNS', "").split() - -        conf = [] -        conf.append('auto lo') -        conf.append('iface lo inet loopback') -        conf.append('') +        netconf = {} +        netconf['version'] = 2 +        netconf['ethernets'] = {} +        ethernets = {}          for mac, dev in self.ifaces.items():              mac = mac.lower() @@ -185,29 +206,49 @@ class OpenNebulaNetwork(object):              # dev stores the current system name.              
c_dev = self.context_devname.get(mac, dev) -            conf.append('auto ' + dev) -            conf.append('iface ' + dev + ' inet static') -            conf.append('  #hwaddress %s' % mac) -            conf.append('  address ' + self.get_ip(c_dev, mac)) -            conf.append('  network ' + self.get_network(c_dev, mac)) -            conf.append('  netmask ' + self.get_mask(c_dev)) +            devconf = {} + +            # Set MAC address +            devconf['match'] = {'macaddress': mac} +            # Set IPv4 address +            devconf['addresses'] = [] +            mask = self.get_mask(c_dev) +            prefix = str(net.mask_to_net_prefix(mask)) +            devconf['addresses'].append( +                self.get_ip(c_dev, mac) + '/' + prefix) + +            # Set IPv6 Global and ULA address +            addresses6 = self.get_ip6(c_dev) +            if addresses6: +                prefix6 = self.get_ip6_prefix(c_dev) +                devconf['addresses'].extend( +                    [i + '/' + prefix6 for i in addresses6]) + +            # Set IPv4 default gateway              gateway = self.get_gateway(c_dev)              if gateway: -                conf.append('  gateway ' + gateway) +                devconf['gateway4'] = gateway + +            # Set IPv6 default gateway +            gateway6 = self.get_gateway6(c_dev) +            if gateway: +                devconf['gateway6'] = gateway6 -            domain = self.get_domain(c_dev) -            if domain: -                conf.append('  dns-search ' + domain) +            # Set DNS servers and search domains +            nameservers = self.get_nameservers(c_dev) +            if nameservers: +                devconf['nameservers'] = nameservers -            # add global DNS servers to all interfaces -            dns = self.get_dns(c_dev) -            if global_dns or dns: -                conf.append('  dns-nameservers ' + ' '.join(global_dns + dns)) +            # Set MTU size +            mtu = 
self.get_mtu(c_dev) +            if mtu: +                devconf['mtu'] = mtu -            conf.append('') +            ethernets[dev] = devconf -        return "\n".join(conf) +        netconf['ethernets'] = ethernets +        return(netconf)  def find_candidate_devs(): @@ -393,10 +434,10 @@ def read_context_disk_dir(source_dir, asuser=None):              except TypeError:                  LOG.warning("Failed base64 decoding of userdata") -    # generate static /etc/network/interfaces +    # generate Network Configuration v2      # only if there are any required context variables -    # http://opennebula.org/documentation:rel3.8:cong#network_configuration -    ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)] +    # http://docs.opennebula.org/5.4/operation/references/template.html#context-section +    ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]      if ipaddr_keys:          onet = OpenNebulaNetwork(context)          results['network-interfaces'] = onet.gen_conf() diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b0b19c93..e2502b02 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -113,9 +113,9 @@ def query_data_api_once(api_address, timeout, requests_session):              retries=0,              session=requests_session,              # If the error is a HTTP/404 or a ConnectionError, go into raise -            # block below. -            exception_cb=lambda _, exc: exc.code == 404 or ( -                isinstance(exc.cause, requests.exceptions.ConnectionError) +            # block below and don't bother retrying. 
+            exception_cb=lambda _, exc: exc.code != 404 and ( +                not isinstance(exc.cause, requests.exceptions.ConnectionError)              )          )          return util.decode_binary(resp.contents) @@ -215,7 +215,7 @@ class DataSourceScaleway(sources.DataSource):      def get_public_ssh_keys(self):          return [key['key'] for key in self.metadata['ssh_public_keys']] -    def get_hostname(self, fqdn=False, resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):          return self.metadata['hostname']      @property diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a05ca2f6..df0b374a 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -276,21 +276,34 @@ class DataSource(object):              return "iid-datasource"          return str(self.metadata['instance-id']) -    def get_hostname(self, fqdn=False, resolve_ip=False): +    def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): +        """Get hostname or fqdn from the datasource. Look it up if desired. + +        @param fqdn: Boolean, set True to return hostname with domain. +        @param resolve_ip: Boolean, set True to attempt to resolve an ipv4 +            address provided in local-hostname meta-data. +        @param metadata_only: Boolean, set True to avoid looking up hostname +            if meta-data doesn't have local-hostname present. + +        @return: hostname or qualified hostname. Optionally return None when +            metadata_only is True and local-hostname data is not available. +        """          defdomain = "localdomain"          defhost = "localhost"          domain = defdomain          if not self.metadata or 'local-hostname' not in self.metadata: +            if metadata_only: +                return None              # this is somewhat questionable really.              
# the cloud datasource was asked for a hostname              # and didn't have one. raising error might be more appropriate              # but instead, basically look up the existing hostname              toks = []              hostname = util.get_hostname() -            fqdn = util.get_fqdn_from_hosts(hostname) -            if fqdn and fqdn.find(".") > 0: -                toks = str(fqdn).split(".") +            hosts_fqdn = util.get_fqdn_from_hosts(hostname) +            if hosts_fqdn and hosts_fqdn.find(".") > 0: +                toks = str(hosts_fqdn).split(".")              elif hostname and hostname.find(".") > 0:                  toks = str(hostname).split(".")              elif hostname: diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py new file mode 100644 index 00000000..2554530d --- /dev/null +++ b/cloudinit/sources/helpers/hetzner.py @@ -0,0 +1,26 @@ +# Author: Jonas Keidel <jonas.keidel@hetzner.com> +# Author: Markus Schade <markus.schade@hetzner.com> +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import log as logging +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +def read_metadata(url, timeout=2, sec_between=2, retries=30): +    response = url_helper.readurl(url, timeout=timeout, +                                  sec_between=sec_between, retries=retries) +    if not response.ok(): +        raise RuntimeError("unable to read metadata at %s" % url) +    return util.load_yaml(response.contents.decode()) + + +def read_userdata(url, timeout=2, sec_between=2, retries=30): +    response = url_helper.readurl(url, timeout=timeout, +                                  sec_between=sec_between, retries=retries) +    if not response.ok(): +        raise RuntimeError("unable to read userdata at %s" % url) +    return response.contents diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index af151154..e7fda22a 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -1,13 +1,15 @@  # This file is part of cloud-init. See LICENSE file for license information. 
+import inspect  import os  import six  import stat  from cloudinit.helpers import Paths +from cloudinit import importer  from cloudinit.sources import (      INSTANCE_JSON_FILE, DataSource) -from cloudinit.tests.helpers import CiTestCase, skipIf +from cloudinit.tests.helpers import CiTestCase, skipIf, mock  from cloudinit.user_data import UserDataProcessor  from cloudinit import util @@ -108,6 +110,74 @@ class TestDataSource(CiTestCase):          self.assertEqual('userdata_raw', datasource.userdata_raw)          self.assertEqual('vendordata_raw', datasource.vendordata_raw) +    def test_get_hostname_strips_local_hostname_without_domain(self): +        """Datasource.get_hostname strips metadata local-hostname of domain.""" +        tmp = self.tmp_dir() +        datasource = DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertTrue(datasource.get_data()) +        self.assertEqual( +            'test-subclass-hostname', datasource.metadata['local-hostname']) +        self.assertEqual('test-subclass-hostname', datasource.get_hostname()) +        datasource.metadata['local-hostname'] = 'hostname.my.domain.com' +        self.assertEqual('hostname', datasource.get_hostname()) + +    def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self): +        """Datasource.get_hostname with fqdn set gets qualified hostname.""" +        tmp = self.tmp_dir() +        datasource = DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertTrue(datasource.get_data()) +        datasource.metadata['local-hostname'] = 'hostname.my.domain.com' +        self.assertEqual( +            'hostname.my.domain.com', datasource.get_hostname(fqdn=True)) + +    def test_get_hostname_without_metadata_uses_system_hostname(self): +        """Datasource.gethostname runs util.get_hostname when no metadata.""" +        tmp = self.tmp_dir() +        datasource = 
DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertEqual({}, datasource.metadata) +        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' +        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: +            with mock.patch(mock_fqdn) as m_fqdn: +                m_gethost.return_value = 'systemhostname.domain.com' +                m_fqdn.return_value = None  # No maching fqdn in /etc/hosts +                self.assertEqual('systemhostname', datasource.get_hostname()) +                self.assertEqual( +                    'systemhostname.domain.com', +                    datasource.get_hostname(fqdn=True)) + +    def test_get_hostname_without_metadata_returns_none(self): +        """Datasource.gethostname returns None when metadata_only and no MD.""" +        tmp = self.tmp_dir() +        datasource = DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertEqual({}, datasource.metadata) +        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' +        with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: +            with mock.patch(mock_fqdn) as m_fqdn: +                self.assertIsNone(datasource.get_hostname(metadata_only=True)) +                self.assertIsNone( +                    datasource.get_hostname(fqdn=True, metadata_only=True)) +        self.assertEqual([], m_gethost.call_args_list) +        self.assertEqual([], m_fqdn.call_args_list) + +    def test_get_hostname_without_metadata_prefers_etc_hosts(self): +        """Datasource.gethostname prefers /etc/hosts to util.get_hostname.""" +        tmp = self.tmp_dir() +        datasource = DataSourceTestSubclassNet( +            self.sys_cfg, self.distro, Paths({'run_dir': tmp})) +        self.assertEqual({}, datasource.metadata) +        mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' +        with 
mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: +            with mock.patch(mock_fqdn) as m_fqdn: +                m_gethost.return_value = 'systemhostname.domain.com' +                m_fqdn.return_value = 'fqdnhostname.domain.com' +                self.assertEqual('fqdnhostname', datasource.get_hostname()) +                self.assertEqual('fqdnhostname.domain.com', +                                 datasource.get_hostname(fqdn=True)) +      def test_get_data_write_json_instance_data(self):          """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root."""          tmp = self.tmp_dir() @@ -200,3 +270,29 @@ class TestDataSource(CiTestCase):              "WARNING: Error persisting instance-data.json: 'utf8' codec can't"              " decode byte 0xaa in position 2: invalid start byte",              self.logs.getvalue()) + +    def test_get_hostname_subclass_support(self): +        """Validate get_hostname signature on all subclasses of DataSource.""" +        # Use inspect.getfullargspec when we drop py2.6 and py2.7 +        get_args = inspect.getargspec  # pylint: disable=W1505 +        base_args = get_args(DataSource.get_hostname)  # pylint: disable=W1505 +        # Import all DataSource subclasses so we can inspect them. 
+        modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) +        for loc, name in modules.items(): +            mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) +            if mod_locs: +                importer.import_module(mod_locs[0]) +        for child in DataSource.__subclasses__(): +            if 'Test' in child.dsname: +                continue +            self.assertEqual( +                base_args, +                get_args(child.get_hostname),  # pylint: disable=W1505 +                '%s does not implement DataSource.get_hostname params' +                % child) +            for grandchild in child.__subclasses__(): +                self.assertEqual( +                    base_args, +                    get_args(grandchild.get_hostname),  # pylint: disable=W1505 +                    '%s does not implement DataSource.get_hostname params' +                    % grandchild) | 
