Diffstat (limited to 'cloudinit/sources')
 cloudinit/sources/DataSourceAltCloud.py            |   2
 cloudinit/sources/DataSourceAzure.py               |  23
 cloudinit/sources/DataSourceBigstep.py             |  57
 cloudinit/sources/DataSourceConfigDrive.py         |   2
 cloudinit/sources/DataSourceDigitalOcean.py        |   4
 cloudinit/sources/DataSourceEc2.py                 |  12
 cloudinit/sources/DataSourceMAAS.py                |  17
 cloudinit/sources/DataSourceNoCloud.py             |   4
 cloudinit/sources/DataSourceNone.py                |   4
 cloudinit/sources/DataSourceOVF.py                 |  21
 cloudinit/sources/DataSourceOpenNebula.py          |   7
 cloudinit/sources/DataSourceSmartOS.py             | 262
 cloudinit/sources/helpers/azure.py                 |  26
 cloudinit/sources/helpers/vmware/imc/config_nic.py |   6
14 files changed, 281 insertions, 166 deletions
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 60d58d6d..cd61df31 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -284,7 +284,7 @@ class DataSourceAltCloud(sources.DataSource):  # In the future 'dsmode' like behavior can be added to offer user  # the ability to run before networking.  datasources = [ -  (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +    (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),  ] diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index bd80a8a6..2af0ad9b 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -38,7 +38,8 @@ LOG = logging.getLogger(__name__)  DS_NAME = 'Azure'  DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}  AGENT_START = ['service', 'walinuxagent', 'start'] -BOUNCE_COMMAND = ['sh', '-xc', +BOUNCE_COMMAND = [ +    'sh', '-xc',      "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]  BUILTIN_DS_CONFIG = { @@ -91,9 +92,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):      """      policy = cfg['hostname_bounce']['policy']      previous_hostname = get_hostname(hostname_command) -    if (not util.is_true(cfg.get('set_hostname')) -            or util.is_false(policy) -            or (previous_hostname == temp_hostname and policy != 'force')): +    if (not util.is_true(cfg.get('set_hostname')) or +       util.is_false(policy) or +       (previous_hostname == temp_hostname and policy != 'force')):          yield None          return      set_hostname(temp_hostname, hostname_command) @@ -123,8 +124,8 @@ class DataSourceAzureNet(sources.DataSource):          with temporary_hostname(temp_hostname, self.ds_cfg,                                  hostname_command=hostname_command) \                  as previous_hostname: -            if (previous_hostname is not None -                    and util.is_true(self.ds_cfg.get('set_hostname'))): +            if (previous_hostname is not None and +               util.is_true(self.ds_cfg.get('set_hostname'))):                  cfg = self.ds_cfg['hostname_bounce']                  try:                      perform_hostname_bounce(hostname=temp_hostname, @@ -152,7 +153,8 @@ class DataSourceAzureNet(sources.DataSource):                  else:                      bname = str(pk['fingerprint'] + ".crt")                      fp_files += [os.path.join(ddir, bname)] -                    LOG.debug("ssh authentication: using fingerprint from fabirc") +                    LOG.debug("ssh authentication: " +                              "using fingerprint from fabirc")              missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",                                      func=wait_for_files, @@ -506,7 +508,7 @@ def read_azure_ovf(contents):          raise BrokenAzureDataSource("invalid xml: %s" % e)      results = find_child(dom.documentElement, -        lambda n: n.localName == "ProvisioningSection") +                         lambda n: n.localName == "ProvisioningSection")      if len(results) == 0:          raise NonAzureDataSource("No ProvisioningSection") @@ -516,7 +518,8 @@ def read_azure_ovf(contents):      provSection = results[0]      lpcs_nodes = find_child(provSection, -        lambda n: n.localName == "LinuxProvisioningConfigurationSet") +                            lambda n: +                            n.localName == 
"LinuxProvisioningConfigurationSet")      if len(results) == 0:          raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") @@ -633,7 +636,7 @@ class NonAzureDataSource(Exception):  # Used to match classes to dependencies  datasources = [ -  (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +    (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),  ] diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py new file mode 100644 index 00000000..b5ee4129 --- /dev/null +++ b/cloudinit/sources/DataSourceBigstep.py @@ -0,0 +1,57 @@ +# +#    Copyright (C) 2015-2016 Bigstep Cloud Ltd. +# +#    Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com> +# + +import json +import errno + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit import url_helper + +LOG = logging.getLogger(__name__) + + +class DataSourceBigstep(sources.DataSource): +    def __init__(self, sys_cfg, distro, paths): +        sources.DataSource.__init__(self, sys_cfg, distro, paths) +        self.metadata = {} +        self.vendordata_raw = "" +        self.userdata_raw = "" + +    def get_data(self, apply_filter=False): +        url = get_url_from_file() +        if url is None: +            return False +        response = url_helper.readurl(url) +        decoded = json.loads(response.contents) +        self.metadata = decoded["metadata"] +        self.vendordata_raw = decoded["vendordata_raw"] +        self.userdata_raw = decoded["userdata_raw"] +        return True + + +def get_url_from_file(): +    try: +        content = util.load_file("/var/lib/cloud/data/seed/bigstep/url") +    except IOError as e: +        # If the file doesn't exist, then the server probably isn't a Bigstep +        # instance; otherwise, another problem exists which needs investigation +        if e.errno == errno.ENOENT: +            return None +        else: +            raise +    return content + +# Used to match classes to dependencies +datasources = [ +    (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): +    return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index eb474079..e3916208 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -39,7 +39,7 @@ FS_TYPES = ('vfat', 'iso9660')  LABEL_TYPES = ('config-2',)  POSSIBLE_MOUNTS = ('sr', 'cd')  OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS -                  for i in range(0, 2))) +                        for i in range(0, 2)))  class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5d47564d..12e863d2 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -101,8 +101,8 @@ class DataSourceDigitalOcean(sources.DataSource):  # Used to match classes to dependencies  datasources = [ -  (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -  ] +    (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +]  # Return a list of data sources that match this set of dependencies diff --git a/cloudinit/sources/DataSourceEc2.py 
b/cloudinit/sources/DataSourceEc2.py index 0032d06c..3ef2c6af 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource):              if not self.wait_for_metadata_service():                  return False              start_time = time.time() -            self.userdata_raw = ec2.get_instance_userdata(self.api_ver, -                self.metadata_address) +            self.userdata_raw = \ +                ec2.get_instance_userdata(self.api_ver, self.metadata_address)              self.metadata = ec2.get_instance_metadata(self.api_ver,                                                        self.metadata_address)              LOG.debug("Crawl of metadata service took %s seconds", -                       int(time.time() - start_time)) +                      int(time.time() - start_time))              return True          except Exception:              util.logexc(LOG, "Failed reading from metadata address %s", @@ -132,13 +132,13 @@ class DataSourceEc2(sources.DataSource):          start_time = time.time()          url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, -                                timeout=timeout, status_cb=LOG.warn) +                                 timeout=timeout, status_cb=LOG.warn)          if url:              LOG.debug("Using metadata source: '%s'", url2base[url])          else:              LOG.critical("Giving up on md from %s after %s seconds", -                            urls, int(time.time() - start_time)) +                         urls, int(time.time() - start_time))          self.metadata_address = url2base.get(url)          return bool(url) @@ -206,7 +206,7 @@ class DataSourceEc2(sources.DataSource):  # Used to match classes to dependencies  datasources = [ -  (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +    (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),  ] diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index cfc59ca5..d828f078 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -254,7 +254,7 @@ class MAASSeedDirMalformed(Exception):  # Used to match classes to dependencies  datasources = [ -  (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +    (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),  ] @@ -275,17 +275,18 @@ if __name__ == "__main__":          parser = argparse.ArgumentParser(description='Interact with MAAS DS')          parser.add_argument("--config", metavar="file", -            help="specify DS config file", default=None) +                            help="specify DS config file", default=None)          parser.add_argument("--ckey", metavar="key", -            help="the consumer key to auth with", default=None) +                            help="the consumer key to auth with", default=None)          parser.add_argument("--tkey", metavar="key", -            help="the token key to auth with", default=None) +                            help="the token key to auth with", default=None)          parser.add_argument("--csec", metavar="secret", -            help="the consumer secret (likely '')", default="") +                            help="the consumer secret (likely '')", default="")          parser.add_argument("--tsec", metavar="secret", -            help="the token secret to auth with", default=None) +                            help="the token secret to auth with", default=None)          
parser.add_argument("--apiver", metavar="version", -            help="the apiver to use ("" can be used)", default=MD_VERSION) +                            help="the apiver to use ("" can be used)", +                            default=MD_VERSION)          subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")          subcmds.add_parser('crawl', help="crawl the datasource") @@ -297,7 +298,7 @@ if __name__ == "__main__":          args = parser.parse_args()          creds = {'consumer_key': args.ckey, 'token_key': args.tkey, -            'token_secret': args.tsec, 'consumer_secret': args.csec} +                 'token_secret': args.tsec, 'consumer_secret': args.csec}          if args.config:              cfg = util.read_conf(args.config) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 4dffe6e6..4cad6877 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -263,8 +263,8 @@ class DataSourceNoCloudNet(DataSourceNoCloud):  # Used to match classes to dependencies  datasources = [ -  (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )), -  (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +    (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )), +    (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),  ] diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index 12a8a992..d1a62b2a 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -47,8 +47,8 @@ class DataSourceNone(sources.DataSource):  # Used to match classes to dependencies  datasources = [ -  (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), -  (DataSourceNone, []), +    (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +    (DataSourceNone, []),  ] diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index bc13b71a..fec13b93 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -80,19 +80,18 @@ class DataSourceOVF(sources.DataSource):              LOG.debug("No system-product-name found")          elif 'vmware' in system_type.lower():              LOG.debug("VMware Virtualization Platform found") -            if not util.get_cfg_option_bool(self.sys_cfg, -                                        "disable_vmware_customization", -                                        True): +            if not util.get_cfg_option_bool( +                    self.sys_cfg, "disable_vmware_customization", True):                  deployPkgPluginPath = search_file("/usr/lib/vmware-tools",                                                    "libdeployPkgPlugin.so")                  if not deployPkgPluginPath:                      deployPkgPluginPath = search_file("/usr/lib/open-vm-tools",                                                        "libdeployPkgPlugin.so")                  if deployPkgPluginPath: -                    vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug, -                                      msg="waiting for configuration file", -                                      func=wait_for_imc_cfg_file, -                                      args=("/tmp", "cust.cfg")) +                    vmwareImcConfigFilePath = util.log_time( +                        logfunc=LOG.debug, +                        msg="waiting for configuration file", +                        func=wait_for_imc_cfg_file, args=("/tmp", "cust.cfg"))                  
if vmwareImcConfigFilePath:                      LOG.debug("Found VMware DeployPkg Config File at %s" % @@ -373,14 +372,14 @@ def get_properties(contents):      # could also check here that elem.namespaceURI ==      #   "http://schemas.dmtf.org/ovf/environment/1"      propSections = find_child(dom.documentElement, -        lambda n: n.localName == "PropertySection") +                              lambda n: n.localName == "PropertySection")      if len(propSections) == 0:          raise XmlError("No 'PropertySection's")      props = {}      propElems = find_child(propSections[0], -                            (lambda n: n.localName == "Property")) +                           (lambda n: n.localName == "Property"))      for elem in propElems:          key = elem.attributes.getNamedItemNS(envNsURI, "key").value @@ -407,8 +406,8 @@ class XmlError(Exception):  # Used to match classes to dependencies  datasources = ( -  (DataSourceOVF, (sources.DEP_FILESYSTEM, )), -  (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +    (DataSourceOVF, (sources.DEP_FILESYSTEM, )), +    (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),  ) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ac2c3b45..681f3a96 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -149,8 +149,8 @@ class BrokenContextDiskDir(Exception):  class OpenNebulaNetwork(object):      REG_DEV_MAC = re.compile( -                    r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?', -                    re.MULTILINE | re.DOTALL) +        r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?', +        re.MULTILINE | re.DOTALL)      def __init__(self, ip, context):          self.ip = ip @@ -404,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None):      if ssh_key_var:          lines = context.get(ssh_key_var).splitlines()          results['metadata']['public-keys'] = [l for l in lines -            if len(l) and not l.startswith("#")] +                                              if len(l) and not +                                              l.startswith("#")]      # custom hostname -- try hostname or leave cloud-init      # itself create hostname from IP address later diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index c9b497df..5edab152 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -20,10 +20,13 @@  #    Datasource for provisioning on SmartOS. This works on Joyent  #        and public/private Clouds using SmartOS.  # -#    SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests. +#    SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests  #        The meta-data is transmitted via key/value pairs made by  #        requests on the console. For example, to get the hostname, you  #        would send "GET hostname" on /dev/ttyS1. +#        For Linux Guests running in LX-Brand Zones on SmartOS hosts +#        a socket (/native/.zonecontrol/metadata.sock) is used instead +#        of a serial console.  
#  #   Certain behavior is defined by the DataDictionary  #       http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html @@ -34,6 +37,8 @@ import contextlib  import os  import random  import re +import socket +import stat  import serial @@ -46,6 +51,7 @@ LOG = logging.getLogger(__name__)  SMARTOS_ATTRIB_MAP = {      # Cloud-init Key : (SmartOS Key, Strip line endings) +    'instance-id': ('sdc:uuid', True),      'local-hostname': ('hostname', True),      'public-keys': ('root_authorized_keys', True),      'user-script': ('user-script', False), @@ -76,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME]  #  BUILTIN_DS_CONFIG = {      'serial_device': '/dev/ttyS1', +    'metadata_sockfile': '/native/.zonecontrol/metadata.sock',      'seed_timeout': 60,      'no_base64_decode': ['root_authorized_keys',                           'motd_sys_info', @@ -83,7 +90,7 @@ BUILTIN_DS_CONFIG = {                           'user-data',                           'user-script',                           'sdc:datacenter_name', -                        ], +                         'sdc:uuid'],      'base64_keys': [],      'base64_all': False,      'disk_aliases': {'ephemeral0': '/dev/vdb'}, @@ -94,7 +101,7 @@ BUILTIN_CLOUD_CONFIG = {          'ephemeral0': {'table_type': 'mbr',                         'layout': False,                         'overwrite': False} -         }, +    },      'fs_setup': [{'label': 'ephemeral0',                    'filesystem': 'ext3',                    'device': 'ephemeral0'}], @@ -150,17 +157,27 @@ class DataSourceSmartOS(sources.DataSource):      def __init__(self, sys_cfg, distro, paths):          sources.DataSource.__init__(self, sys_cfg, distro, paths)          self.is_smartdc = None -          self.ds_cfg = util.mergemanydict([              self.ds_cfg,              util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),              BUILTIN_DS_CONFIG])          self.metadata = {} -        self.cfg = BUILTIN_CLOUD_CONFIG -        self.seed = self.ds_cfg.get("serial_device") -        self.seed_timeout = self.ds_cfg.get("serial_timeout") +        # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but +        # report 'BrandZ virtual linux' as the kernel version +        if os.uname()[3].lower() == 'brandz virtual linux': +            LOG.debug("Host is SmartOS, guest in Zone") +            self.is_smartdc = True +            self.smartos_type = 'lx-brand' +            self.cfg = {} +            self.seed = self.ds_cfg.get("metadata_sockfile") +        else: +            self.is_smartdc = True +            self.smartos_type = 'kvm' +            self.seed = self.ds_cfg.get("serial_device") +            self.cfg = BUILTIN_CLOUD_CONFIG +            self.seed_timeout = self.ds_cfg.get("serial_timeout")          self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')          self.b64_keys = self.ds_cfg.get('base64_keys')          self.b64_all = self.ds_cfg.get('base64_all') @@ -170,12 +187,49 @@ class DataSourceSmartOS(sources.DataSource):          root = sources.DataSource.__str__(self)          return "%s [seed=%s]" % (root, self.seed) +    def _get_seed_file_object(self): +        if not self.seed: +            raise AttributeError("seed device is not set") + +        if self.smartos_type == 'lx-brand': +            if not stat.S_ISSOCK(os.stat(self.seed).st_mode): +                LOG.debug("Seed %s is not a socket", self.seed) +                return None +            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) +            sock.connect(self.seed) +            
return sock.makefile('rwb') +        else: +            if not stat.S_ISCHR(os.stat(self.seed).st_mode): +                LOG.debug("Seed %s is not a character device") +                return None +            ser = serial.Serial(self.seed, timeout=self.seed_timeout) +            if not ser.isOpen(): +                raise SystemError("Unable to open %s" % self.seed) +            return ser +        return None + +    def _set_provisioned(self): +        '''Mark the instance provisioning state as successful. + +        When run in a zone, the host OS will look for /var/svc/provisioning +        to be renamed as /var/svc/provision_success.   This should be done +        after meta-data is successfully retrieved and from this point +        the host considers the provision of the zone to be a success and +        keeps the zone running. +        ''' + +        LOG.debug('Instance provisioning state set as successful') +        svc_path = '/var/svc' +        if os.path.exists('/'.join([svc_path, 'provisioning'])): +            os.rename('/'.join([svc_path, 'provisioning']), +                      '/'.join([svc_path, 'provision_success'])) +      def get_data(self):          md = {}          ud = ""          if not device_exists(self.seed): -            LOG.debug("No serial device '%s' found for SmartOS datasource", +            LOG.debug("No metadata device '%s' found for SmartOS datasource",                        self.seed)              return False @@ -185,29 +239,36 @@ class DataSourceSmartOS(sources.DataSource):              LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")              return False -        dmi_info = dmi_data() -        if dmi_info is False: -            LOG.debug("No dmidata utility found") -            return False - -        system_uuid, system_type = tuple(dmi_info) -        if 'smartdc' not in system_type.lower(): -            LOG.debug("Host is not on SmartOS. system_type=%s", system_type) +        # SDC KVM instances will provide dmi data, LX-brand does not +        if self.smartos_type == 'kvm': +            dmi_info = dmi_data() +            if dmi_info is False: +                LOG.debug("No dmidata utility found") +                return False + +            system_type = dmi_info +            if 'smartdc' not in system_type.lower(): +                LOG.debug("Host is not on SmartOS. 
system_type=%s", +                          system_type) +                return False +            LOG.debug("Host is SmartOS, guest in KVM") + +        seed_obj = self._get_seed_file_object() +        if seed_obj is None: +            LOG.debug('Seed file object not found.')              return False -        self.is_smartdc = True -        md['instance-id'] = system_uuid +        with contextlib.closing(seed_obj) as seed: +            b64_keys = self.query('base64_keys', seed, strip=True, b64=False) +            if b64_keys is not None: +                self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] -        b64_keys = self.query('base64_keys', strip=True, b64=False) -        if b64_keys is not None: -            self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] +            b64_all = self.query('base64_all', seed, strip=True, b64=False) +            if b64_all is not None: +                self.b64_all = util.is_true(b64_all) -        b64_all = self.query('base64_all', strip=True, b64=False) -        if b64_all is not None: -            self.b64_all = util.is_true(b64_all) - -        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): -            smartos_noun, strip = attribute -            md[ci_noun] = self.query(smartos_noun, strip=strip) +            for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): +                smartos_noun, strip = attribute +                md[ci_noun] = self.query(smartos_noun, seed, strip=strip)          # @datadictionary: This key may contain a program that is written          # to a file in the filesystem of the guest on each boot and then @@ -240,7 +301,7 @@ class DataSourceSmartOS(sources.DataSource):          # Handle the cloud-init regular meta          if not md['local-hostname']: -            md['local-hostname'] = system_uuid +            md['local-hostname'] = md['instance-id']          ud = None          if md['user-data']: @@ -257,6 +318,8 @@ class DataSourceSmartOS(sources.DataSource):          self.metadata = util.mergemanydict([md, self.metadata])          self.userdata_raw = ud          self.vendordata_raw = md['vendor-data'] + +        self._set_provisioned()          return True      def device_name_to_device(self, name): @@ -268,40 +331,64 @@ class DataSourceSmartOS(sources.DataSource):      def get_instance_id(self):          return self.metadata['instance-id'] -    def query(self, noun, strip=False, default=None, b64=None): +    def query(self, noun, seed_file, strip=False, default=None, b64=None):          if b64 is None:              if noun in self.smartos_no_base64:                  b64 = False              elif self.b64_all or noun in self.b64_keys:                  b64 = True -        return query_data(noun=noun, strip=strip, seed_device=self.seed, -                          seed_timeout=self.seed_timeout, default=default, -                          b64=b64) +        return self._query_data(noun, seed_file, strip=strip, +                                default=default, b64=b64) +    def _query_data(self, noun, seed_file, strip=False, +                    default=None, b64=None): +        """Makes a request via "GET <NOUN>" -def device_exists(device): -    """Symplistic method to determine if the device exists or not""" -    return os.path.exists(device) +           In the response, the first line is the status, while subsequent +           lines are is the value. A blank line with a "." is used to +           indicate end of response. 
+           If the response is expected to be base64 encoded, then set +           b64encoded to true. Unfortantely, there is no way to know if +           something is 100% encoded, so this method relies on being told +           if the data is base64 or not. +        """ -def get_serial(seed_device, seed_timeout): -    """This is replaced in unit testing, allowing us to replace -        serial.Serial with a mocked class. +        if not noun: +            return False -        The timeout value of 60 seconds should never be hit. The value -        is taken from SmartOS own provisioning tools. Since we are reading -        each line individually up until the single ".", the transfer is -        usually very fast (i.e. microseconds) to get the response. -    """ -    if not seed_device: -        raise AttributeError("seed_device value is not set") +        response = JoyentMetadataClient(seed_file).get_metadata(noun) + +        if response is None: +            return default + +        if b64 is None: +            b64 = self._query_data('b64-%s' % noun, seed_file, b64=False, +                                   default=False, strip=True) +            b64 = util.is_true(b64) + +        resp = None +        if b64 or strip: +            resp = "".join(response).rstrip() +        else: +            resp = "".join(response) -    ser = serial.Serial(seed_device, timeout=seed_timeout) -    if not ser.isOpen(): -        raise SystemError("Unable to open %s" % seed_device) +        if b64: +            try: +                return util.b64d(resp) +            # Bogus input produces different errors in Python 2 and 3; +            # catch both. +            except (TypeError, binascii.Error): +                LOG.warn("Failed base64 decoding key '%s'", noun) +                return resp -    return ser +        return resp + + +def device_exists(device): +    """Symplistic method to determine if the device exists or not""" +    return os.path.exists(device)  class JoyentMetadataFetchException(Exception): @@ -320,8 +407,8 @@ class JoyentMetadataClient(object):          r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'          r'( (?P<payload>.+))?)') -    def __init__(self, serial): -        self.serial = serial +    def __init__(self, metasource): +        self.metasource = metasource      def _checksum(self, body):          return '{0:08x}'.format( @@ -356,67 +443,30 @@ class JoyentMetadataClient(object):                                              util.b64e(metadata_key))          msg = 'V2 {0} {1} {2}\n'.format(              len(message_body), self._checksum(message_body), message_body) -        LOG.debug('Writing "%s" to serial port.', msg) -        self.serial.write(msg.encode('ascii')) -        response = self.serial.readline().decode('ascii') -        LOG.debug('Read "%s" from serial port.', response) -        return self._get_value_from_frame(request_id, response) - - -def query_data(noun, seed_device, seed_timeout, strip=False, default=None, -               b64=None): -    """Makes a request to via the serial console via "GET <NOUN>" - -        In the response, the first line is the status, while subsequent lines -        are is the value. A blank line with a "." is used to indicate end of -        response. - -        If the response is expected to be base64 encoded, then set b64encoded -        to true. Unfortantely, there is no way to know if something is 100% -        encoded, so this method relies on being told if the data is base64 or -        not. 
-    """ -    if not noun: -        return False - -    with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser: -        client = JoyentMetadataClient(ser) -        response = client.get_metadata(noun) - -    if response is None: -        return default - -    if b64 is None: -        b64 = query_data('b64-%s' % noun, seed_device=seed_device, -                         seed_timeout=seed_timeout, b64=False, -                         default=False, strip=True) -        b64 = util.is_true(b64) - -    resp = None -    if b64 or strip: -        resp = "".join(response).rstrip() -    else: -        resp = "".join(response) - -    if b64: -        try: -            return util.b64d(resp) -        # Bogus input produces different errors in Python 2 and 3; catch both. -        except (TypeError, binascii.Error): -            LOG.warn("Failed base64 decoding key '%s'", noun) -            return resp +        LOG.debug('Writing "%s" to metadata transport.', msg) +        self.metasource.write(msg.encode('ascii')) +        self.metasource.flush() + +        response = bytearray() +        response.extend(self.metasource.read(1)) +        while response[-1:] != b'\n': +            response.extend(self.metasource.read(1)) +        response = response.rstrip().decode('ascii') +        LOG.debug('Read "%s" from metadata transport.', response) + +        if 'SUCCESS' not in response: +            return None -    return resp +        return self._get_value_from_frame(request_id, response)  def dmi_data(): -    sys_uuid = util.read_dmi_data("system-uuid")      sys_type = util.read_dmi_data("system-product-name") -    if not sys_uuid or not sys_type: +    if not sys_type:          return None -    return (sys_uuid.lower(), sys_type) +    return sys_type  def write_boot_content(content, content_f, link=None, shebang=False, @@ -462,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False,          except Exception as e:              util.logexc(LOG, ("Failed to identify script type for %s" % -                             content_f, e)) +                              content_f, e))      if link:          try: diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index d90c22fd..018cac6d 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -197,6 +197,21 @@ class WALinuxAgentShim(object):              self.openssl_manager.clean_up()      @staticmethod +    def get_ip_from_lease_value(lease_value): +        unescaped_value = lease_value.replace('\\', '') +        if len(unescaped_value) > 4: +            hex_string = '' +            for hex_pair in unescaped_value.split(':'): +                if len(hex_pair) == 1: +                    hex_pair = '0' + hex_pair +                hex_string += hex_pair +            packed_bytes = struct.pack( +                '>L', int(hex_string.replace(':', ''), 16)) +        else: +            packed_bytes = unescaped_value.encode('utf-8') +        return socket.inet_ntoa(packed_bytes) + +    @staticmethod      def find_endpoint():          LOG.debug('Finding Azure endpoint...')          content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') @@ -206,16 +221,7 @@ class WALinuxAgentShim(object):                  value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')          if value is None:              raise Exception('No endpoint found in DHCP config.') -        if ':' in value: -            hex_string = '' -            for hex_pair in value.split(':'): -         
       if len(hex_pair) == 1: -                    hex_pair = '0' + hex_pair -                hex_string += hex_pair -            value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) -        else: -            value = value.encode('utf-8') -        endpoint_ip_address = socket.inet_ntoa(value) +        endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)          LOG.debug('Azure endpoint found at %s', endpoint_ip_address)          return endpoint_ip_address diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 42fbcc7e..77098a05 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -19,7 +19,6 @@  import logging  import os -import subprocess  import re  from cloudinit import util @@ -186,9 +185,8 @@ class NicConfigurator:          lines = []          for addr in addrs: -            lines.append( -                     '    up route -A inet6 add default gw %s metric 10000' % -                     addr.gateway) +            lines.append('    up route -A inet6 add default gw ' +                         '%s metric 10000' % addr.gateway)          return lines  | 
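
A few notes on the larger changes above, sketched rather than quoted from the tree.

The new DataSourceBigstep is deliberately small: it reads a URL from the seed file /var/lib/cloud/data/seed/bigstep/url, fetches it, and expects a JSON object with exactly the three top-level keys it copies into the datasource. The flow below mirrors get_data(); the payload shape is inferred from the keys used there, and the comments' example values are invented.

import json

from cloudinit import url_helper
from cloudinit import util

SEED_FILE = "/var/lib/cloud/data/seed/bigstep/url"   # seed file checked by get_url_from_file()

url = util.load_file(SEED_FILE)                 # file contents are used directly as the URL
decoded = json.loads(url_helper.readurl(url).contents)

metadata = decoded["metadata"]                  # dict merged into the datasource's metadata
vendordata_raw = decoded["vendordata_raw"]      # string, may be empty
userdata_raw = decoded["userdata_raw"]          # string, e.g. a "#cloud-config" document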
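The bulk of the SmartOS change is that the datasource no longer assumes a serial console: in LX-brand zones (detected via the 'BrandZ virtual linux' kernel version string) it talks to the Unix socket /native/.zonecontrol/metadata.sock, while KVM guests keep /dev/ttyS1. Both paths hand JoyentMetadataClient a file-like object, which is why the client's attribute is renamed from serial to metasource. A condensed sketch of the selection logic in _get_seed_file_object(), with error handling simplified:

import os
import socket
import stat

import serial  # pyserial, as already imported by DataSourceSmartOS


def open_metadata_transport(smartos_type,
                            sockfile='/native/.zonecontrol/metadata.sock',
                            serial_device='/dev/ttyS1',
                            serial_timeout=60):
    """Return a file-like object for the SmartOS metadata channel."""
    if smartos_type == 'lx-brand':
        if not stat.S_ISSOCK(os.stat(sockfile).st_mode):
            return None                      # not actually an LX-brand zone
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(sockfile)
        return sock.makefile('rwb')
    # KVM guest: the hypervisor exposes the metadata service on the serial device
    if not stat.S_ISCHR(os.stat(serial_device).st_mode):
        return None
    return serial.Serial(serial_device, timeout=serial_timeout)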
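Because the transport can now be either a socket file object or a pyserial handle, JoyentMetadataClient.request() switches from serial.readline() to an explicit write/flush followed by a byte-at-a-time read until newline, and treats any response without SUCCESS as a miss. The function below is a condensed copy of that loop; construction of the 'V2 <length> <checksum> <body>' frame is only partly visible in the hunk, so the frame argument is taken as given.

def exchange(metasource, frame):
    """Send one framed request and return the raw response line, or None."""
    metasource.write(frame.encode('ascii'))
    metasource.flush()

    response = bytearray()
    response.extend(metasource.read(1))
    while response[-1:] != b'\n':                # responses are newline-terminated
        response.extend(metasource.read(1))
    response = response.rstrip().decode('ascii')

    if 'SUCCESS' not in response:                # NOTFOUND (or garbage): no value
        return None
    return response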
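On the Azure side, the endpoint parsing moves out of find_endpoint() into a static get_ip_from_lease_value() helper, which makes the two shapes the DHCP lease value can take easier to see: a colon-separated string of hex octets, or a string of at most four raw bytes. The helper below repeats the patch's logic; the sample input is an invented value in the hex-octet shape (it happens to decode to the well-known Azure wire-server address).

import socket
import struct


def get_ip_from_lease_value(lease_value):
    # Same logic as WALinuxAgentShim.get_ip_from_lease_value() in the patch.
    unescaped_value = lease_value.replace('\\', '')
    if len(unescaped_value) > 4:
        hex_string = ''
        for hex_pair in unescaped_value.split(':'):
            if len(hex_pair) == 1:
                hex_pair = '0' + hex_pair
            hex_string += hex_pair
        packed_bytes = struct.pack(
            '>L', int(hex_string.replace(':', ''), 16))
    else:
        packed_bytes = unescaped_value.encode('utf-8')
    return socket.inet_ntoa(packed_bytes)


print(get_ip_from_lease_value('a8:3f:81:10'))   # -> 168.63.129.16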
