summaryrefslogtreecommitdiff
path: root/cloudinit/sources
diff options
context:
space:
mode:
Diffstat (limited to 'cloudinit/sources')
-rw-r--r--cloudinit/sources/DataSourceAliYun.py49
-rw-r--r--cloudinit/sources/DataSourceAltCloud.py6
-rw-r--r--cloudinit/sources/DataSourceAzure.py7
-rw-r--r--cloudinit/sources/DataSourceDigitalOcean.py101
-rw-r--r--cloudinit/sources/DataSourceEc2.py18
-rw-r--r--cloudinit/sources/DataSourceMAAS.py12
-rw-r--r--cloudinit/sources/DataSourceOpenNebula.py34
-rw-r--r--cloudinit/sources/helpers/azure.py2
-rw-r--r--cloudinit/sources/helpers/digitalocean.py218
9 files changed, 351 insertions, 96 deletions
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
new file mode 100644
index 00000000..19957212
--- /dev/null
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -0,0 +1,49 @@
+# vi: ts=4 expandtab
+
+import os
+
+from cloudinit import sources
+from cloudinit.sources import DataSourceEc2 as EC2
+
+DEF_MD_VERSION = "2016-01-01"
+
+
+class DataSourceAliYun(EC2.DataSourceEc2):
+ metadata_urls = ["http://100.100.100.200"]
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths)
+ self.seed_dir = os.path.join(paths.seed_dir, "AliYun")
+ self.api_ver = DEF_MD_VERSION
+
+ def get_hostname(self, fqdn=False, _resolve_ip=False):
+ return self.metadata.get('hostname', 'localhost.localdomain')
+
+ def get_public_ssh_keys(self):
+ return parse_public_keys(self.metadata.get('public-keys', {}))
+
+
+def parse_public_keys(public_keys):
+ keys = []
+ for key_id, key_body in public_keys.items():
+ if isinstance(key_body, str):
+ keys.append(key_body.strip())
+ elif isinstance(key_body, list):
+ keys.extend(key_body)
+ elif isinstance(key_body, dict):
+ key = key_body.get('openssh-key', [])
+ if isinstance(key, str):
+ keys.append(key.strip())
+ elif isinstance(key, list):
+ keys.extend(key)
+ return keys
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceAliYun, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 48136f7c..20345389 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -195,8 +195,7 @@ class DataSourceAltCloud(sources.DataSource):
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- _err.message)
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
return False
except OSError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
@@ -211,8 +210,7 @@ class DataSourceAltCloud(sources.DataSource):
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
except ProcessExecutionError as _err:
- util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
- _err.message)
+ util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err)
return False
except OSError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index dbc2bb68..b802b03e 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -252,7 +252,7 @@ class DataSourceAzureNet(sources.DataSource):
cc_modules_override = support_new_ephemeral(self.sys_cfg)
if cc_modules_override:
- self.cfg['cloud_config_modules'] = cc_modules_override
+ self.cfg['cloud_init_modules'] = cc_modules_override
return True
@@ -283,11 +283,14 @@ def find_fabric_formatted_ephemeral_part():
device_location = potential_location
break
if device_location is None:
+ LOG.debug("no azure resource disk partition path found")
return None
ntfs_devices = util.find_devs_with("TYPE=ntfs")
real_device = os.path.realpath(device_location)
if real_device in ntfs_devices:
return device_location
+ LOG.debug("'%s' existed (%s) but was not ntfs formatted",
+ device_location, real_device)
return None
@@ -342,7 +345,7 @@ def support_new_ephemeral(cfg):
LOG.debug("cloud-init will format ephemeral0.1 this boot.")
LOG.debug("setting disk_setup and mounts modules 'always' for this boot")
- cc_modules = cfg.get('cloud_config_modules')
+ cc_modules = cfg.get('cloud_init_modules')
if not cc_modules:
return None
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index fc596e17..c5770d5d 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -18,13 +18,12 @@
# DigitalOcean Droplet API:
# https://developers.digitalocean.com/documentation/metadata/
-import json
-
from cloudinit import log as logging
from cloudinit import sources
-from cloudinit import url_helper
from cloudinit import util
+import cloudinit.sources.helpers.digitalocean as do_helper
+
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
@@ -36,11 +35,13 @@ BUILTIN_DS_CONFIG = {
MD_RETRIES = 30
MD_TIMEOUT = 2
MD_WAIT_RETRY = 2
+MD_USE_IPV4LL = True
class DataSourceDigitalOcean(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.distro = distro
self.metadata = dict()
self.ds_cfg = util.mergemanydict([
util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
@@ -48,80 +49,72 @@ class DataSourceDigitalOcean(sources.DataSource):
self.metadata_address = self.ds_cfg['metadata_url']
self.retries = self.ds_cfg.get('retries', MD_RETRIES)
self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
+ self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+ self._network_config = None
def _get_sysinfo(self):
- # DigitalOcean embeds vendor ID and instance/droplet_id in the
- # SMBIOS information
-
- LOG.debug("checking if instance is a DigitalOcean droplet")
-
- # Detect if we are on DigitalOcean and return the Droplet's ID
- vendor_name = util.read_dmi_data("system-manufacturer")
- if vendor_name != "DigitalOcean":
- return (False, None)
+ return do_helper.read_sysinfo()
- LOG.info("running on DigitalOcean")
-
- droplet_id = util.read_dmi_data("system-serial-number")
- if droplet_id:
- LOG.debug(("system identified via SMBIOS as DigitalOcean Droplet"
- "{}").format(droplet_id))
- else:
- LOG.critical(("system identified via SMBIOS as a DigitalOcean "
- "Droplet, but did not provide an ID. Please file a "
- "support ticket at: "
- "https://cloud.digitalocean.com/support/tickets/"
- "new"))
-
- return (True, droplet_id)
-
- def get_data(self, apply_filter=False):
+ def get_data(self):
(is_do, droplet_id) = self._get_sysinfo()
# only proceed if we know we are on DigitalOcean
if not is_do:
return False
- LOG.debug("reading metadata from {}".format(self.metadata_address))
- response = url_helper.readurl(self.metadata_address,
- timeout=self.timeout,
- sec_between=self.wait_retry,
- retries=self.retries)
+ LOG.info("Running on digital ocean. droplet_id=%s" % droplet_id)
- contents = util.decode_binary(response.contents)
- decoded = json.loads(contents)
+ ipv4LL_nic = None
+ if self.use_ip4LL:
+ ipv4LL_nic = do_helper.assign_ipv4_link_local()
- self.metadata = decoded
- self.metadata['instance-id'] = decoded.get('droplet_id', droplet_id)
- self.metadata['local-hostname'] = decoded.get('hostname', droplet_id)
- self.vendordata_raw = decoded.get("vendor_data", None)
- self.userdata_raw = decoded.get("user_data", None)
- return True
+ md = do_helper.read_metadata(
+ self.metadata_address, timeout=self.timeout,
+ sec_between=self.wait_retry, retries=self.retries)
- def get_public_ssh_keys(self):
- public_keys = self.metadata.get('public_keys', [])
- if isinstance(public_keys, list):
- return public_keys
- else:
- return [public_keys]
+ self.metadata_full = md
+ self.metadata['instance-id'] = md.get('droplet_id', droplet_id)
+ self.metadata['local-hostname'] = md.get('hostname', droplet_id)
+ self.metadata['interfaces'] = md.get('interfaces')
+ self.metadata['public-keys'] = md.get('public_keys')
+ self.metadata['availability_zone'] = md.get('region', 'default')
+ self.vendordata_raw = md.get("vendor_data", None)
+ self.userdata_raw = md.get("user_data", None)
- @property
- def availability_zone(self):
- return self.metadata.get('region', 'default')
+ if ipv4LL_nic:
+ do_helper.del_ipv4_link_local(ipv4LL_nic)
- @property
- def launch_index(self):
- return None
+ return True
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
self.get_instance_id(), 'system-serial-number')
+ @property
+ def network_config(self):
+ """Configure the networking. This needs to be done each boot, since
+ the IP information may have changed due to snapshot and/or
+ migration.
+ """
+
+ if self._network_config:
+ return self._network_config
+
+ interfaces = self.metadata.get('interfaces')
+ LOG.debug(interfaces)
+ if not interfaces:
+ raise Exception("Unable to get meta-data from server....")
+
+ nameservers = self.metadata_full['dns']['nameservers']
+ self._network_config = do_helper.convert_network_configuration(
+ interfaces, nameservers)
+ return self._network_config
+
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )),
]
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 6fe2a0bb..bc84ef5d 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -31,21 +31,19 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
-DEF_MD_URL = "http://169.254.169.254"
-
# Which version we are requesting of the ec2 metadata apis
DEF_MD_VERSION = '2009-04-04'
-# Default metadata urls that will be used if none are provided
-# They will be checked for 'resolveability' and some of the
-# following may be discarded if they do not resolve
-DEF_MD_URLS = [DEF_MD_URL, "http://instance-data.:8773"]
-
class DataSourceEc2(sources.DataSource):
+ # Default metadata urls that will be used if none are provided
+ # They will be checked for 'resolveability' and some of the
+ # following may be discarded if they do not resolve
+ metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
+
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.metadata_address = DEF_MD_URL
+ self.metadata_address = None
self.seed_dir = os.path.join(paths.seed_dir, "ec2")
self.api_ver = DEF_MD_VERSION
@@ -106,7 +104,7 @@ class DataSourceEc2(sources.DataSource):
return False
# Remove addresses from the list that wont resolve.
- mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
+ mdurls = mcfg.get("metadata_urls", self.metadata_urls)
filtered = [x for x in mdurls if util.is_resolvable_url(x)]
if set(filtered) != set(mdurls):
@@ -117,7 +115,7 @@ class DataSourceEc2(sources.DataSource):
mdurls = filtered
else:
LOG.warn("Empty metadata url list! using default list")
- mdurls = DEF_MD_URLS
+ mdurls = self.metadata_urls
urls = []
url2base = {}
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index ab93c0a2..81abcd47 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -310,12 +310,12 @@ if __name__ == "__main__":
creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
'token_secret': args.tsec, 'consumer_secret': args.csec}
- maaspkg_cfg = "/etc/cloud/cloud.cfg.d/90_dpkg_maas.cfg"
- if (args.config is None and args.url is None and
- os.path.exists(maaspkg_cfg) and
- os.access(maaspkg_cfg, os.R_OK)):
- sys.stderr.write("Used config in %s.\n" % maaspkg_cfg)
- args.config = maaspkg_cfg
+ if args.config is None:
+ for fname in ('91_kernel_cmdline_url', '90_dpkg_maas'):
+ fpath = "/etc/cloud/cloud.cfg.d/" + fname + ".cfg"
+ if os.path.exists(fpath) and os.access(fpath, os.R_OK):
+ sys.stderr.write("Used config in %s.\n" % fpath)
+ args.config = fpath
if args.config:
cfg = util.read_conf(args.config)
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 635a836c..ba5f3f92 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -30,6 +30,7 @@ import re
import string
from cloudinit import log as logging
+from cloudinit import net
from cloudinit import sources
from cloudinit import util
@@ -120,17 +121,11 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
- REG_DEV_MAC = re.compile(
- r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
- re.MULTILINE | re.DOTALL)
-
- def __init__(self, ip, context):
- self.ip = ip
+ def __init__(self, context, system_nics_by_mac=None):
self.context = context
- self.ifaces = self.get_ifaces()
-
- def get_ifaces(self):
- return self.REG_DEV_MAC.findall(self.ip)
+ if system_nics_by_mac is None:
+ system_nics_by_mac = get_physical_nics_by_mac()
+ self.ifaces = system_nics_by_mac
def mac2ip(self, mac):
components = mac.split(':')[2:]
@@ -188,9 +183,7 @@ class OpenNebulaNetwork(object):
conf.append('iface lo inet loopback')
conf.append('')
- for i in self.ifaces:
- dev = i[0]
- mac = i[1]
+ for mac, dev in self.ifaces.items():
ip_components = self.mac2ip(mac)
conf.append('auto ' + dev)
@@ -405,16 +398,19 @@ def read_context_disk_dir(source_dir, asuser=None):
# generate static /etc/network/interfaces
# only if there are any required context variables
# http://opennebula.org/documentation:rel3.8:cong#network_configuration
- for k in context:
- if re.match(r'^ETH\d+_IP$', k):
- (out, _) = util.subp(['ip', 'link'])
- net = OpenNebulaNetwork(out, context)
- results['network-interfaces'] = net.gen_conf()
- break
+ ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)]
+ if ipaddr_keys:
+ onet = OpenNebulaNetwork(context)
+ results['network-interfaces'] = onet.gen_conf()
return results
+def get_physical_nics_by_mac():
+ devs = net.get_interfaces_by_mac()
+ return dict([(m, n) for m, n in devs.items() if net.is_physical(n)])
+
+
# Legacy: Must be present in case we load an old pkl object
DataSourceOpenNebulaNet = DataSourceOpenNebula
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 689ed4cc..1b3e9b70 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -232,7 +232,7 @@ class WALinuxAgentShim(object):
def _get_value_from_leases_file(fallback_lease_file):
leases = []
content = util.load_file(fallback_lease_file)
- LOG.debug("content is {}".format(content))
+ LOG.debug("content is %s", content)
for line in content.splitlines():
if 'unknown-245' in line:
# Example line from Ubuntu
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
new file mode 100644
index 00000000..b0a721c2
--- /dev/null
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -0,0 +1,218 @@
+# vi: ts=4 expandtab
+#
+# Author: Ben Howard <bh@digitalocean.com>
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import random
+
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import url_helper
+from cloudinit import util
+
+NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
+
+LOG = logging.getLogger(__name__)
+
+
+def assign_ipv4_link_local(nic=None):
+ """Bring up NIC using a link-local (ip4LL) address. On
+ DigitalOcean, the link-local domain is per-droplet routed, so there
+ is no risk of collisions. However, to be more safe, the ip4LL
+ address is random.
+ """
+
+ if not nic:
+ for cdev in sorted(cloudnet.get_devicelist()):
+ if cloudnet.is_physical(cdev):
+ nic = cdev
+ LOG.debug("assigned nic '%s' for link-local discovery", nic)
+ break
+
+ if not nic:
+ raise RuntimeError("unable to find interfaces to access the "
+ "meta-data server. This droplet is broken.")
+
+ addr = "169.254.{0}.{1}/16".format(random.randint(1, 168),
+ random.randint(0, 255))
+
+ ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
+ ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
+
+ if not util.which('ip'):
+ raise RuntimeError("No 'ip' command available to configure ip4LL "
+ "address")
+
+ try:
+ (result, _err) = util.subp(ip_addr_cmd)
+ LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic)
+
+ (result, _err) = util.subp(ip_link_cmd)
+ LOG.debug("brought device '%s' up", nic)
+ except Exception:
+ util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
+ " Droplet networking will be broken", addr, nic)
+ raise
+
+ return nic
+
+
+def del_ipv4_link_local(nic=None):
+ """Remove the ip4LL address. While this is not necessary, the ip4LL
+ address is extraneous and confusing to users.
+ """
+ if not nic:
+ LOG.debug("no link_local address interface defined, skipping link "
+ "local address cleanup")
+ return
+
+ LOG.debug("cleaning up ipv4LL address")
+
+ ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
+
+ try:
+ (result, _err) = util.subp(ip_addr_cmd)
+ LOG.debug("removed ip4LL addresses from %s", nic)
+
+ except Exception as e:
+ util.logexc(LOG, "failed to remove ip4LL address from '%s': %s", nic, e)
+
+
+def convert_network_configuration(config, dns_servers):
+ """Convert the DigitalOcean Network description into Cloud-init's netconfig
+ format.
+
+ Example JSON:
+ {'public': [
+ {'mac': '04:01:58:27:7f:01',
+ 'ipv4': {'gateway': '45.55.32.1',
+ 'netmask': '255.255.224.0',
+ 'ip_address': '45.55.50.93'},
+ 'anchor_ipv4': {
+ 'gateway': '10.17.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.17.0.9'},
+ 'type': 'public',
+ 'ipv6': {'gateway': '....',
+ 'ip_address': '....',
+ 'cidr': 64}}
+ ],
+ 'private': [
+ {'mac': '04:01:58:27:7f:02',
+ 'ipv4': {'gateway': '10.132.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.132.75.35'},
+ 'type': 'private'}
+ ]
+ }
+ """
+
+ def _get_subnet_part(pcfg, nameservers=None):
+ subpart = {'type': 'static',
+ 'control': 'auto',
+ 'address': pcfg.get('ip_address'),
+ 'gateway': pcfg.get('gateway')}
+
+ if nameservers:
+ subpart['dns_nameservers'] = nameservers
+
+ if ":" in pcfg.get('ip_address'):
+ subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
+ pcfg.get('cidr'))
+ else:
+ subpart['netmask'] = pcfg.get('netmask')
+
+ return subpart
+
+ all_nics = []
+ for k in ('public', 'private'):
+ if k in config:
+ all_nics.extend(config[k])
+
+ macs_to_nics = cloudnet.get_interfaces_by_mac()
+ nic_configs = []
+
+ for nic in all_nics:
+
+ mac_address = nic.get('mac')
+ sysfs_name = macs_to_nics.get(mac_address)
+ nic_type = nic.get('type', 'unknown')
+ # Note: the entry 'public' above contains a list, but
+ # the list will only ever have one nic inside it per digital ocean.
+ # If it ever had more than one nic, then this code would
+ # assign all 'public' the same name.
+ if_name = NIC_MAP.get(nic_type, sysfs_name)
+
+ LOG.debug("mapped %s interface to %s, assigning name of %s",
+ mac_address, sysfs_name, if_name)
+
+ ncfg = {'type': 'physical',
+ 'mac_address': mac_address,
+ 'name': if_name}
+
+ subnets = []
+ for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):
+ raw_subnet = nic.get(netdef, None)
+ if not raw_subnet:
+ continue
+
+ sub_part = _get_subnet_part(raw_subnet)
+ if nic_type == 'public' and 'anchor' not in netdef:
+ # add DNS resolvers to the public interfaces only
+ sub_part = _get_subnet_part(raw_subnet, dns_servers)
+ else:
+ # remove the gateway for any non-public interfaces
+ if 'gateway' in sub_part:
+ del sub_part['gateway']
+
+ subnets.append(sub_part)
+
+ ncfg['subnets'] = subnets
+ nic_configs.append(ncfg)
+ LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
+
+ return {'version': 1, 'config': nic_configs}
+
+
+def read_metadata(url, timeout=2, sec_between=2, retries=30):
+ response = url_helper.readurl(url, timeout=timeout,
+ sec_between=sec_between, retries=retries)
+ if not response.ok():
+ raise RuntimeError("unable to read metadata at %s" % url)
+ return json.loads(response.contents.decode())
+
+
+def read_sysinfo():
+ # DigitalOcean embeds vendor ID and instance/droplet_id in the
+ # SMBIOS information
+
+ # Detect if we are on DigitalOcean and return the Droplet's ID
+ vendor_name = util.read_dmi_data("system-manufacturer")
+ if vendor_name != "DigitalOcean":
+ return (False, None)
+
+ droplet_id = util.read_dmi_data("system-serial-number")
+ if droplet_id:
+ LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
+ droplet_id)
+ else:
+ msg = ("system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/new")
+ LOG.critical(msg)
+ raise RuntimeError(msg)
+
+ return (True, droplet_id)