Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/apport.py                   |   1
-rw-r--r--  cloudinit/settings.py                 |   1
-rw-r--r--  cloudinit/sources/DataSourceVultr.py  | 160
-rw-r--r--  cloudinit/sources/helpers/vultr.py    | 241
4 files changed, 403 insertions(+), 0 deletions(-)
diff --git a/cloudinit/apport.py b/cloudinit/apport.py
index 25f254e3..aadc638f 100644
--- a/cloudinit/apport.py
+++ b/cloudinit/apport.py
@@ -41,6 +41,7 @@ KNOWN_CLOUD_NAMES = [
'SmartOS',
'UpCloud',
'VMware',
+ 'Vultr',
'ZStack',
'Other'
]
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 91e1bfe7..23e4c0ad 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -30,6 +30,7 @@ CFG_BUILTIN = {
'GCE',
'OpenStack',
'AliYun',
+ 'Vultr',
'Ec2',
'CloudSigma',
'CloudStack',
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
new file mode 100644
index 00000000..c08ff848
--- /dev/null
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -0,0 +1,160 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Vultr Metadata API:
+# https://www.vultr.com/metadata/
+
+from cloudinit import log
+from cloudinit import sources
+from cloudinit import util
+
+import cloudinit.sources.helpers.vultr as vultr
+
+LOG = log.getLogger(__name__)
+BUILTIN_DS_CONFIG = {
+ 'url': 'http://169.254.169.254',
+ 'retries': 30,
+ 'timeout': 2,
+ 'wait': 2
+}
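+# These defaults can be overridden through the datasource system config,
+# e.g. via a drop-in such as /etc/cloud/cloud.cfg.d/90_vultr.cfg (the file
+# name is illustrative):
+#
+#   datasource:
+#     Vultr:
+#       timeout: 5
+#       retries: 10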
+
+
+class DataSourceVultr(sources.DataSource):
+
+ dsname = 'Vultr'
+
+    def __init__(self, sys_cfg, distro, paths):
+        super(DataSourceVultr, self).__init__(sys_cfg, distro, paths)
+        self.netcfg = {}
+        self.ds_cfg = util.mergemanydict([
+            util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
+            BUILTIN_DS_CONFIG])
+
+    # Fetch data and check whether the machine is a Vultr instance
+ def _get_data(self):
+ LOG.debug("Detecting if machine is a Vultr instance")
+ if not vultr.is_vultr():
+ LOG.debug("Machine is not a Vultr instance")
+ return False
+
+ LOG.debug("Machine is a Vultr instance")
+
+ # Fetch metadata
+ md = self.get_metadata()
+
+ self.metadata_full = md
+ self.metadata['instanceid'] = md['instanceid']
+ self.metadata['local-hostname'] = md['hostname']
+ self.metadata['public-keys'] = md["public-keys"]
+ self.userdata_raw = md["user-data"]
+
+ # Generate config and process data
+ self.get_datasource_data(md)
+
+ # Dump some data so diagnosing failures is manageable
+ LOG.debug("Vultr Vendor Config:")
+ LOG.debug(md['vendor-data']['config'])
+ LOG.debug("SUBID: %s", self.metadata['instanceid'])
+ LOG.debug("Hostname: %s", self.metadata['local-hostname'])
+ if self.userdata_raw is not None:
+ LOG.debug("User-Data:")
+ LOG.debug(self.userdata_raw)
+
+ return True
+
+ # Process metadata
+ def get_datasource_data(self, md):
+ # Grab config
+ config = md['vendor-data']['config']
+
+ # Generate network config
+ self.netcfg = vultr.generate_network_config(md['interfaces'])
+
+        # Assemble the user scripts; these need the network config from above
+ user_scripts = vultr.generate_user_scripts(md, self.netcfg['config'])
+
+ # Default hostname is "guest" for whitelabel
+ if self.metadata['local-hostname'] == "":
+ self.metadata['local-hostname'] = "guest"
+
+ self.userdata_raw = md["user-data"]
+ if self.userdata_raw == "":
+ self.userdata_raw = None
+
+ # Assemble vendor-data
+ # This adds provided scripts and the config
+ self.vendordata_raw = []
+ self.vendordata_raw.extend(user_scripts)
+ self.vendordata_raw.append("#cloud-config\n%s" % config)
+
+    # Fetch the metadata using the configured URL and retry parameters
+ def get_metadata(self):
+ return vultr.get_metadata(self.ds_cfg['url'],
+ self.ds_cfg['timeout'],
+ self.ds_cfg['retries'],
+ self.ds_cfg['wait'])
+
+    # The SUBID doubles as the instance ID; compare it to the system UUID
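+    # cloud-init uses this check to decide whether cached data from a
+    # previous boot still belongs to this machine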
+ def check_instance_id(self, sys_cfg):
+ if not vultr.is_vultr():
+ return False
+
+        # Bare metal has no way to implement this in the local stage
+ if vultr.is_baremetal():
+ return False
+
+ subid = vultr.get_sysinfo()['subid']
+ return sources.instance_id_matches_system_uuid(subid)
+
+ # Currently unsupported
+ @property
+ def launch_index(self):
+ return None
+
+ @property
+ def network_config(self):
+ return self.netcfg
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceVultr, (sources.DEP_FILESYSTEM, )),
+]
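+# Only DEP_FILESYSTEM is listed: the datasource runs in the local stage,
+# before networking, and brings up its own ephemeral DHCP when needed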
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import sys
+
+ if not vultr.is_vultr():
+ print("Machine is not a Vultr instance")
+ sys.exit(1)
+
+ md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'],
+ BUILTIN_DS_CONFIG['timeout'],
+ BUILTIN_DS_CONFIG['retries'],
+ BUILTIN_DS_CONFIG['wait'])
+ config = md['vendor-data']['config']
+ sysinfo = vultr.get_sysinfo()
+
+ print(util.json_dumps(sysinfo))
+ print(config)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
new file mode 100644
index 00000000..c22cd0b1
--- /dev/null
+++ b/cloudinit/sources/helpers/vultr.py
@@ -0,0 +1,241 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import log
+from cloudinit import url_helper
+from cloudinit import dmi
+from cloudinit import util
+from cloudinit import net
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from functools import lru_cache
+
+LOG = log.getLogger(__name__)
+
+
+@lru_cache()
+def get_metadata(url, timeout, retries, sec_between):
+ # Bring up interface
+ try:
+ with EphemeralDHCPv4(connectivity_url=url):
+ # Fetch the metadata
+ v1 = read_metadata(url, timeout, retries, sec_between)
+    except NoDHCPLeaseError as exc:
+        LOG.error("Bailing, DHCP Exception: %s", exc)
+        raise
+
+    return json.loads(v1)
+
+
+# Read the system information from SMBIOS
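+# On Vultr guests the DMI serial number carries the SUBID, which doubles
+# as the instance ID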
+def get_sysinfo():
+ return {
+ 'manufacturer': dmi.read_dmi_data("system-manufacturer"),
+ 'subid': dmi.read_dmi_data("system-serial-number")
+ }
+
+
+# Assumes the caller has already verified this is a Vultr machine
+def is_baremetal():
+    return get_sysinfo()['manufacturer'] != "Vultr"
+
+
+# Confirm this machine is a Vultr instance
+def is_vultr():
+ # VC2, VDC, and HFC use DMI
+ sysinfo = get_sysinfo()
+
+ if sysinfo['manufacturer'] == "Vultr":
+ return True
+
+ # Baremetal requires a kernel parameter
+ if "vultr" in util.get_cmdline().split():
+ return True
+
+ return False
+
+
+# Read Metadata endpoint
+def read_metadata(url, timeout, retries, sec_between):
+ url = "%s/v1.json" % url
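+    # e.g. http://169.254.169.254/v1.json; the endpoint returns the full
+    # metadata set as a single JSON document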
+ response = url_helper.readurl(url,
+ timeout=timeout,
+ retries=retries,
+ headers={'Metadata-Token': 'vultr'},
+ sec_between=sec_between)
+
+    if not response.ok():
+        raise RuntimeError("Failed to connect to %s: Code: %s" %
+                           (url, response.code))
+
+ return response.contents.decode()
+
+
+# Wrapped for caching
+@lru_cache()
+def get_interface_map():
+ return net.get_interfaces_by_mac()
+
+
+# Convert a MAC address to an interface name
+def get_interface_name(mac):
+    # get() returns None when the MAC is not present on this system
+    return get_interface_map().get(mac)
+
+
+# Generate network configs
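+# The result is a version-1 cloud-init network config: one nameserver
+# entry plus a "physical" entry per NIC, roughly:
+#   {"version": 1, "config": [{"type": "nameserver", ...},
+#                             {"type": "physical", "name": "eth0", ...}]}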
+def generate_network_config(interfaces):
+ network = {
+ "version": 1,
+ "config": [
+ {
+ "type": "nameserver",
+ "address": [
+ "108.61.10.10"
+ ]
+ }
+ ]
+ }
+
+ # Prepare interface 0, public
+ if len(interfaces) > 0:
+ public = generate_public_network_interface(interfaces[0])
+ network['config'].append(public)
+
+ # Prepare interface 1, private
+ if len(interfaces) > 1:
+ private = generate_private_network_interface(interfaces[1])
+ network['config'].append(private)
+
+ return network
+
+
+# Generate the public network interface config from metadata
+def generate_public_network_interface(interface):
+ interface_name = get_interface_name(interface['mac'])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system" %
+ interface['mac'])
+
+ netcfg = {
+ "name": interface_name,
+ "type": "physical",
+ "mac_address": interface['mac'],
+ "accept-ra": 1,
+ "subnets": [
+ {
+ "type": "dhcp",
+ "control": "auto"
+ },
+ {
+ "type": "dhcp6",
+ "control": "auto"
+ },
+ ]
+ }
+
+    # Check for additional IPs
+    if "ipv4" in interface and len(interface['ipv4']['additional']) > 0:
+ for additional in interface['ipv4']['additional']:
+ add = {
+ "type": "static",
+ "control": "auto",
+ "address": additional['address'],
+ "netmask": additional['netmask']
+ }
+ netcfg['subnets'].append(add)
+
+    # Check for additional IPv6 addresses
+    if "ipv6" in interface and len(interface['ipv6']['additional']) > 0:
+ for additional in interface['ipv6']['additional']:
+ add = {
+ "type": "static6",
+ "control": "auto",
+ "address": additional['address'],
+ "netmask": additional['netmask']
+ }
+ netcfg['subnets'].append(add)
+
+    return netcfg
+
+
+# Generate the private network interface config from metadata
+def generate_private_network_interface(interface):
+ interface_name = get_interface_name(interface['mac'])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system" %
+ interface['mac'])
+
+ netcfg = {
+ "name": interface_name,
+ "type": "physical",
+ "mac_address": interface['mac'],
+ "accept-ra": 1,
+ "subnets": [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": interface['ipv4']['address'],
+ "netmask": interface['ipv4']['netmask']
+ }
+ ]
+ }
+
+ return netcfg
+
+
+# Assemble the vendor and startup scripts
+def generate_user_scripts(md, network_config):
+ user_scripts = []
+
+ # Raid 1 script
+ if md['vendor-data']['raid1-script']:
+ user_scripts.append(md['vendor-data']['raid1-script'])
+
+    # Enable multi-queue on Linux
+    if util.is_Linux() and md['vendor-data']['ethtool-script']:
+ ethtool_script = md['vendor-data']['ethtool-script']
+
+ # Tool location
+ tool = "/opt/vultr/ethtool"
+
+ # Go through the interfaces
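+        # Each physical NIC appends one command to the vendor script,
+        # e.g.: /opt/vultr/ethtool -L eth0 combined $(nproc --all)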
+ for netcfg in network_config:
+ # If the interface has a mac and is physical
+ if "mac_address" in netcfg and netcfg['type'] == "physical":
+                # Set its queue count to the number of cores, per the RHEL docs
+ name = netcfg['name']
+ command = "%s -L %s combined $(nproc --all)" % (tool, name)
+ ethtool_script = '%s\n%s' % (ethtool_script, command)
+
+ user_scripts.append(ethtool_script)
+
+ # This is for vendor scripts
+ if md['vendor-data']['vendor-script']:
+ user_scripts.append(md['vendor-data']['vendor-script'])
+
+ # Startup script
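+    # The metadata service returns the literal placeholder below when no
+    # startup script is configured, so skip it in that case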
+ script = md['startup-script']
+ if script and script != "echo No configured startup script":
+ user_scripts.append(script)
+
+ return user_scripts
+
+
+# vi: ts=4 expandtab