summaryrefslogtreecommitdiff
path: root/cloudinit
diff options
context:
space:
mode:
author	eb3095 <45504889+eb3095@users.noreply.github.com>	2021-09-24 09:57:33 -0400
committer	GitHub <noreply@github.com>	2021-09-24 08:57:33 -0500
commit	244af3f4971c8f89c741aa90306f0fb1b4459940 (patch)
tree	9d4cbe459aff2496cb0ce726065fb5dc2c923b2d /cloudinit
parent	24a15e9cd3acd7c93efe9755b98897ee8c0476c7 (diff)
download	vyos-cloud-init-244af3f4971c8f89c741aa90306f0fb1b4459940.tar.gz
download	vyos-cloud-init-244af3f4971c8f89c741aa90306f0fb1b4459940.zip
Cleanup Vultr support (#987)
Offload Vultr's vendordata assembly to the backend, correct vendordata storage and parsing, allow passing critical data via the user agent, and improve the networking configuration for additional interfaces.
Diffstat (limited to 'cloudinit')
-rw-r--r--cloudinit/sources/DataSourceVultr.py47
-rw-r--r--cloudinit/sources/helpers/vultr.py68
2 files changed, 38 insertions(+), 77 deletions(-)
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
index c08ff848..92765c72 100644
--- a/cloudinit/sources/DataSourceVultr.py
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -8,6 +8,7 @@
from cloudinit import log as log
from cloudinit import sources
from cloudinit import util
+from cloudinit import version
import cloudinit.sources.helpers.vultr as vultr
@@ -16,7 +17,11 @@ BUILTIN_DS_CONFIG = {
'url': 'http://169.254.169.254',
'retries': 30,
'timeout': 2,
- 'wait': 2
+ 'wait': 2,
+ 'user-agent': 'Cloud-Init/%s - OS: %s Variant: %s' %
+ (version.version_string(),
+ util.system_info()['system'],
+ util.system_info()['variant'])
}
@@ -40,21 +45,18 @@ class DataSourceVultr(sources.DataSource):
LOG.debug("Machine is a Vultr instance")
# Fetch metadata
- md = self.get_metadata()
-
- self.metadata_full = md
- self.metadata['instanceid'] = md['instanceid']
- self.metadata['local-hostname'] = md['hostname']
- self.metadata['public-keys'] = md["public-keys"]
- self.userdata_raw = md["user-data"]
+ self.metadata = self.get_metadata()
+ self.metadata['instance-id'] = self.metadata['instanceid']
+ self.metadata['local-hostname'] = self.metadata['hostname']
+ self.userdata_raw = self.metadata["user-data"]
# Generate config and process data
- self.get_datasource_data(md)
+ self.get_datasource_data(self.metadata)
# Dump some data so diagnosing failures is manageable
LOG.debug("Vultr Vendor Config:")
- LOG.debug(md['vendor-data']['config'])
- LOG.debug("SUBID: %s", self.metadata['instanceid'])
+ LOG.debug(util.json_dumps(self.metadata['vendor-data']))
+ LOG.debug("SUBID: %s", self.metadata['instance-id'])
LOG.debug("Hostname: %s", self.metadata['local-hostname'])
if self.userdata_raw is not None:
LOG.debug("User-Data:")
@@ -64,14 +66,11 @@ class DataSourceVultr(sources.DataSource):
# Process metadata
def get_datasource_data(self, md):
- # Grab config
- config = md['vendor-data']['config']
-
# Generate network config
self.netcfg = vultr.generate_network_config(md['interfaces'])
- # This requires info generated in the vendor config
- user_scripts = vultr.generate_user_scripts(md, self.netcfg['config'])
+ # Grab vendordata
+ self.vendordata_raw = md['vendor-data']
# Default hostname is "guest" for whitelabel
if self.metadata['local-hostname'] == "":
@@ -81,18 +80,13 @@ class DataSourceVultr(sources.DataSource):
if self.userdata_raw == "":
self.userdata_raw = None
- # Assemble vendor-data
- # This adds provided scripts and the config
- self.vendordata_raw = []
- self.vendordata_raw.extend(user_scripts)
- self.vendordata_raw.append("#cloud-config\n%s" % config)
-
# Get the metadata by flag
def get_metadata(self):
return vultr.get_metadata(self.ds_cfg['url'],
self.ds_cfg['timeout'],
self.ds_cfg['retries'],
- self.ds_cfg['wait'])
+ self.ds_cfg['wait'],
+ self.ds_cfg['user-agent'])
# Compare subid as instance id
def check_instance_id(self, sys_cfg):
@@ -137,11 +131,12 @@ if __name__ == "__main__":
md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'],
BUILTIN_DS_CONFIG['timeout'],
BUILTIN_DS_CONFIG['retries'],
- BUILTIN_DS_CONFIG['wait'])
- config = md['vendor-data']['config']
+ BUILTIN_DS_CONFIG['wait'],
+ BUILTIN_DS_CONFIG['user-agent'])
+ config = md['vendor-data']
sysinfo = vultr.get_sysinfo()
print(util.json_dumps(sysinfo))
- print(config)
+ print(util.json_dumps(config))
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
index 2521ec2f..9effb0d9 100644
--- a/cloudinit/sources/helpers/vultr.py
+++ b/cloudinit/sources/helpers/vultr.py
@@ -17,20 +17,17 @@ LOG = log.getLogger(__name__)
@lru_cache()
-def get_metadata(url, timeout, retries, sec_between):
+def get_metadata(url, timeout, retries, sec_between, agent):
# Bring up interface
try:
with EphemeralDHCPv4(connectivity_url_data={"url": url}):
# Fetch the metadata
- v1 = read_metadata(url, timeout, retries, sec_between)
+ v1 = read_metadata(url, timeout, retries, sec_between, agent)
except (NoDHCPLeaseError) as exc:
LOG.error("Bailing, DHCP Exception: %s", exc)
raise
- v1_json = json.loads(v1)
- metadata = v1_json
-
- return metadata
+ return json.loads(v1)
# Read the system information from SMBIOS
@@ -64,12 +61,20 @@ def is_vultr():
# Read Metadata endpoint
-def read_metadata(url, timeout, retries, sec_between):
+def read_metadata(url, timeout, retries, sec_between, agent):
url = "%s/v1.json" % url
+
+ # Announce os details so we can handle non Vultr origin
+ # images and provide correct vendordata generation.
+ headers = {
+ 'Metadata-Token': 'cloudinit',
+ 'User-Agent': agent
+ }
+
response = url_helper.readurl(url,
timeout=timeout,
retries=retries,
- headers={'Metadata-Token': 'vultr'},
+ headers=headers,
sec_between=sec_between)
if not response.ok():
@@ -114,9 +119,9 @@ def generate_network_config(interfaces):
public = generate_public_network_interface(interfaces[0])
network['config'].append(public)
- # Prepare interface 1, private
- if len(interfaces) > 1:
- private = generate_private_network_interface(interfaces[1])
+ # Prepare additional interfaces, private
+ for i in range(1, len(interfaces)):
+ private = generate_private_network_interface(interfaces[i])
network['config'].append(private)
return network
@@ -141,7 +146,7 @@ def generate_public_network_interface(interface):
"control": "auto"
},
{
- "type": "dhcp6",
+ "type": "ipv6_slaac",
"control": "auto"
},
]
@@ -187,7 +192,6 @@ def generate_private_network_interface(interface):
"name": interface_name,
"type": "physical",
"mac_address": interface['mac'],
- "accept-ra": 1,
"subnets": [
{
"type": "static",
@@ -201,42 +205,4 @@ def generate_private_network_interface(interface):
return netcfg
-# This is for the vendor and startup scripts
-def generate_user_scripts(md, network_config):
- user_scripts = []
-
- # Raid 1 script
- if md['vendor-data']['raid1-script']:
- user_scripts.append(md['vendor-data']['raid1-script'])
-
- # Enable multi-queue on linux
- if util.is_Linux() and md['vendor-data']['ethtool-script']:
- ethtool_script = md['vendor-data']['ethtool-script']
-
- # Tool location
- tool = "/opt/vultr/ethtool"
-
- # Go through the interfaces
- for netcfg in network_config:
- # If the interface has a mac and is physical
- if "mac_address" in netcfg and netcfg['type'] == "physical":
- # Set its multi-queue to num of cores as per RHEL Docs
- name = netcfg['name']
- command = "%s -L %s combined $(nproc --all)" % (tool, name)
- ethtool_script = '%s\n%s' % (ethtool_script, command)
-
- user_scripts.append(ethtool_script)
-
- # This is for vendor scripts
- if md['vendor-data']['vendor-script']:
- user_scripts.append(md['vendor-data']['vendor-script'])
-
- # Startup script
- script = md['startup-script']
- if script and script != "echo No configured startup script":
- user_scripts.append(script)
-
- return user_scripts
-
-
# vi: ts=4 expandtab