Diffstat (limited to 'cloudinit/sources')
-rw-r--r--  cloudinit/sources/DataSourceAliYun.py | 18
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py | 113
-rwxr-xr-x  cloudinit/sources/DataSourceAzure.py | 2358
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py | 9
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 39
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py | 137
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py | 115
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py | 67
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 461
-rw-r--r--  cloudinit/sources/DataSourceExoscale.py | 171
-rw-r--r--  cloudinit/sources/DataSourceGCE.py | 251
-rw-r--r--  cloudinit/sources/DataSourceHetzner.py | 99
-rw-r--r--  cloudinit/sources/DataSourceIBMCloud.py | 128
-rw-r--r--  cloudinit/sources/DataSourceLXD.py | 392
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py | 180
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py | 156
-rw-r--r--  cloudinit/sources/DataSourceNone.py | 15
-rw-r--r--  cloudinit/sources/DataSourceOVF.py | 445
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py | 194
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py | 132
-rw-r--r--  cloudinit/sources/DataSourceOracle.py | 138
-rw-r--r--  cloudinit/sources/DataSourceRbxCloud.py | 195
-rw-r--r--  cloudinit/sources/DataSourceScaleway.py | 139
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py | 553
-rw-r--r--  cloudinit/sources/DataSourceUpCloud.py | 162
-rw-r--r--  cloudinit/sources/DataSourceVMware.py | 869
-rw-r--r--  cloudinit/sources/DataSourceVultr.py | 157
-rw-r--r--  cloudinit/sources/__init__.py | 471
-rwxr-xr-x  cloudinit/sources/helpers/azure.py | 743
-rw-r--r--  cloudinit/sources/helpers/digitalocean.py | 195
-rw-r--r--  cloudinit/sources/helpers/hetzner.py | 15
-rw-r--r--  cloudinit/sources/helpers/netlink.py | 187
-rw-r--r--  cloudinit/sources/helpers/openstack.py | 439
-rw-r--r--  cloudinit/sources/helpers/tests/test_netlink.py | 480
-rw-r--r--  cloudinit/sources/helpers/tests/test_openstack.py | 44
-rw-r--r--  cloudinit/sources/helpers/upcloud.py | 229
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/boot_proto.py | 5
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py | 67
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_custom_script.py | 45
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py | 7
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py | 90
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_passwd.py | 38
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py | 2
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_event.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_state.py | 1
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py | 46
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 11
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic.py | 33
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic_base.py | 29
-rw-r--r--  cloudinit/sources/helpers/vultr.py | 230
-rw-r--r--  cloudinit/sources/tests/__init__.py | 0
-rw-r--r--  cloudinit/sources/tests/test_init.py | 759
-rw-r--r--  cloudinit/sources/tests/test_oracle.py | 785
55 files changed, 7057 insertions, 5590 deletions
diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py
index 09052873..37f512e3 100644
--- a/cloudinit/sources/DataSourceAliYun.py
+++ b/cloudinit/sources/DataSourceAliYun.py
@@ -1,7 +1,6 @@
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import dmi
-from cloudinit import sources
+from cloudinit import dmi, sources
from cloudinit.sources import DataSourceEc2 as EC2
ALIYUN_PRODUCT = "Alibaba Cloud ECS"
@@ -9,18 +8,18 @@ ALIYUN_PRODUCT = "Alibaba Cloud ECS"
class DataSourceAliYun(EC2.DataSourceEc2):
- dsname = 'AliYun'
- metadata_urls = ['http://100.100.100.200']
+ dsname = "AliYun"
+ metadata_urls = ["http://100.100.100.200"]
# The minimum supported metadata_version from the ec2 metadata apis
- min_metadata_version = '2016-01-01'
+ min_metadata_version = "2016-01-01"
extended_metadata_versions = []
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata.get('hostname', 'localhost.localdomain')
+ return self.metadata.get("hostname", "localhost.localdomain")
def get_public_ssh_keys(self):
- return parse_public_keys(self.metadata.get('public-keys', {}))
+ return parse_public_keys(self.metadata.get("public-keys", {}))
def _get_cloud_name(self):
if _is_aliyun():
@@ -30,7 +29,7 @@ class DataSourceAliYun(EC2.DataSourceEc2):
def _is_aliyun():
- return dmi.read_dmi_data('system-product-name') == ALIYUN_PRODUCT
+ return dmi.read_dmi_data("system-product-name") == ALIYUN_PRODUCT
def parse_public_keys(public_keys):
@@ -41,7 +40,7 @@ def parse_public_keys(public_keys):
elif isinstance(key_body, list):
keys.extend(key_body)
elif isinstance(key_body, dict):
- key = key_body.get('openssh-key', [])
+ key = key_body.get("openssh-key", [])
if isinstance(key, str):
keys.append(key.strip())
elif isinstance(key, list):
@@ -59,4 +58,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
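The public-keys entry that AliYun's metadata service returns can take several shapes, which is why parse_public_keys() above branches on type. A minimal usage sketch, assuming the function iterates public_keys.items() in its elided loop header; the sample metadata is illustrative, not captured from a live instance:

from cloudinit.sources.DataSourceAliYun import parse_public_keys

# Illustrative metadata covering the three shapes the hunk above handles.
sample = {
    "key-0": "ssh-rsa AAAA0 user@host\n",             # plain string
    "key-1": ["ssh-rsa AAAA1", "ssh-ed25519 AAAA2"],  # list of strings
    "key-2": {"openssh-key": "ssh-rsa AAAA3"},        # dict wrapper
}
print(parse_public_keys(sample))
# ['ssh-rsa AAAA0 user@host', 'ssh-rsa AAAA1', 'ssh-ed25519 AAAA2',
#  'ssh-rsa AAAA3']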
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index cd93412a..9029b535 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -7,10 +7,10 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-'''
+"""
This file contains code used to gather the user data passed to an
instance on RHEVm and vSphere.
-'''
+"""
import errno
import os
@@ -18,29 +18,26 @@ import os.path
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import sources, subp, util
LOG = logging.getLogger(__name__)
# Needed file paths
-CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
+CLOUD_INFO_FILE = "/etc/sysconfig/cloud-info"
# Shell command lists
-CMD_PROBE_FLOPPY = ['modprobe', 'floppy']
+CMD_PROBE_FLOPPY = ["modprobe", "floppy"]
META_DATA_NOT_SUPPORTED = {
- 'block-device-mapping': {},
- 'instance-id': 455,
- 'local-hostname': 'localhost',
- 'placement': {},
+ "block-device-mapping": {},
+ "instance-id": 455,
+ "local-hostname": "localhost",
+ "placement": {},
}
def read_user_data_callback(mount_dir):
- '''
+ """
Description:
This callback will be applied by util.mount_cb() on the mounted
file.
@@ -55,10 +52,10 @@ def read_user_data_callback(mount_dir):
Returns:
User Data
- '''
+ """
- deltacloud_user_data_file = mount_dir + '/deltacloud-user-data.txt'
- user_data_file = mount_dir + '/user-data.txt'
+ deltacloud_user_data_file = mount_dir + "/deltacloud-user-data.txt"
+ user_data_file = mount_dir + "/user-data.txt"
# First try deltacloud_user_data_file. On failure try user_data_file.
try:
@@ -67,7 +64,7 @@ def read_user_data_callback(mount_dir):
try:
user_data = util.load_file(user_data_file).strip()
except IOError:
- util.logexc(LOG, 'Failed accessing user data file.')
+ util.logexc(LOG, "Failed accessing user data file.")
return None
return user_data
@@ -75,7 +72,7 @@ def read_user_data_callback(mount_dir):
class DataSourceAltCloud(sources.DataSource):
- dsname = 'AltCloud'
+ dsname = "AltCloud"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -87,7 +84,7 @@ class DataSourceAltCloud(sources.DataSource):
return "%s [seed=%s]" % (root, self.seed)
def get_cloud_type(self):
- '''
+ """
Description:
Get the type for the cloud back end this instance is running on
by examining the string returned by reading either:
@@ -101,31 +98,34 @@ class DataSourceAltCloud(sources.DataSource):
One of the following strings:
'RHEV', 'VSPHERE' or 'UNKNOWN'
- '''
+ """
if os.path.exists(CLOUD_INFO_FILE):
try:
cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper()
except IOError:
- util.logexc(LOG, 'Unable to access cloud info file at %s.',
- CLOUD_INFO_FILE)
- return 'UNKNOWN'
+ util.logexc(
+ LOG,
+ "Unable to access cloud info file at %s.",
+ CLOUD_INFO_FILE,
+ )
+ return "UNKNOWN"
return cloud_type
system_name = dmi.read_dmi_data("system-product-name")
if not system_name:
- return 'UNKNOWN'
+ return "UNKNOWN"
sys_name = system_name.upper()
- if sys_name.startswith('RHEV'):
- return 'RHEV'
+ if sys_name.startswith("RHEV"):
+ return "RHEV"
- if sys_name.startswith('VMWARE'):
- return 'VSPHERE'
+ if sys_name.startswith("VMWARE"):
+ return "VSPHERE"
- return 'UNKNOWN'
+ return "UNKNOWN"
def _get_data(self):
- '''
+ """
Description:
User Data is passed to the launching instance which
is used to perform instance configuration.
@@ -140,18 +140,18 @@ class DataSourceAltCloud(sources.DataSource):
Images not built with Imagefactory will try to
determine what the cloud provider is based on system
information.
- '''
+ """
- LOG.debug('Invoked get_data()')
+ LOG.debug("Invoked get_data()")
cloud_type = self.get_cloud_type()
- LOG.debug('cloud_type: %s', str(cloud_type))
+ LOG.debug("cloud_type: %s", str(cloud_type))
- if 'RHEV' in cloud_type:
+ if "RHEV" in cloud_type:
if self.user_data_rhevm():
return True
- elif 'VSPHERE' in cloud_type:
+ elif "VSPHERE" in cloud_type:
if self.user_data_vsphere():
return True
else:
@@ -160,20 +160,20 @@ class DataSourceAltCloud(sources.DataSource):
return False
# No user data found
- util.logexc(LOG, 'Failed accessing user data.')
+ util.logexc(LOG, "Failed accessing user data.")
return False
def _get_subplatform(self):
"""Return the subplatform metadata details."""
cloud_type = self.get_cloud_type()
- if not hasattr(self, 'source'):
+ if not hasattr(self, "source"):
self.source = sources.METADATA_UNKNOWN
- if cloud_type == 'RHEV':
- self.source = '/dev/fd0'
- return '%s (%s)' % (cloud_type.lower(), self.source)
+ if cloud_type == "RHEV":
+ self.source = "/dev/fd0"
+ return "%s (%s)" % (cloud_type.lower(), self.source)
def user_data_rhevm(self):
- '''
+ """
RHEVM specific userdata read
If on RHEV-M the user data will be contained on the
@@ -186,7 +186,7 @@ class DataSourceAltCloud(sources.DataSource):
mount /dev/fd0 <tmp mount dir>
The call back passed to util.mount_cb will do:
read <tmp mount dir>/<user_data_file>
- '''
+ """
return_str = None
@@ -194,16 +194,16 @@ class DataSourceAltCloud(sources.DataSource):
try:
modprobe_floppy()
except subp.ProcessExecutionError as e:
- util.logexc(LOG, 'Failed modprobe: %s', e)
+ util.logexc(LOG, "Failed modprobe: %s", e)
return False
- floppy_dev = '/dev/fd0'
+ floppy_dev = "/dev/fd0"
# udevadm settle for floppy device
try:
util.udevadm_settle(exists=floppy_dev, timeout=5)
except (subp.ProcessExecutionError, OSError) as e:
- util.logexc(LOG, 'Failed udevadm_settle: %s\n', e)
+ util.logexc(LOG, "Failed udevadm_settle: %s\n", e)
return False
try:
@@ -212,8 +212,11 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user data",
- floppy_dev)
+ util.logexc(
+ LOG,
+ "Failed to mount %s when looking for user data",
+ floppy_dev,
+ )
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -224,7 +227,7 @@ class DataSourceAltCloud(sources.DataSource):
return False
def user_data_vsphere(self):
- '''
+ """
vSphere specific userdata read
If on vSphere the user data will be contained on the
@@ -235,10 +238,10 @@ class DataSourceAltCloud(sources.DataSource):
mount /dev/fd0 <tmp mount dir>
The call back passed to util.mount_cb will do:
read <tmp mount dir>/<user_data_file>
- '''
+ """
return_str = None
- cdrom_list = util.find_devs_with('LABEL=CDROM')
+ cdrom_list = util.find_devs_with("LABEL=CDROM")
for cdrom_dev in cdrom_list:
try:
return_str = util.mount_cb(cdrom_dev, read_user_data_callback)
@@ -249,8 +252,11 @@ class DataSourceAltCloud(sources.DataSource):
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", cdrom_dev)
+ util.logexc(
+ LOG,
+ "Failed to mount %s when looking for user data",
+ cdrom_dev,
+ )
self.userdata_raw = return_str
self.metadata = META_DATA_NOT_SUPPORTED
@@ -263,7 +269,7 @@ class DataSourceAltCloud(sources.DataSource):
def modprobe_floppy():
out, _err = subp.subp(CMD_PROBE_FLOPPY)
- LOG.debug('Command: %s\nOutput%s', ' '.join(CMD_PROBE_FLOPPY), out)
+ LOG.debug("Command: %s\nOutput%s", " ".join(CMD_PROBE_FLOPPY), out)
# Used to match classes to dependencies
@@ -279,4 +285,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
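The get_cloud_type() changes above are quote/format churn only; the detection order is unchanged — the cloud-info file wins, then DMI. A minimal standalone sketch of that logic (the function name here is ours, not cloud-init's):

import os

from cloudinit import dmi, util

def sketch_cloud_type(info_file="/etc/sysconfig/cloud-info"):
    # /etc/sysconfig/cloud-info, when present, overrides DMI detection.
    if os.path.exists(info_file):
        try:
            return util.load_file(info_file).strip().upper()
        except IOError:
            return "UNKNOWN"
    system_name = dmi.read_dmi_data("system-product-name") or ""
    sys_name = system_name.upper()
    if sys_name.startswith("RHEV"):
        return "RHEV"
    if sys_name.startswith("VMWARE"):
        return "VSPHERE"
    return "UNKNOWN"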
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 04ff2131..359dfbde 100755
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -5,101 +5,93 @@
# This file is part of cloud-init. See LICENSE file for license information.
import base64
-import contextlib
import crypt
-from functools import partial
+import datetime
import os
import os.path
import re
-from time import time
-from time import sleep
-from xml.dom import minidom
import xml.etree.ElementTree as ET
from enum import Enum
+from time import sleep, time
+from typing import Any, Dict, List, Optional
+from xml.dom import minidom
+
+import requests
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.event import EventType
+from cloudinit import net, sources, ssh_util, subp, util
+from cloudinit.event import EventScope, EventType
from cloudinit.net import device_driver
-from cloudinit.net.dhcp import EphemeralDHCPv4
-from cloudinit import sources
-from cloudinit.sources.helpers import netlink
-from cloudinit import subp
-from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
-from cloudinit import util
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
from cloudinit.reporting import events
-
+from cloudinit.sources.helpers import netlink
from cloudinit.sources.helpers.azure import (
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE,
+ DEFAULT_WIRESERVER_ENDPOINT,
azure_ds_reporter,
azure_ds_telemetry_reporter,
- get_metadata_from_fabric,
+ build_minimal_ovf,
+ dhcp_log_cb,
get_boot_telemetry,
+ get_metadata_from_fabric,
get_system_info,
- report_diagnostic_event,
- EphemeralDHCPv4WithReporting,
is_byte_swapped,
- dhcp_log_cb,
push_log_to_kvp,
- report_failure_to_fabric)
+ report_diagnostic_event,
+ report_failure_to_fabric,
+)
+from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc
LOG = logging.getLogger(__name__)
-DS_NAME = 'Azure'
+DS_NAME = "Azure"
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
-AGENT_START = ['service', 'walinuxagent', 'start']
-AGENT_START_BUILTIN = "__builtin__"
-BOUNCE_COMMAND_IFUP = [
- 'sh', '-xc',
- "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"
-]
-BOUNCE_COMMAND_FREEBSD = [
- 'sh', '-xc',
- ("i=$interface; x=0; ifconfig down $i || x=$?; "
- "ifconfig up $i || x=$?; exit $x")
-]
# azure systems will always have a resource disk, and 66-azure-ephemeral.rules
# ensures that it gets linked to this path.
-RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource'
-DEFAULT_PRIMARY_NIC = 'eth0'
-LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases'
-DEFAULT_FS = 'ext4'
+RESOURCE_DISK_PATH = "/dev/disk/cloud/azure_resource"
+LEASE_FILE = "/var/lib/dhcp/dhclient.eth0.leases"
+DEFAULT_FS = "ext4"
# DMI chassis-asset-tag is set static for all azure instances
-AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77'
+AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77"
REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds"
-REPROVISION_NIC_ATTACH_MARKER_FILE = "/var/lib/cloud/data/wait_for_nic_attach"
REPROVISION_NIC_DETACHED_MARKER_FILE = "/var/lib/cloud/data/nic_detached"
REPORTED_READY_MARKER_FILE = "/var/lib/cloud/data/reported_ready"
-AGENT_SEED_DIR = '/var/lib/waagent'
-
+AGENT_SEED_DIR = "/var/lib/waagent"
+DEFAULT_PROVISIONING_ISO_DEV = "/dev/sr0"
# In the event where the IMDS primary server is not
# available, it takes 1s to fallback to the secondary one
IMDS_TIMEOUT_IN_SECONDS = 2
-IMDS_URL = "http://169.254.169.254/metadata/"
-IMDS_VER = "2019-06-01"
-IMDS_VER_PARAM = "api-version={}".format(IMDS_VER)
+IMDS_URL = "http://169.254.169.254/metadata"
+IMDS_VER_MIN = "2019-06-01"
+IMDS_VER_WANT = "2021-08-01"
+IMDS_EXTENDED_VER_MIN = "2021-03-01"
+
+class MetadataType(Enum):
+ ALL = "{}/instance".format(IMDS_URL)
+ NETWORK = "{}/instance/network".format(IMDS_URL)
+ REPROVISION_DATA = "{}/reprovisiondata".format(IMDS_URL)
-class metadata_type(Enum):
- compute = "{}instance?{}".format(IMDS_URL, IMDS_VER_PARAM)
- network = "{}instance/network?{}".format(IMDS_URL,
- IMDS_VER_PARAM)
- reprovisiondata = "{}reprovisiondata?{}".format(IMDS_URL,
- IMDS_VER_PARAM)
+class PPSType(Enum):
+ NONE = "None"
+ RUNNING = "Running"
+ SAVABLE = "Savable"
+ UNKNOWN = "Unknown"
-PLATFORM_ENTROPY_SOURCE = "/sys/firmware/acpi/tables/OEM0"
+
+PLATFORM_ENTROPY_SOURCE: Optional[str] = "/sys/firmware/acpi/tables/OEM0"
# List of static scripts and network config artifacts created by
# stock ubuntu suported images.
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
- '/etc/netplan/90-hotplug-azure.yaml',
- '/usr/local/sbin/ephemeral_eth.sh',
- '/etc/udev/rules.d/10-net-device-added.rules',
- '/run/network/interfaces.ephemeral.d',
+ "/etc/netplan/90-hotplug-azure.yaml",
+ "/usr/local/sbin/ephemeral_eth.sh",
+ "/etc/udev/rules.d/10-net-device-added.rules",
+ "/run/network/interfaces.ephemeral.d",
]
# This list is used to blacklist devices that will be considered
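One functional change hidden in the rename above: the MetadataType enum no longer bakes api-version into its member values; it is appended per request instead. A sketch of the composition — the exact format string lives in get_metadata_from_imds(), outside this hunk, so treat it as an assumption:

from cloudinit.sources.DataSourceAzure import IMDS_VER_WANT, MetadataType

url = "{}?api-version={}".format(MetadataType.NETWORK.value, IMDS_VER_WANT)
# -> http://169.254.169.254/metadata/instance/network?api-version=2021-08-01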
@@ -119,7 +111,7 @@ UBUNTU_EXTENDED_NETWORK_SCRIPTS = [
# https://docs.microsoft.com/en-us/azure/virtual-machines/dv2-dsv2-series
# https://docs.microsoft.com/en-us/azure/virtual-machines/dv3-dsv3-series
# https://docs.microsoft.com/en-us/azure/virtual-machines/ev3-esv3-series
-BLACKLIST_DRIVERS = ['mlx4_core', 'mlx5_core']
+BLACKLIST_DRIVERS = ["mlx4_core", "mlx5_core"]
def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
@@ -133,11 +125,13 @@ def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid):
if re.search(r"pnpinfo", line):
fields = line.split()
if len(fields) >= 3:
- columns = fields[2].split('=')
- if (len(columns) >= 2 and
- columns[0] == "deviceid" and
- columns[1].startswith(deviceid)):
- comps = fields[0].split('.')
+ columns = fields[2].split("=")
+ if (
+ len(columns) >= 2
+ and columns[0] == "deviceid"
+ and columns[1].startswith(deviceid)
+ ):
+ comps = fields[0].split(".")
return comps[2]
return None
@@ -161,7 +155,7 @@ def find_busdev_from_disk(camcontrol_out, disk_drv):
return None
-def find_dev_from_busdev(camcontrol_out, busdev):
+def find_dev_from_busdev(camcontrol_out: str, busdev: str) -> Optional[str]:
# find the daX from 'camcontrol devlist' output
# if busdev matches the specified value, i.e. 'scbus2'
"""
@@ -171,18 +165,38 @@ def find_dev_from_busdev(camcontrol_out, busdev):
"""
for line in camcontrol_out.splitlines():
if re.search(busdev, line):
- items = line.split('(')
+ items = line.split("(")
if len(items) == 2:
- dev_pass = items[1].split(',')
+ dev_pass = items[1].split(",")
return dev_pass[0]
return None
-def execute_or_debug(cmd, fail_ret=None):
+def normalize_mac_address(mac: str) -> str:
+ """Normalize mac address with colons and lower-case."""
+ if len(mac) == 12:
+ mac = ":".join(
+ [mac[0:2], mac[2:4], mac[4:6], mac[6:8], mac[8:10], mac[10:12]]
+ )
+
+ return mac.lower()
+
+
+@azure_ds_telemetry_reporter
+def get_hv_netvsc_macs_normalized() -> List[str]:
+ """Get Hyper-V NICs as normalized MAC addresses."""
+ return [
+ normalize_mac_address(n[1])
+ for n in net.get_interfaces()
+ if n[2] == "hv_netvsc"
+ ]
+
+
+def execute_or_debug(cmd, fail_ret=None) -> str:
try:
- return subp.subp(cmd)[0]
+ return subp.subp(cmd)[0] # type: ignore
except subp.ProcessExecutionError:
- LOG.debug("Failed to execute: %s", ' '.join(cmd))
+ LOG.debug("Failed to execute: %s", " ".join(cmd))
return fail_ret
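normalize_mac_address(), new in this hunk, accepts both the bare 12-hex-digit form and an already-delimited MAC. A quick check, assuming a cloud-init tree with this patch applied:

from cloudinit.sources.DataSourceAzure import normalize_mac_address

assert normalize_mac_address("001122AABBCC") == "00:11:22:aa:bb:cc"
assert normalize_mac_address("00:11:22:AA:BB:CC") == "00:11:22:aa:bb:cc"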
@@ -191,14 +205,14 @@ def get_dev_storvsc_sysctl():
def get_camcontrol_dev_bus():
- return execute_or_debug(['camcontrol', 'devlist', '-b'])
+ return execute_or_debug(["camcontrol", "devlist", "-b"])
def get_camcontrol_dev():
- return execute_or_debug(['camcontrol', 'devlist'])
+ return execute_or_debug(["camcontrol", "devlist"])
-def get_resource_disk_on_freebsd(port_id):
+def get_resource_disk_on_freebsd(port_id) -> Optional[str]:
g0 = "00000000"
if port_id > 1:
g0 = "00000001"
@@ -242,9 +256,8 @@ def get_resource_disk_on_freebsd(port_id):
# update the FreeBSD specific information
if util.is_FreeBSD():
- DEFAULT_PRIMARY_NIC = 'hn0'
- LEASE_FILE = '/var/db/dhclient.leases.hn0'
- DEFAULT_FS = 'freebsd-ufs'
+ LEASE_FILE = "/var/db/dhclient.leases.hn0"
+ DEFAULT_FS = "freebsd-ufs"
res_disk = get_resource_disk_on_freebsd(1)
if res_disk is not None:
LOG.debug("resource disk is not None")
@@ -255,186 +268,152 @@ if util.is_FreeBSD():
PLATFORM_ENTROPY_SOURCE = None
BUILTIN_DS_CONFIG = {
- 'agent_command': AGENT_START_BUILTIN,
- 'data_dir': AGENT_SEED_DIR,
- 'set_hostname': True,
- 'hostname_bounce': {
- 'interface': DEFAULT_PRIMARY_NIC,
- 'policy': True,
- 'command': 'builtin',
- 'hostname_command': 'hostname',
- },
- 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH},
- 'dhclient_lease_file': LEASE_FILE,
- 'apply_network_config': True, # Use IMDS published network configuration
+ "data_dir": AGENT_SEED_DIR,
+ "disk_aliases": {"ephemeral0": RESOURCE_DISK_PATH},
+ "dhclient_lease_file": LEASE_FILE,
+ "apply_network_config": True, # Use IMDS published network configuration
}
# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False
-BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'gpt',
- 'layout': [100],
- 'overwrite': True},
+BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG = {
+ "disk_setup": {
+ "ephemeral0": {
+ "table_type": "gpt",
+ "layout": [100],
+ "overwrite": True,
+ },
},
- 'fs_setup': [{'filesystem': DEFAULT_FS,
- 'device': 'ephemeral0.1'}],
+ "fs_setup": [{"filesystem": DEFAULT_FS, "device": "ephemeral0.1"}],
}
-DS_CFG_PATH = ['datasource', DS_NAME]
-DS_CFG_KEY_PRESERVE_NTFS = 'never_destroy_ntfs'
-DEF_EPHEMERAL_LABEL = 'Temporary Storage'
+DS_CFG_PATH = ["datasource", DS_NAME]
+DS_CFG_KEY_PRESERVE_NTFS = "never_destroy_ntfs"
+DEF_EPHEMERAL_LABEL = "Temporary Storage"
# The redacted password fails to meet password complexity requirements
# so we can safely use this to mask/redact the password in the ovf-env.xml
-DEF_PASSWD_REDACTION = 'REDACTED'
-
-
-def get_hostname(hostname_command='hostname'):
- if not isinstance(hostname_command, (list, tuple)):
- hostname_command = (hostname_command,)
- return subp.subp(hostname_command, capture=True)[0].strip()
-
-
-def set_hostname(hostname, hostname_command='hostname'):
- subp.subp([hostname_command, hostname])
-
-
-@azure_ds_telemetry_reporter
-@contextlib.contextmanager
-def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
- """
- Set a temporary hostname, restoring the previous hostname on exit.
-
- Will have the value of the previous hostname when used as a context
- manager, or None if the hostname was not changed.
- """
- policy = cfg['hostname_bounce']['policy']
- previous_hostname = get_hostname(hostname_command)
- if (not util.is_true(cfg.get('set_hostname')) or
- util.is_false(policy) or
- (previous_hostname == temp_hostname and policy != 'force')):
- yield None
- return
- try:
- set_hostname(temp_hostname, hostname_command)
- except Exception as e:
- report_diagnostic_event(
- 'Failed setting temporary hostname: %s' % e,
- logger_func=LOG.warning)
- yield None
- return
- try:
- yield previous_hostname
- finally:
- set_hostname(previous_hostname, hostname_command)
+DEF_PASSWD_REDACTION = "REDACTED"
class DataSourceAzure(sources.DataSource):
- dsname = 'Azure'
+ dsname = "Azure"
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ }
+ }
_negotiated = False
_metadata_imds = sources.UNSET
+ _ci_pkl_version = 1
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'azure')
+ self.seed_dir = os.path.join(paths.seed_dir, "azure")
self.cfg = {}
self.seed = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
- self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file')
+ self.ds_cfg = util.mergemanydict(
+ [util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]
+ )
+ self.dhclient_lease_file = self.ds_cfg.get("dhclient_lease_file")
+ self._iso_dev = None
self._network_config = None
- # Regenerate network config new_instance boot and every boot
- self.update_events['network'].add(EventType.BOOT)
self._ephemeral_dhcp_ctx = None
+ self._wireserver_endpoint = DEFAULT_WIRESERVER_ENDPOINT
+
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ super()._unpickle(ci_pkl_version)
+
+ self._ephemeral_dhcp_ctx = None
+ self._iso_dev = None
+ self._wireserver_endpoint = DEFAULT_WIRESERVER_ENDPOINT
def __str__(self):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
- @azure_ds_telemetry_reporter
- def bounce_network_with_azure_hostname(self):
- # When using cloud-init to provision, we have to set the hostname from
- # the metadata and "bounce" the network to force DDNS to update via
- # dhclient
- azure_hostname = self.metadata.get('local-hostname')
- LOG.debug("Hostname in metadata is %s", azure_hostname)
- hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
-
- with temporary_hostname(azure_hostname, self.ds_cfg,
- hostname_command=hostname_command) \
- as previous_hn:
- if (previous_hn is not None and
- util.is_true(self.ds_cfg.get('set_hostname'))):
- cfg = self.ds_cfg['hostname_bounce']
-
- # "Bouncing" the network
- try:
- return perform_hostname_bounce(hostname=azure_hostname,
- cfg=cfg,
- prev_hostname=previous_hn)
- except Exception as e:
- report_diagnostic_event(
- "Failed publishing hostname: %s" % e,
- logger_func=LOG.warning)
- util.logexc(LOG, "handling set_hostname failed")
- return False
+ def _get_subplatform(self):
+ """Return the subplatform metadata source details."""
+ if self.seed is None:
+ subplatform_type = "unknown"
+ elif self.seed.startswith("/dev"):
+ subplatform_type = "config-disk"
+ elif self.seed.lower() == "imds":
+ subplatform_type = "imds"
+ else:
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.seed)
@azure_ds_telemetry_reporter
- def get_metadata_from_agent(self):
- temp_hostname = self.metadata.get('local-hostname')
- agent_cmd = self.ds_cfg['agent_command']
- LOG.debug("Getting metadata via agent. hostname=%s cmd=%s",
- temp_hostname, agent_cmd)
+ def _setup_ephemeral_networking(
+ self, *, iface: Optional[str] = None, timeout_minutes: int = 5
+ ) -> None:
+ """Setup ephemeral networking.
- self.bounce_network_with_azure_hostname()
+ Keep retrying DHCP up to specified number of minutes. This does
+ not kill dhclient, so the timeout in practice may be up to
+ timeout_minutes + the system-configured timeout for dhclient.
- try:
- invoke_agent(agent_cmd)
- except subp.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- self.ds_cfg['agent_command'])
-
- ddir = self.ds_cfg['data_dir']
-
- fp_files = []
- key_value = None
- for pk in self.cfg.get('_pubkeys', []):
- if pk.get('value', None):
- key_value = pk['value']
- LOG.debug("SSH authentication: using value from fabric")
- else:
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
- LOG.debug("SSH authentication: "
- "using fingerprint from fabric")
+ :param timeout_minutes: Number of minutes to keep retrying for.
+
+ :raises NoDHCPLeaseError: If unable to obtain DHCP lease.
+ """
+ if self._ephemeral_dhcp_ctx is not None:
+ raise RuntimeError(
+ "Bringing up networking when already configured."
+ )
+
+ LOG.debug("Requested ephemeral networking (iface=%s)", iface)
+
+ start = datetime.datetime.utcnow()
+ timeout = start + datetime.timedelta(minutes=timeout_minutes)
+ self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
+ iface=iface, dhcp_log_func=dhcp_log_cb
+ )
+
+ lease = None
with events.ReportEventStack(
- name="waiting-for-ssh-public-key",
- description="wait for agents to retrieve SSH keys",
- parent=azure_ds_reporter):
- # wait very long for public SSH keys to arrive
- # https://bugs.launchpad.net/cloud-init/+bug/1717611
- missing = util.log_time(logfunc=LOG.debug,
- msg="waiting for SSH public key files",
- func=util.wait_for_files,
- args=(fp_files, 900))
- if len(missing):
- LOG.warning("Did not find files, but going on: %s", missing)
-
- metadata = {}
- metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
- return metadata
+ name="obtain-dhcp-lease",
+ description="obtain dhcp lease",
+ parent=azure_ds_reporter,
+ ):
+ while datetime.datetime.utcnow() < timeout:
+ try:
+ lease = self._ephemeral_dhcp_ctx.obtain_lease()
+ break
+ except NoDHCPLeaseError:
+ continue
- def _get_subplatform(self):
- """Return the subplatform metadata source details."""
- if self.seed.startswith('/dev'):
- subplatform_type = 'config-disk'
- else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.seed)
+ if lease is None:
+ msg = "Failed to obtain DHCP lease (iface=%s)" % iface
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ self._ephemeral_dhcp_ctx = None
+ raise NoDHCPLeaseError()
+ else:
+ # Ensure iface is set.
+ self._ephemeral_dhcp_ctx.iface = lease["interface"]
+
+ # Update wireserver IP from DHCP options.
+ if "unknown-245" in lease:
+ self._wireserver_endpoint = lease["unknown-245"]
+
+ @azure_ds_telemetry_reporter
+ def _teardown_ephemeral_networking(self) -> None:
+ """Teardown ephemeral networking."""
+ if self._ephemeral_dhcp_ctx is None:
+ return
+
+ self._ephemeral_dhcp_ctx.clean_network()
+ self._ephemeral_dhcp_ctx = None
+
+ def _is_ephemeral_networking_up(self) -> bool:
+ """Check if networking is configured."""
+ return not (
+ self._ephemeral_dhcp_ctx is None
+ or self._ephemeral_dhcp_ctx.lease is None
+ )
@azure_ds_telemetry_reporter
def crawl_metadata(self):
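_setup_ephemeral_networking() above replaces the old one-shot DHCP attempt with a retry-until-deadline loop. A compressed sketch of that pattern, with illustrative names (retry_until_deadline is ours):

import datetime

def retry_until_deadline(attempt, timeout_minutes, retry_exc):
    # Keep calling attempt() until it succeeds or the deadline passes;
    # the datasource maps a final None to raising NoDHCPLeaseError.
    deadline = datetime.datetime.utcnow() + datetime.timedelta(
        minutes=timeout_minutes
    )
    while datetime.datetime.utcnow() < deadline:
        try:
            return attempt()
        except retry_exc:
            continue
    return None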
@@ -448,126 +427,205 @@ class DataSourceAzure(sources.DataSource):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
# need to look in the datadir and consider that valid
- ddir = self.ds_cfg['data_dir']
+ ddir = self.ds_cfg["data_dir"]
# The order in which the candidates are inserted matters here, because
# it determines the value of ret. More specifically, the first one in
# the candidate list determines the path to take in order to get the
# metadata we need.
- candidates = [self.seed_dir]
+ ovf_is_accessible = False
+ metadata_source = None
+ md = {}
+ userdata_raw = ""
+ cfg = {}
+ files = {}
+
if os.path.isfile(REPROVISION_MARKER_FILE):
- candidates.insert(0, "IMDS")
- report_diagnostic_event("Reprovision marker file already present "
- "before crawling Azure metadata: %s" %
- REPROVISION_MARKER_FILE,
- logger_func=LOG.debug)
- elif os.path.isfile(REPROVISION_NIC_ATTACH_MARKER_FILE):
- candidates.insert(0, "NIC_ATTACH_MARKER_PRESENT")
- report_diagnostic_event("Reprovision nic attach marker file "
- "already present before crawling Azure "
- "metadata: %s" %
- REPROVISION_NIC_ATTACH_MARKER_FILE,
- logger_func=LOG.debug)
- candidates.extend(list_possible_azure_ds_devs())
- if ddir:
- candidates.append(ddir)
-
- found = None
- reprovision = False
- reprovision_after_nic_attach = False
- for cdev in candidates:
- try:
- if cdev == "IMDS":
- ret = None
- reprovision = True
- elif cdev == "NIC_ATTACH_MARKER_PRESENT":
- ret = None
- reprovision_after_nic_attach = True
- elif cdev.startswith("/dev/"):
- if util.is_FreeBSD():
- ret = util.mount_cb(cdev, load_azure_ds_dir,
- mtype="udf")
+ metadata_source = "IMDS"
+ report_diagnostic_event(
+ "Reprovision marker file already present "
+ "before crawling Azure metadata: %s" % REPROVISION_MARKER_FILE,
+ logger_func=LOG.debug,
+ )
+ else:
+ for src in list_possible_azure_ds(self.seed_dir, ddir):
+ try:
+ if src.startswith("/dev/"):
+ if util.is_FreeBSD():
+ md, userdata_raw, cfg, files = util.mount_cb(
+ src, load_azure_ds_dir, mtype="udf"
+ )
+ else:
+ md, userdata_raw, cfg, files = util.mount_cb(
+ src, load_azure_ds_dir
+ )
+ # save the device for ejection later
+ self._iso_dev = src
else:
- ret = util.mount_cb(cdev, load_azure_ds_dir)
- else:
- ret = load_azure_ds_dir(cdev)
+ md, userdata_raw, cfg, files = load_azure_ds_dir(src)
+ ovf_is_accessible = True
+ metadata_source = src
+ break
+ except NonAzureDataSource:
+ report_diagnostic_event(
+ "Did not find Azure data source in %s" % src,
+ logger_func=LOG.debug,
+ )
+ continue
+ except util.MountFailedError:
+ report_diagnostic_event(
+ "%s was not mountable" % src, logger_func=LOG.debug
+ )
+ md = {"local-hostname": ""}
+ cfg = {"system_info": {"default_user": {"name": ""}}}
+ metadata_source = "IMDS"
+ continue
+ except BrokenAzureDataSource as exc:
+ msg = "BrokenAzureDataSource: %s" % exc
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg)
- except NonAzureDataSource:
- report_diagnostic_event(
- "Did not find Azure data source in %s" % cdev,
- logger_func=LOG.debug)
- continue
- except BrokenAzureDataSource as exc:
- msg = 'BrokenAzureDataSource: %s' % exc
+ report_diagnostic_event(
+ "Found provisioning metadata in %s" % metadata_source,
+ logger_func=LOG.debug,
+ )
+
+ # If we read OVF from attached media, we are provisioning. If OVF
+ # is not found, we are probably provisioning on a system which does
+ # not have UDF support. In either case, require IMDS metadata.
+ # If we require IMDS metadata, try harder to obtain networking, waiting
+ # for at least 20 minutes. Otherwise only wait 5 minutes.
+ requires_imds_metadata = bool(self._iso_dev) or not ovf_is_accessible
+ timeout_minutes = 5 if requires_imds_metadata else 20
+ try:
+ self._setup_ephemeral_networking(timeout_minutes=timeout_minutes)
+ except NoDHCPLeaseError:
+ pass
+
+ if self._is_ephemeral_networking_up():
+ imds_md = self.get_imds_data_with_api_fallback(retries=10)
+ else:
+ imds_md = {}
+
+ if not imds_md and not ovf_is_accessible:
+ msg = "No OVF or IMDS available"
+ report_diagnostic_event(msg)
+ raise sources.InvalidMetaDataException(msg)
+
+ # Refresh PPS type using metadata.
+ pps_type = self._determine_pps_type(cfg, imds_md)
+ if pps_type != PPSType.NONE:
+ if util.is_FreeBSD():
+ msg = "Free BSD is not supported for PPS VMs"
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
- except util.MountFailedError:
- report_diagnostic_event(
- '%s was not mountable' % cdev, logger_func=LOG.warning)
- continue
- perform_reprovision = reprovision or self._should_reprovision(ret)
- perform_reprovision_after_nic_attach = (
- reprovision_after_nic_attach or
- self._should_reprovision_after_nic_attach(ret))
+ self._write_reprovision_marker()
+
+ if pps_type == PPSType.SAVABLE:
+ self._wait_for_all_nics_ready()
+
+ md, userdata_raw, cfg, files = self._reprovision()
+ # fetch metadata again as it has changed after reprovisioning
+ imds_md = self.get_imds_data_with_api_fallback(retries=10)
+
+ # Report errors if IMDS network configuration is missing data.
+ self.validate_imds_network_metadata(imds_md=imds_md)
+
+ self.seed = metadata_source
+ crawled_data.update(
+ {
+ "cfg": cfg,
+ "files": files,
+ "metadata": util.mergemanydict([md, {"imds": imds_md}]),
+ "userdata_raw": userdata_raw,
+ }
+ )
+ imds_username = _username_from_imds(imds_md)
+ imds_hostname = _hostname_from_imds(imds_md)
+ imds_disable_password = _disable_password_from_imds(imds_md)
+ if imds_username:
+ LOG.debug("Username retrieved from IMDS: %s", imds_username)
+ cfg["system_info"]["default_user"]["name"] = imds_username
+ if imds_hostname:
+ LOG.debug("Hostname retrieved from IMDS: %s", imds_hostname)
+ crawled_data["metadata"]["local-hostname"] = imds_hostname
+ if imds_disable_password:
+ LOG.debug(
+ "Disable password retrieved from IMDS: %s",
+ imds_disable_password,
+ )
+ crawled_data["metadata"][
+ "disable_password"
+ ] = imds_disable_password
- if perform_reprovision or perform_reprovision_after_nic_attach:
- if util.is_FreeBSD():
- msg = "Free BSD is not supported for PPS VMs"
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise sources.InvalidMetaDataException(msg)
- if perform_reprovision_after_nic_attach:
- self._wait_for_all_nics_ready()
- ret = self._reprovision()
-
- imds_md = get_metadata_from_imds(
- self.fallback_interface, retries=10)
- (md, userdata_raw, cfg, files) = ret
- self.seed = cdev
- crawled_data.update({
- 'cfg': cfg,
- 'files': files,
- 'metadata': util.mergemanydict(
- [md, {'imds': imds_md}]),
- 'userdata_raw': userdata_raw})
- found = cdev
+ if metadata_source == "IMDS" and not crawled_data["files"]:
+ try:
+ contents = build_minimal_ovf(
+ username=imds_username, # type: ignore
+ hostname=imds_hostname, # type: ignore
+ disableSshPwd=imds_disable_password, # type: ignore
+ )
+ crawled_data["files"] = {"ovf-env.xml": contents}
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed to construct OVF from IMDS data %s" % e,
+ logger_func=LOG.debug,
+ )
- report_diagnostic_event(
- 'found datasource in %s' % cdev, logger_func=LOG.debug)
- break
+ # only use userdata from imds if OVF did not provide custom data
+ # userdata provided by IMDS is always base64 encoded
+ if not userdata_raw:
+ imds_userdata = _userdata_from_imds(imds_md)
+ if imds_userdata:
+ LOG.debug("Retrieved userdata from IMDS")
+ try:
+ crawled_data["userdata_raw"] = base64.b64decode(
+ "".join(imds_userdata.split())
+ )
+ except Exception:
+ report_diagnostic_event(
+ "Bad userdata in IMDS", logger_func=LOG.warning
+ )
- if not found:
- msg = 'No Azure metadata found'
+ if not metadata_source:
+ msg = "No Azure metadata found"
report_diagnostic_event(msg, logger_func=LOG.error)
raise sources.InvalidMetaDataException(msg)
+ else:
+ report_diagnostic_event(
+ "found datasource in %s" % metadata_source,
+ logger_func=LOG.debug,
+ )
- if found == ddir:
+ if metadata_source == ddir:
report_diagnostic_event(
- "using files cached in %s" % ddir, logger_func=LOG.debug)
+ "using files cached in %s" % ddir, logger_func=LOG.debug
+ )
seed = _get_random_seed()
if seed:
- crawled_data['metadata']['random_seed'] = seed
- crawled_data['metadata']['instance-id'] = self._iid()
-
- if perform_reprovision or perform_reprovision_after_nic_attach:
- LOG.info("Reporting ready to Azure after getting ReprovisionData")
- use_cached_ephemeral = (
- self.distro.networking.is_up(self.fallback_interface) and
- getattr(self, '_ephemeral_dhcp_ctx', None))
- if use_cached_ephemeral:
- self._report_ready(lease=self._ephemeral_dhcp_ctx.lease)
- self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
+ crawled_data["metadata"]["random_seed"] = seed
+ crawled_data["metadata"]["instance-id"] = self._iid()
+
+ if self._negotiated is False and self._is_ephemeral_networking_up():
+ # Report ready and fetch public-keys from Wireserver, if required.
+ pubkey_info = self._determine_wireserver_pubkey_info(
+ cfg=cfg, imds_md=imds_md
+ )
+ try:
+ ssh_keys = self._report_ready(pubkey_info=pubkey_info)
+ except Exception:
+ # Failed to report ready, but continue with best effort.
+ pass
else:
- try:
- with EphemeralDHCPv4WithReporting(
- azure_ds_reporter) as lease:
- self._report_ready(lease=lease)
- except Exception as e:
- report_diagnostic_event(
- "exception while reporting ready: %s" % e,
- logger_func=LOG.error)
- raise
+ LOG.debug("negotiating returned %s", ssh_keys)
+ if ssh_keys:
+ crawled_data["metadata"]["public-keys"] = ssh_keys
+
+ self._cleanup_markers()
+ self._negotiated = True
+
return crawled_data
def _is_platform_viable(self):
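One detail of the IMDS userdata handling earlier in this hunk deserves a worked example: IMDS returns base64 that may contain embedded whitespace, hence the "".join(imds_userdata.split()) before decoding. For instance (payload is illustrative):

import base64

imds_userdata = "SGVsbG8g\nd29ybGQ="  # base64 with an embedded newline
raw = base64.b64decode("".join(imds_userdata.split()))
assert raw == b"Hello world"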
@@ -602,28 +660,57 @@ class DataSourceAzure(sources.DataSource):
try:
crawled_data = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
)
except Exception as e:
report_diagnostic_event(
- 'Could not crawl Azure metadata: %s' % e,
- logger_func=LOG.error)
+ "Could not crawl Azure metadata: %s" % e, logger_func=LOG.error
+ )
self._report_failure(
- description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE)
+ description=DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
+ )
return False
+ finally:
+ self._teardown_ephemeral_networking()
- if (self.distro and self.distro.name == 'ubuntu' and
- self.ds_cfg.get('apply_network_config')):
+ if (
+ self.distro
+ and self.distro.name == "ubuntu"
+ and self.ds_cfg.get("apply_network_config")
+ ):
maybe_remove_ubuntu_network_config_scripts()
# Process crawled data and augment with various config defaults
- self.cfg = util.mergemanydict(
- [crawled_data['cfg'], BUILTIN_CLOUD_CONFIG])
- self._metadata_imds = crawled_data['metadata']['imds']
+
+ # Only merge in default cloud config related to the ephemeral disk
+ # if the ephemeral disk exists
+ devpath = RESOURCE_DISK_PATH
+ if os.path.exists(devpath):
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' exists. "
+ "Merging default Azure cloud ephemeral disk configs."
+ % devpath,
+ logger_func=LOG.debug,
+ )
+ self.cfg = util.mergemanydict(
+ [crawled_data["cfg"], BUILTIN_CLOUD_EPHEMERAL_DISK_CONFIG]
+ )
+ else:
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' does not exist. "
+ "Not merging default Azure cloud ephemeral disk configs."
+ % devpath,
+ logger_func=LOG.debug,
+ )
+ self.cfg = crawled_data["cfg"]
+
+ self._metadata_imds = crawled_data["metadata"]["imds"]
self.metadata = util.mergemanydict(
- [crawled_data['metadata'], DEFAULT_METADATA])
- self.userdata_raw = crawled_data['userdata_raw']
+ [crawled_data["metadata"], DEFAULT_METADATA]
+ )
+ self.userdata_raw = crawled_data["userdata_raw"]
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
@@ -631,41 +718,108 @@ class DataSourceAzure(sources.DataSource):
# walinux agent writes files world readable, but expects
# the directory to be protected.
write_files(
- self.ds_cfg['data_dir'], crawled_data['files'], dirmode=0o700)
+ self.ds_cfg["data_dir"], crawled_data["files"], dirmode=0o700
+ )
return True
+ @azure_ds_telemetry_reporter
+ def get_imds_data_with_api_fallback(
+ self,
+ *,
+ retries,
+ md_type=MetadataType.ALL,
+ exc_cb=retry_on_url_exc,
+ infinite=False,
+ ):
+ """
+ Wrapper for get_metadata_from_imds so that we can have flexibility
+ in which IMDS api-version we use. If a particular instance of IMDS
+ does not have the api version that is desired, we want to make
+ this fault tolerant and fall back to a good known minimum api
+ version.
+ """
+ for _ in range(retries):
+ try:
+ LOG.info("Attempting IMDS api-version: %s", IMDS_VER_WANT)
+ return get_metadata_from_imds(
+ retries=0,
+ md_type=md_type,
+ api_version=IMDS_VER_WANT,
+ exc_cb=exc_cb,
+ )
+ except UrlError as err:
+ LOG.info("UrlError with IMDS api-version: %s", IMDS_VER_WANT)
+ if err.code == 400:
+ log_msg = "Fall back to IMDS api-version: {}".format(
+ IMDS_VER_MIN
+ )
+ report_diagnostic_event(log_msg, logger_func=LOG.info)
+ break
+
+ LOG.info("Using IMDS api-version: %s", IMDS_VER_MIN)
+ return get_metadata_from_imds(
+ retries=retries,
+ md_type=md_type,
+ api_version=IMDS_VER_MIN,
+ exc_cb=exc_cb,
+ infinite=infinite,
+ )
+
def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
+ return self.ds_cfg["disk_aliases"].get(name)
@azure_ds_telemetry_reporter
- def get_public_ssh_keys(self):
+ def get_public_ssh_keys(self) -> List[str]:
+ """
+ Retrieve public SSH keys.
"""
- Try to get the ssh keys from IMDS first, and if that fails
- (i.e. IMDS is unavailable) then fallback to getting the ssh
- keys from OVF.
+ try:
+ return self._get_public_keys_from_imds(self.metadata["imds"])
+ except (KeyError, ValueError):
+ pass
+
+ return self._get_public_keys_from_ovf()
+
+ def _get_public_keys_from_imds(self, imds_md: dict) -> List[str]:
+ """Get SSH keys from IMDS metadata.
- The benefit to getting keys from IMDS is a large performance
- advantage, so this is a strong preference. But we must keep
- OVF as a second option for environments that don't have IMDS.
+ :raises KeyError: if IMDS metadata is malformed/missing.
+ :raises ValueError: if key format is not supported.
+
+ :returns: List of keys.
"""
- LOG.debug('Retrieving public SSH keys')
- ssh_keys = []
try:
ssh_keys = [
- public_key['keyData']
- for public_key
- in self.metadata['imds']['compute']['publicKeys']
+ public_key["keyData"]
+ for public_key in imds_md["compute"]["publicKeys"]
]
- LOG.debug('Retrieved SSH keys from IMDS')
except KeyError:
- log_msg = 'Unable to get keys from IMDS, falling back to OVF'
+ log_msg = "No SSH keys found in IMDS metadata"
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ raise
+
+ if any(not _key_is_openssh_formatted(key=key) for key in ssh_keys):
+ log_msg = "Key(s) not in OpenSSH format"
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ raise ValueError(log_msg)
+
+ log_msg = "Retrieved {} keys from IMDS".format(len(ssh_keys))
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ return ssh_keys
+
+ def _get_public_keys_from_ovf(self) -> List[str]:
+ """Get SSH keys that were fetched from wireserver.
+
+ :returns: List of keys.
+ """
+ ssh_keys = []
+ try:
+ ssh_keys = self.metadata["public-keys"]
+ log_msg = "Retrieved {} keys from OVF".format(len(ssh_keys))
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ except KeyError:
+ log_msg = "No keys available from OVF"
report_diagnostic_event(log_msg, logger_func=LOG.debug)
- try:
- ssh_keys = self.metadata['public-keys']
- LOG.debug('Retrieved keys from OVF')
- except KeyError:
- log_msg = 'No keys available from OVF'
- report_diagnostic_event(log_msg, logger_func=LOG.debug)
return ssh_keys
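The api-version negotiation in get_imds_data_with_api_fallback() earlier in this hunk reduces to a small pattern; a sketch with a hypothetical fetch callable standing in for get_metadata_from_imds():

from cloudinit.url_helper import UrlError

def fetch_with_fallback(fetch, want="2021-08-01", minimum="2019-06-01",
                        retries=10):
    for _ in range(retries):
        try:
            return fetch(api_version=want)
        except UrlError as err:
            if err.code == 400:  # this IMDS does not know `want`
                break
    # Fall back to the oldest api-version cloud-init supports.
    return fetch(api_version=minimum)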
@@ -678,33 +832,32 @@ class DataSourceAzure(sources.DataSource):
def _iid(self, previous=None):
prev_iid_path = os.path.join(
- self.paths.get_cpath('data'), 'instance-id')
- iid = dmi.read_dmi_data('system-uuid')
+ self.paths.get_cpath("data"), "instance-id"
+ )
+ # Older kernels than 4.15 will have UPPERCASE product_uuid.
+ # We don't want Azure to react to an UPPER/lower difference as a new
+ # instance id as it rewrites SSH host keys.
+ # LP: #1835584
+ system_uuid = dmi.read_dmi_data("system-uuid")
+ if system_uuid is None:
+ raise RuntimeError("failed to read system-uuid")
+
+ iid = system_uuid.lower()
if os.path.exists(prev_iid_path):
previous = util.load_file(prev_iid_path).strip()
- if is_byte_swapped(previous, iid):
+ if previous.lower() == iid:
+ # If uppercase/lowercase equivalent, return the previous value
+ # to avoid new instance id.
+ return previous
+ if is_byte_swapped(previous.lower(), iid):
return previous
return iid
@azure_ds_telemetry_reporter
- def setup(self, is_new_instance):
- if self._negotiated is False:
- LOG.debug("negotiating for %s (new_instance=%s)",
- self.get_instance_id(), is_new_instance)
- fabric_data = self._negotiate()
- LOG.debug("negotiating returned %s", fabric_data)
- if fabric_data:
- self.metadata.update(fabric_data)
- self._negotiated = True
- else:
- LOG.debug("negotiating already done for %s",
- self.get_instance_id())
-
- @azure_ds_telemetry_reporter
def _wait_for_nic_detach(self, nl_sock):
"""Use the netlink socket provided to wait for nic detach event.
- NOTE: The function doesn't close the socket. The caller owns closing
- the socket and disposing it safely.
+ NOTE: The function doesn't close the socket. The caller owns closing
+ the socket and disposing it safely.
"""
try:
ifname = None
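The _iid() change above (LP: #1835584) makes the instance-id comparison effectively case-insensitive, so a kernel upgrade past 4.15 — which changed product_uuid casing — does not masquerade as a new instance and regenerate SSH host keys. Concretely (the UUID is illustrative):

previous = "AAAAAAAA-BBBB-CCCC-DDDD-EEEEEEEEEEEE"  # cached, pre-4.15 casing
current = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"  # as read on >= 4.15
# _iid() now returns `previous` here rather than treating `current`
# as a brand-new instance id.
assert previous.lower() == current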
@@ -712,106 +865,124 @@ class DataSourceAzure(sources.DataSource):
# Preprovisioned VM will only have one NIC, and it gets
# detached immediately after deployment.
with events.ReportEventStack(
- name="wait-for-nic-detach",
- description=("wait for nic detach"),
- parent=azure_ds_reporter):
+ name="wait-for-nic-detach",
+ description="wait for nic detach",
+ parent=azure_ds_reporter,
+ ):
ifname = netlink.wait_for_nic_detach_event(nl_sock)
if ifname is None:
- msg = ("Preprovisioned nic not detached as expected. "
- "Proceeding without failing.")
+ msg = (
+ "Preprovisioned nic not detached as expected. "
+ "Proceeding without failing."
+ )
report_diagnostic_event(msg, logger_func=LOG.warning)
else:
- report_diagnostic_event("The preprovisioned nic %s is detached"
- % ifname, logger_func=LOG.warning)
+ report_diagnostic_event(
+ "The preprovisioned nic %s is detached" % ifname,
+ logger_func=LOG.warning,
+ )
path = REPROVISION_NIC_DETACHED_MARKER_FILE
LOG.info("Creating a marker file for nic detached: %s", path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ util.write_file(
+ path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
+ )
except AssertionError as error:
- report_diagnostic_event(error, logger_func=LOG.error)
+ report_diagnostic_event(str(error), logger_func=LOG.error)
raise
@azure_ds_telemetry_reporter
def wait_for_link_up(self, ifname):
"""In cases where the link state is still showing down after a nic is
- hot-attached, we can attempt to bring it up by forcing the hv_netvsc
- drivers to query the link state by unbinding and then binding the
- device. This function attempts infinitely until the link is up,
- because we cannot proceed further until we have a stable link."""
+ hot-attached, we can attempt to bring it up by forcing the hv_netvsc
+ drivers to query the link state by unbinding and then binding the
+ device. This function attempts infinitely until the link is up,
+ because we cannot proceed further until we have a stable link."""
if self.distro.networking.try_set_link_up(ifname):
- report_diagnostic_event("The link %s is already up." % ifname,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "The link %s is already up." % ifname, logger_func=LOG.info
+ )
return
- LOG.info("Attempting to bring %s up", ifname)
+ LOG.debug("Attempting to bring %s up", ifname)
attempts = 0
+ LOG.info("Unbinding and binding the interface %s", ifname)
while True:
-
- LOG.info("Unbinding and binding the interface %s", ifname)
- devicename = net.read_sys_net(ifname,
- 'device/device_id').strip('{}')
- util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/unbind',
- devicename)
- util.write_file('/sys/bus/vmbus/drivers/hv_netvsc/bind',
- devicename)
+ device_id = net.read_sys_net(ifname, "device/device_id")
+ if device_id is False or not isinstance(device_id, str):
+ raise RuntimeError("Unable to read device ID: %s" % device_id)
+ devicename = device_id.strip("{}")
+ util.write_file(
+ "/sys/bus/vmbus/drivers/hv_netvsc/unbind", devicename
+ )
+ util.write_file(
+ "/sys/bus/vmbus/drivers/hv_netvsc/bind", devicename
+ )
attempts = attempts + 1
if self.distro.networking.try_set_link_up(ifname):
- msg = "The link %s is up after %s attempts" % (ifname,
- attempts)
+ msg = "The link %s is up after %s attempts" % (
+ ifname,
+ attempts,
+ )
report_diagnostic_event(msg, logger_func=LOG.info)
return
- sleep_duration = 1
- msg = ("Link is not up after %d attempts with %d seconds sleep "
- "between attempts." % (attempts, sleep_duration))
-
if attempts % 10 == 0:
+ msg = "Link is not up after %d attempts to rebind" % attempts
report_diagnostic_event(msg, logger_func=LOG.info)
- else:
LOG.info(msg)
- sleep(sleep_duration)
+ # It could take some time after rebind for the interface to be up.
+ # So poll for the status for some time before attempting to rebind
+ # again.
+ sleep_duration = 0.5
+ max_status_polls = 20
+ LOG.debug(
+ "Polling %d seconds for primary NIC link up after rebind.",
+ sleep_duration * max_status_polls,
+ )
+
+ for i in range(0, max_status_polls):
+ if self.distro.networking.is_up(ifname):
+ msg = (
+ "After %d attempts to rebind, link is up after "
+ "polling the link status %d times" % (attempts, i)
+ )
+ report_diagnostic_event(msg, logger_func=LOG.info)
+ LOG.debug(msg)
+ return
+ else:
+ sleep(sleep_duration)
@azure_ds_telemetry_reporter
def _create_report_ready_marker(self):
path = REPORTED_READY_MARKER_FILE
- LOG.info(
- "Creating a marker file to report ready: %s", path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
+ LOG.info("Creating a marker file to report ready: %s", path)
+ util.write_file(
+ path, "{pid}: {time}\n".format(pid=os.getpid(), time=time())
+ )
report_diagnostic_event(
- 'Successfully created reported ready marker file '
- 'while in the preprovisioning pool.',
- logger_func=LOG.debug)
+ "Successfully created reported ready marker file "
+ "while in the preprovisioning pool.",
+ logger_func=LOG.debug,
+ )
@azure_ds_telemetry_reporter
- def _report_ready_if_needed(self):
- """Report ready to the platform if the marker file is not present,
- and create the marker file.
+ def _report_ready_for_pps(self) -> None:
+ """Report ready for PPS, creating the marker file upon completion.
+
+ :raises sources.InvalidMetaDataException: On error reporting ready.
"""
- have_not_reported_ready = (
- not os.path.isfile(REPORTED_READY_MARKER_FILE))
+ try:
+ self._report_ready()
+ except Exception as error:
+ msg = "Failed reporting ready while in the preprovisioning pool."
+ report_diagnostic_event(msg, logger_func=LOG.error)
+ raise sources.InvalidMetaDataException(msg) from error
- if have_not_reported_ready:
- report_diagnostic_event("Reporting ready before nic detach",
- logger_func=LOG.info)
- try:
- with EphemeralDHCPv4WithReporting(azure_ds_reporter) as lease:
- self._report_ready(lease=lease)
- except Exception as e:
- report_diagnostic_event("Exception reporting ready during "
- "preprovisioning before nic detach: %s"
- % e, logger_func=LOG.error)
- raise
- self._create_report_ready_marker()
- else:
- report_diagnostic_event("Already reported ready before nic detach."
- " The marker file already exists: %s" %
- REPORTED_READY_MARKER_FILE,
- logger_func=LOG.error)
+ self._create_report_ready_marker()
@azure_ds_telemetry_reporter
def _check_if_nic_is_primary(self, ifname):
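The wait_for_link_up() rewrite earlier in this hunk still drives the same sysfs knobs; only the polling cadence around them changed. A minimal sketch of the rebind step, assuming an hv_netvsc-backed interface and omitting the error handling the real code adds:

from cloudinit import net, util

def rebind_hv_netvsc(ifname):
    # Write the VMBus device id to the driver's unbind/bind attributes
    # to force hv_netvsc to re-query the link state.
    device_id = net.read_sys_net(ifname, "device/device_id")
    devicename = device_id.strip("{}")
    util.write_file("/sys/bus/vmbus/drivers/hv_netvsc/unbind", devicename)
    util.write_file("/sys/bus/vmbus/drivers/hv_netvsc/bind", devicename)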
@@ -822,61 +993,89 @@ class DataSourceAzure(sources.DataSource):
is_primary = False
expected_nic_count = -1
imds_md = None
+ metadata_poll_count = 0
+ metadata_logging_threshold = 1
+ expected_errors_count = 0
# For now, only a VM's primary NIC can contact IMDS and WireServer. If
# DHCP fails for a NIC, we have no mechanism to determine if the NIC is
- # primary or secondary. In this case, the desired behavior is to fail
- # VM provisioning if there is any DHCP failure when trying to determine
- # the primary NIC.
- try:
- with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description=("obtain dhcp lease for %s when attempting to "
- "determine primary NIC during reprovision of "
- "a pre-provisioned VM" % ifname),
- parent=azure_ds_reporter):
- dhcp_ctx = EphemeralDHCPv4(
- iface=ifname,
- dhcp_log_func=dhcp_log_cb)
- dhcp_ctx.obtain_lease()
- except Exception as e:
- report_diagnostic_event("Giving up. Failed to obtain dhcp lease "
- "for %s when attempting to determine "
- "primary NIC during reprovision due to %s"
- % (ifname, e), logger_func=LOG.error)
- raise
+ # primary or secondary. In this case, retry DHCP until successful.
+ self._setup_ephemeral_networking(iface=ifname, timeout_minutes=20)
+
+ # Retry polling network metadata for a limited duration only when the
+ # calls fail due to network unreachable error or timeout.
+ # This is because the platform drops packets going towards IMDS
+ # when it is not a primary nic. If the calls fail due to other issues
+ # like 410, 503 etc, then it means we are primary but IMDS service
+ # is unavailable at the moment. Retry indefinitely in those cases
+ # since we cannot move on without the network metadata. In the future,
+ # all this will not be necessary, as a new dhcp option would tell
+ # whether the nic is primary or not.
+ def network_metadata_exc_cb(msg, exc):
+ nonlocal expected_errors_count, metadata_poll_count
+ nonlocal metadata_logging_threshold
+
+ metadata_poll_count = metadata_poll_count + 1
+
+ # Log when needed but back off exponentially to avoid exploding
+ # the log file.
+ if metadata_poll_count >= metadata_logging_threshold:
+ metadata_logging_threshold *= 2
+ report_diagnostic_event(
+ "Ran into exception when attempting to reach %s "
+ "after %d polls." % (msg, metadata_poll_count),
+ logger_func=LOG.error,
+ )
+
+ if isinstance(exc, UrlError):
+ report_diagnostic_event(
+ "poll IMDS with %s failed. Exception: %s and code: %s"
+ % (msg, exc.cause, exc.code),
+ logger_func=LOG.error,
+ )
+
+ # Retry up to a certain limit for both timeout and network
+ # unreachable errors.
+ if exc.cause and isinstance(
+ exc.cause, (requests.Timeout, requests.ConnectionError)
+ ):
+ expected_errors_count = expected_errors_count + 1
+ return expected_errors_count <= 10
+ return True
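
For illustration, a minimal standalone sketch of the retry-callback pattern used by network_metadata_exc_cb above: connection-level failures (which on Azure may simply mean the nic is not primary) are retried a bounded number of times, everything else indefinitely, and logging backs off exponentially. The names here are hypothetical, not the cloud-init API:

    import requests

    class PollState:
        def __init__(self, max_expected_errors=10):
            self.polls = 0
            self.log_threshold = 1        # log on polls 1, 2, 4, 8, ...
            self.expected_errors = 0
            self.max_expected_errors = max_expected_errors

        def should_retry(self, exc) -> bool:
            self.polls += 1
            if self.polls >= self.log_threshold:
                self.log_threshold *= 2  # halve future logging frequency
                print("still polling after %d attempts: %r" % (self.polls, exc))
            if isinstance(exc, (requests.Timeout, requests.ConnectionError)):
                # Possibly a secondary nic whose IMDS traffic is dropped:
                # give up after a bounded number of these.
                self.expected_errors += 1
                return self.expected_errors <= self.max_expected_errors
            # Other errors (410, 503, ...) mean IMDS is reachable but
            # temporarily unavailable: keep retrying.
            return True
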
# Primary nic detection will be optimized in the future. The fact that
# primary nic is being attached first helps here. Otherwise each nic
# could add several seconds of delay.
try:
- imds_md = get_metadata_from_imds(
- ifname,
- 5,
- metadata_type.network)
+ imds_md = self.get_imds_data_with_api_fallback(
+ retries=0,
+ md_type=MetadataType.NETWORK,
+ exc_cb=network_metadata_exc_cb,
+ infinite=True,
+ )
except Exception as e:
LOG.warning(
"Failed to get network metadata using nic %s. Attempt to "
"contact IMDS failed with error %s. Assuming this is not the "
- "primary nic.", ifname, e)
- finally:
- # If we are not the primary nic, then clean the dhcp context.
- if imds_md is None:
- dhcp_ctx.clean_network()
+ "primary nic.",
+ ifname,
+ e,
+ )
- if imds_md is not None:
+ if imds_md:
# Only primary NIC will get a response from IMDS.
LOG.info("%s is the primary nic", ifname)
is_primary = True
- # If primary, set ephemeral dhcp ctx so we can report ready
- self._ephemeral_dhcp_ctx = dhcp_ctx
-
# Set the expected nic count based on the response received.
- expected_nic_count = len(
- imds_md['interface'])
- report_diagnostic_event("Expected nic count: %d" %
- expected_nic_count, logger_func=LOG.info)
+ expected_nic_count = len(imds_md["interface"])
+ report_diagnostic_event(
+ "Expected nic count: %d" % expected_nic_count,
+ logger_func=LOG.info,
+ )
+ else:
+ # If we are not the primary nic, then clean the dhcp context.
+ self._teardown_ephemeral_networking()
return is_primary, expected_nic_count
@@ -901,17 +1100,22 @@ class DataSourceAzure(sources.DataSource):
while True:
ifname = None
with events.ReportEventStack(
- name="wait-for-nic-attach",
- description=("wait for nic attach after %d nics have "
- "been attached" % len(nics_found)),
- parent=azure_ds_reporter):
- ifname = netlink.wait_for_nic_attach_event(nl_sock,
- nics_found)
+ name="wait-for-nic-attach",
+ description=(
+ "wait for nic attach after %d nics have been attached"
+ % len(nics_found)
+ ),
+ parent=azure_ds_reporter,
+ ):
+ ifname = netlink.wait_for_nic_attach_event(
+ nl_sock, nics_found
+ )
            # wait_for_nic_attach_event guarantees that ifname is not None
nics_found.append(ifname)
- report_diagnostic_event("Detected nic %s attached." % ifname,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Detected nic %s attached." % ifname, logger_func=LOG.info
+ )
# Attempt to bring the interface's operating state to
# UP in case it is not already.
@@ -921,26 +1125,29 @@ class DataSourceAzure(sources.DataSource):
# platform will attach the primary nic first so we
# won't be in primary_nic_found = false state for long.
if not primary_nic_found:
- LOG.info("Checking if %s is the primary nic",
- ifname)
- (primary_nic_found, expected_nic_count) = (
- self._check_if_nic_is_primary(ifname))
+ LOG.info("Checking if %s is the primary nic", ifname)
+ (
+ primary_nic_found,
+ expected_nic_count,
+ ) = self._check_if_nic_is_primary(ifname)
# Exit criteria: check if we've discovered all nics
- if (expected_nic_count != -1
- and len(nics_found) >= expected_nic_count):
+ if (
+ expected_nic_count != -1
+ and len(nics_found) >= expected_nic_count
+ ):
LOG.info("Found all the nics for this VM.")
break
except AssertionError as error:
- report_diagnostic_event(error, logger_func=LOG.error)
+ report_diagnostic_event(str(error), logger_func=LOG.error)
@azure_ds_telemetry_reporter
def _wait_for_all_nics_ready(self):
"""Wait for nic(s) to be hot-attached. There may be multiple nics
- depending on the customer request.
- But only primary nic would be able to communicate with wireserver
- and IMDS. So we detect and save the primary nic to be used later.
+        depending on the customer request.
+        But only the primary nic can communicate with wireserver
+        and IMDS, so we detect and save the primary nic to be used later.
"""
nl_sock = None
@@ -948,18 +1155,22 @@ class DataSourceAzure(sources.DataSource):
nl_sock = netlink.create_bound_netlink_socket()
report_ready_marker_present = bool(
- os.path.isfile(REPORTED_READY_MARKER_FILE))
+ os.path.isfile(REPORTED_READY_MARKER_FILE)
+ )
# Report ready if the marker file is not already present.
# The nic of the preprovisioned vm gets hot-detached as soon as
# we report ready. So no need to save the dhcp context.
- self._report_ready_if_needed()
+ if not os.path.isfile(REPORTED_READY_MARKER_FILE):
+ self._report_ready_for_pps()
has_nic_been_detached = bool(
- os.path.isfile(REPROVISION_NIC_DETACHED_MARKER_FILE))
+ os.path.isfile(REPROVISION_NIC_DETACHED_MARKER_FILE)
+ )
if not has_nic_been_detached:
LOG.info("NIC has not been detached yet.")
+ self._teardown_ephemeral_networking()
self._wait_for_nic_detach(nl_sock)
# If we know that the preprovisioned nic has been detached, and we
@@ -970,31 +1181,35 @@ class DataSourceAzure(sources.DataSource):
if not self.fallback_interface:
self._wait_for_hot_attached_nics(nl_sock)
else:
- report_diagnostic_event("Skipping waiting for nic attach "
- "because we already have a fallback "
- "interface. Report Ready marker "
- "present before detaching nics: %s" %
- report_ready_marker_present,
- logger_func=LOG.info)
+ report_diagnostic_event(
+ "Skipping waiting for nic attach "
+ "because we already have a fallback "
+ "interface. Report Ready marker "
+ "present before detaching nics: %s"
+ % report_ready_marker_present,
+ logger_func=LOG.info,
+ )
except netlink.NetlinkCreateSocketError as e:
- report_diagnostic_event(e, logger_func=LOG.warning)
+ report_diagnostic_event(str(e), logger_func=LOG.warning)
raise
finally:
if nl_sock:
nl_sock.close()
+ @azure_ds_telemetry_reporter
def _poll_imds(self):
"""Poll IMDS for the new provisioning data until we get a valid
response. Then return the returned JSON object."""
- url = metadata_type.reprovisiondata.value
+ url = "{}?api-version={}".format(
+ MetadataType.REPROVISION_DATA.value, IMDS_VER_MIN
+ )
headers = {"Metadata": "true"}
nl_sock = None
report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE))
self.imds_logging_threshold = 1
self.imds_poll_counter = 1
dhcp_attempts = 0
- vnet_switched = False
- return_val = None
+ reprovision_data = None
def exc_cb(msg, exception):
if isinstance(exception, UrlError):
@@ -1002,339 +1217,328 @@ class DataSourceAzure(sources.DataSource):
if self.imds_poll_counter == self.imds_logging_threshold:
# Reducing the logging frequency as we are polling IMDS
self.imds_logging_threshold *= 2
- LOG.debug("Backing off logging threshold for the same "
- "exception to %d",
- self.imds_logging_threshold)
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exception.cause,
- exception.code),
- logger_func=LOG.debug)
+ LOG.debug(
+ "Backing off logging threshold for the same "
+ "exception to %d",
+ self.imds_logging_threshold,
+ )
+ report_diagnostic_event(
+ "poll IMDS with %s failed. "
+ "Exception: %s and code: %s"
+ % (msg, exception.cause, exception.code),
+ logger_func=LOG.debug,
+ )
self.imds_poll_counter += 1
return True
else:
# If we get an exception while trying to call IMDS, we call
# DHCP and setup the ephemeral network to acquire a new IP.
- report_diagnostic_event("poll IMDS with %s failed. "
- "Exception: %s and code: %s" %
- (msg, exception.cause,
- exception.code),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "poll IMDS with %s failed. Exception: %s and code: %s"
+ % (msg, exception.cause, exception.code),
+ logger_func=LOG.warning,
+ )
return False
report_diagnostic_event(
- "poll IMDS failed with an "
- "unexpected exception: %s" % exception,
- logger_func=LOG.warning)
+ "poll IMDS failed with an unexpected exception: %s"
+ % exception,
+ logger_func=LOG.warning,
+ )
return False
- # When the interface is hot-attached, we would have already
- # done dhcp and set the dhcp context. In that case, skip
- # the attempt to do dhcp.
- is_ephemeral_ctx_present = self._ephemeral_dhcp_ctx is not None
- msg = ("Unexpected error. Dhcp context is not expected to be already "
- "set when we need to wait for vnet switch")
- if is_ephemeral_ctx_present and report_ready:
- report_diagnostic_event(msg, logger_func=LOG.error)
- raise RuntimeError(msg)
+ if report_ready:
+ # Networking must be up for netlink to detect
+            # media disconnect/connect. It may be down due to an
+            # initial DHCP failure; if so, check for it and retry,
+            # ensuring we flag it as required.
+ if not self._is_ephemeral_networking_up():
+ self._setup_ephemeral_networking(timeout_minutes=20)
- while True:
try:
- # Since is_ephemeral_ctx_present is set only once, this ensures
- # that with regular reprovisioning, dhcp is always done every
- # time the loop runs.
- if not is_ephemeral_ctx_present:
- # Save our EphemeralDHCPv4 context to avoid repeated dhcp
- # later when we report ready
- with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=azure_ds_reporter):
- self._ephemeral_dhcp_ctx = EphemeralDHCPv4(
- dhcp_log_func=dhcp_log_cb)
- lease = self._ephemeral_dhcp_ctx.obtain_lease()
-
- if vnet_switched:
- dhcp_attempts += 1
- if report_ready:
+ if (
+ self._ephemeral_dhcp_ctx is None
+ or self._ephemeral_dhcp_ctx.iface is None
+ ):
+ raise RuntimeError("Missing ephemeral context")
+ iface = self._ephemeral_dhcp_ctx.iface
+
+ nl_sock = netlink.create_bound_netlink_socket()
+ self._report_ready_for_pps()
+
+ LOG.debug(
+ "Wait for vnetswitch to happen on %s",
+ iface,
+ )
+ with events.ReportEventStack(
+ name="wait-for-media-disconnect-connect",
+ description="wait for vnet switch",
+ parent=azure_ds_reporter,
+ ):
try:
- nl_sock = netlink.create_bound_netlink_socket()
- except netlink.NetlinkCreateSocketError as e:
+ netlink.wait_for_media_disconnect_connect(
+ nl_sock, iface
+ )
+ except AssertionError as e:
report_diagnostic_event(
- 'Failed to create bound netlink socket: %s' % e,
- logger_func=LOG.warning)
- self._ephemeral_dhcp_ctx.clean_network()
- break
-
- report_ready_succeeded = self._report_ready(lease=lease)
- if not report_ready_succeeded:
- msg = ('Failed reporting ready while in '
- 'the preprovisioning pool.')
- report_diagnostic_event(msg, logger_func=LOG.error)
- self._ephemeral_dhcp_ctx.clean_network()
- raise sources.InvalidMetaDataException(msg)
-
- self._create_report_ready_marker()
- report_ready = False
-
- LOG.debug("Wait for vnetswitch to happen")
- with events.ReportEventStack(
- name="wait-for-media-disconnect-connect",
- description="wait for vnet switch",
- parent=azure_ds_reporter):
- try:
- netlink.wait_for_media_disconnect_connect(
- nl_sock, lease['interface'])
- except AssertionError as e:
- report_diagnostic_event(
- 'Error while waiting for vnet switch: %s' % e,
- logger_func=LOG.error)
- break
-
- vnet_switched = True
- self._ephemeral_dhcp_ctx.clean_network()
- else:
- with events.ReportEventStack(
- name="get-reprovision-data-from-imds",
- description="get reprovision data from imds",
- parent=azure_ds_reporter):
- return_val = readurl(url,
- timeout=IMDS_TIMEOUT_IN_SECONDS,
- headers=headers,
- exception_cb=exc_cb,
- infinite=True,
- log_req_resp=False).contents
- break
- except UrlError:
- # Teardown our EphemeralDHCPv4 context on failure as we retry
- self._ephemeral_dhcp_ctx.clean_network()
+ "Error while waiting for vnet switch: %s" % e,
+ logger_func=LOG.error,
+ )
+ except netlink.NetlinkCreateSocketError as e:
+ report_diagnostic_event(
+ "Failed to create bound netlink socket: %s" % e,
+ logger_func=LOG.warning,
+ )
+ raise sources.InvalidMetaDataException(
+ "Failed to report ready while in provisioning pool."
+ ) from e
+ except NoDHCPLeaseError as e:
+ report_diagnostic_event(
+ "DHCP failed while in provisioning pool",
+ logger_func=LOG.warning,
+ )
+ raise sources.InvalidMetaDataException(
+ "Failed to report ready while in provisioning pool."
+ ) from e
finally:
if nl_sock:
nl_sock.close()
- if vnet_switched:
- report_diagnostic_event("attempted dhcp %d times after reuse" %
- dhcp_attempts,
- logger_func=LOG.debug)
- report_diagnostic_event("polled imds %d times after reuse" %
- self.imds_poll_counter,
- logger_func=LOG.debug)
+ # Teardown old network configuration.
+ self._teardown_ephemeral_networking()
- return return_val
+ while not reprovision_data:
+ if not self._is_ephemeral_networking_up():
+ dhcp_attempts += 1
+ try:
+ self._setup_ephemeral_networking(timeout_minutes=5)
+ except NoDHCPLeaseError:
+ continue
+
+ with events.ReportEventStack(
+ name="get-reprovision-data-from-imds",
+ description="get reprovision data from imds",
+ parent=azure_ds_reporter,
+ ):
+ try:
+ reprovision_data = readurl(
+ url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ exception_cb=exc_cb,
+ infinite=True,
+ log_req_resp=False,
+ ).contents
+ except UrlError:
+ self._teardown_ephemeral_networking()
+ continue
+
+ report_diagnostic_event(
+ "attempted dhcp %d times after reuse" % dhcp_attempts,
+ logger_func=LOG.debug,
+ )
+ report_diagnostic_event(
+ "polled imds %d times after reuse" % self.imds_poll_counter,
+ logger_func=LOG.debug,
+ )
+
+ return reprovision_data
@azure_ds_telemetry_reporter
- def _report_failure(self, description=None) -> bool:
+ def _report_failure(self, description: Optional[str] = None) -> bool:
"""Tells the Azure fabric that provisioning has failed.
@param description: A description of the error encountered.
@return: The success status of sending the failure signal.
"""
- unknown_245_key = 'unknown-245'
-
- try:
- if (self.distro.networking.is_up(self.fallback_interface) and
- getattr(self, '_ephemeral_dhcp_ctx', None) and
- getattr(self._ephemeral_dhcp_ctx, 'lease', None) and
- unknown_245_key in self._ephemeral_dhcp_ctx.lease):
+ if self._is_ephemeral_networking_up():
+ try:
report_diagnostic_event(
- 'Using cached ephemeral dhcp context '
- 'to report failure to Azure', logger_func=LOG.debug)
+ "Using cached ephemeral dhcp context "
+ "to report failure to Azure",
+ logger_func=LOG.debug,
+ )
report_failure_to_fabric(
- dhcp_opts=self._ephemeral_dhcp_ctx.lease[unknown_245_key],
- description=description)
- self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral
+ dhcp_opts=self._wireserver_endpoint,
+ description=description,
+ )
return True
- except Exception as e:
- report_diagnostic_event(
- 'Failed to report failure using '
- 'cached ephemeral dhcp context: %s' % e,
- logger_func=LOG.error)
-
- try:
- report_diagnostic_event(
- 'Using new ephemeral dhcp to report failure to Azure',
- logger_func=LOG.debug)
- with EphemeralDHCPv4WithReporting(azure_ds_reporter) as lease:
- report_failure_to_fabric(
- dhcp_opts=lease[unknown_245_key],
- description=description)
- return True
- except Exception as e:
- report_diagnostic_event(
- 'Failed to report failure using new ephemeral dhcp: %s' % e,
- logger_func=LOG.debug)
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed to report failure using "
+ "cached ephemeral dhcp context: %s" % e,
+ logger_func=LOG.error,
+ )
try:
report_diagnostic_event(
- 'Using fallback lease to report failure to Azure')
+ "Using new ephemeral dhcp to report failure to Azure",
+ logger_func=LOG.debug,
+ )
+ self._teardown_ephemeral_networking()
+ try:
+ self._setup_ephemeral_networking(timeout_minutes=20)
+ except NoDHCPLeaseError:
+ # Reporting failure will fail, but it will emit telemetry.
+ pass
report_failure_to_fabric(
- fallback_lease_file=self.dhclient_lease_file,
- description=description)
+ dhcp_opts=self._wireserver_endpoint, description=description
+ )
return True
except Exception as e:
report_diagnostic_event(
- 'Failed to report failure using fallback lease: %s' % e,
- logger_func=LOG.debug)
+ "Failed to report failure using new ephemeral dhcp: %s" % e,
+ logger_func=LOG.debug,
+ )
return False
- def _report_ready(self, lease: dict) -> bool:
+ @azure_ds_telemetry_reporter
+ def _report_ready(
+ self, *, pubkey_info: Optional[List[str]] = None
+ ) -> Optional[List[str]]:
"""Tells the fabric provisioning has completed.
- @param lease: dhcp lease to use for sending the ready signal.
- @return: The success status of sending the ready signal.
+ :param pubkey_info: Fingerprints of keys to request from Wireserver.
+
+ :raises Exception: if failed to report.
+
+ :returns: List of SSH keys, if requested.
"""
try:
- get_metadata_from_fabric(None, lease['unknown-245'])
- return True
+ data = get_metadata_from_fabric(
+ fallback_lease_file=None,
+ dhcp_opts=self._wireserver_endpoint,
+ iso_dev=self._iso_dev,
+ pubkey_info=pubkey_info,
+ )
except Exception as e:
report_diagnostic_event(
"Error communicating with Azure fabric; You may experience "
- "connectivity issues: %s" % e, logger_func=LOG.warning)
- return False
+ "connectivity issues: %s" % e,
+ logger_func=LOG.warning,
+ )
+ raise
- def _should_reprovision_after_nic_attach(self, candidate_metadata) -> bool:
- """Whether or not we should wait for nic attach and then poll
- IMDS for reprovisioning data. Also sets a marker file to poll IMDS.
+ # Reporting ready ejected OVF media, no need to do so again.
+ self._iso_dev = None
+ return data
- The marker file is used for the following scenario: the VM boots into
- wait for nic attach, which we expect to be proceeding infinitely until
- the nic is attached. If for whatever reason the platform moves us to a
- new host (for instance a hardware issue), we need to keep waiting.
- However, since the VM reports ready to the Fabric, we will not attach
- the ISO, thus cloud-init needs to have a way of knowing that it should
- jump back into the waiting mode in order to retrieve the ovf_env.
+ def _ppstype_from_imds(self, imds_md: dict) -> Optional[str]:
+ try:
+ return imds_md["extended"]["compute"]["ppsType"]
+ except Exception as e:
+ report_diagnostic_event(
+ "Could not retrieve pps configuration from IMDS: %s" % e,
+ logger_func=LOG.debug,
+ )
+ return None
- @param candidate_metadata: Metadata obtained from reading ovf-env.
- @return: Whether to reprovision after waiting for nics to be attached.
- """
- if not candidate_metadata:
- return False
- (_md, _userdata_raw, cfg, _files) = candidate_metadata
- path = REPROVISION_NIC_ATTACH_MARKER_FILE
- if (cfg.get('PreprovisionedVMType', None) == "Savable" or
- os.path.isfile(path)):
- if not os.path.isfile(path):
- LOG.info("Creating a marker file to wait for nic attach: %s",
- path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
- return True
- return False
+ def _determine_pps_type(self, ovf_cfg: dict, imds_md: dict) -> PPSType:
+ """Determine PPS type using OVF, IMDS data, and reprovision marker."""
+ if os.path.isfile(REPROVISION_MARKER_FILE):
+ pps_type = PPSType.UNKNOWN
+ elif (
+ ovf_cfg.get("PreprovisionedVMType", None) == PPSType.SAVABLE.value
+ or self._ppstype_from_imds(imds_md) == PPSType.SAVABLE.value
+ ):
+ pps_type = PPSType.SAVABLE
+ elif (
+ ovf_cfg.get("PreprovisionedVm") is True
+ or ovf_cfg.get("PreprovisionedVMType", None)
+ == PPSType.RUNNING.value
+ or self._ppstype_from_imds(imds_md) == PPSType.RUNNING.value
+ ):
+ pps_type = PPSType.RUNNING
+ else:
+ pps_type = PPSType.NONE
- def _should_reprovision(self, ret):
- """Whether or not we should poll IMDS for reprovisioning data.
- Also sets a marker file to poll IMDS.
-
- The marker file is used for the following scenario: the VM boots into
- this polling loop, which we expect to be proceeding infinitely until
- the VM is picked. If for whatever reason the platform moves us to a
- new host (for instance a hardware issue), we need to keep polling.
- However, since the VM reports ready to the Fabric, we will not attach
- the ISO, thus cloud-init needs to have a way of knowing that it should
- jump back into the polling loop in order to retrieve the ovf_env."""
- if not ret:
- return False
- (_md, _userdata_raw, cfg, _files) = ret
- path = REPROVISION_MARKER_FILE
- if (cfg.get('PreprovisionedVm') is True or
- cfg.get('PreprovisionedVMType', None) == 'Running' or
- os.path.isfile(path)):
- if not os.path.isfile(path):
- LOG.info("Creating a marker file to poll imds: %s",
- path)
- util.write_file(path, "{pid}: {time}\n".format(
- pid=os.getpid(), time=time()))
- return True
- return False
+ report_diagnostic_event(
+ "PPS type: %s" % pps_type.value, logger_func=LOG.info
+ )
+ return pps_type
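
The precedence above is: an existing reprovision marker wins (UNKNOWN), then Savable, then Running, else none. A hypothetical, self-contained illustration of the same decision order (the enum values are assumed to mirror the strings checked above):

    import os
    from enum import Enum

    class PPSType(Enum):  # assumed values, mirroring the checks above
        NONE = "None"
        UNKNOWN = "Unknown"
        RUNNING = "Running"
        SAVABLE = "Savable"

    def determine_pps_type(ovf_cfg, imds_md,
                           marker="/tmp/reprovision.marker"):  # hypothetical path
        pps = imds_md.get("extended", {}).get("compute", {}).get("ppsType")
        if os.path.isfile(marker):
            return PPSType.UNKNOWN
        if ovf_cfg.get("PreprovisionedVMType") == "Savable" or pps == "Savable":
            return PPSType.SAVABLE
        if (ovf_cfg.get("PreprovisionedVm") is True
                or ovf_cfg.get("PreprovisionedVMType") == "Running"
                or pps == "Running"):
            return PPSType.RUNNING
        return PPSType.NONE

    # assumes the marker path does not exist:
    assert determine_pps_type({"PreprovisionedVm": True}, {}) is PPSType.RUNNING
    assert determine_pps_type({}, {}) is PPSType.NONE
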
+ def _write_reprovision_marker(self):
+ """Write reprovision marker file in case system is rebooted."""
+ LOG.info(
+ "Creating a marker file to poll imds: %s", REPROVISION_MARKER_FILE
+ )
+ util.write_file(
+ REPROVISION_MARKER_FILE,
+ "{pid}: {time}\n".format(pid=os.getpid(), time=time()),
+ )
+
+ @azure_ds_telemetry_reporter
def _reprovision(self):
- """Initiate the reprovisioning workflow."""
+ """Initiate the reprovisioning workflow.
+
+ Ephemeral networking is up upon successful reprovisioning.
+ """
contents = self._poll_imds()
with events.ReportEventStack(
- name="reprovisioning-read-azure-ovf",
- description="read azure ovf during reprovisioning",
- parent=azure_ds_reporter):
+ name="reprovisioning-read-azure-ovf",
+ description="read azure ovf during reprovisioning",
+ parent=azure_ds_reporter,
+ ):
md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
+ return (md, ud, cfg, {"ovf-env.xml": contents})
@azure_ds_telemetry_reporter
- def _negotiate(self):
- """Negotiate with fabric and return data from it.
+ def _determine_wireserver_pubkey_info(
+ self, *, cfg: dict, imds_md: dict
+ ) -> Optional[List[str]]:
+ """Determine the fingerprints we need to retrieve from Wireserver.
- On success, returns a dictionary including 'public_keys'.
- On failure, returns False.
+ :return: List of keys to request from Wireserver, if any, else None.
"""
-
- if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN:
- self.bounce_network_with_azure_hostname()
-
- pubkey_info = None
- try:
- public_keys = self.metadata['imds']['compute']['publicKeys']
- LOG.debug(
- 'Successfully retrieved %s key(s) from IMDS',
- len(public_keys)
- if public_keys is not None
- else 0
- )
- except KeyError:
- LOG.debug(
- 'Unable to retrieve SSH keys from IMDS during '
- 'negotiation, falling back to OVF'
- )
- pubkey_info = self.cfg.get('_pubkeys', None)
-
- metadata_func = partial(get_metadata_from_fabric,
- fallback_lease_file=self.
- dhclient_lease_file,
- pubkey_info=pubkey_info)
- else:
- metadata_func = self.get_metadata_from_agent
-
- LOG.debug("negotiating with fabric via agent command %s",
- self.ds_cfg['agent_command'])
+ pubkey_info: Optional[List[str]] = None
try:
- fabric_data = metadata_func()
- except Exception as e:
- report_diagnostic_event(
- "Error communicating with Azure fabric; You may experience "
- "connectivity issues: %s" % e, logger_func=LOG.warning)
- return False
+ self._get_public_keys_from_imds(imds_md)
+ except (KeyError, ValueError):
+ pubkey_info = cfg.get("_pubkeys", None)
+ log_msg = "Retrieved {} fingerprints from OVF".format(
+ len(pubkey_info) if pubkey_info is not None else 0
+ )
+ report_diagnostic_event(log_msg, logger_func=LOG.debug)
+ return pubkey_info
+ def _cleanup_markers(self):
+ """Cleanup any marker files."""
util.del_file(REPORTED_READY_MARKER_FILE)
util.del_file(REPROVISION_MARKER_FILE)
- util.del_file(REPROVISION_NIC_ATTACH_MARKER_FILE)
util.del_file(REPROVISION_NIC_DETACHED_MARKER_FILE)
- return fabric_data
@azure_ds_telemetry_reporter
def activate(self, cfg, is_new_instance):
try:
- address_ephemeral_resize(is_new_instance=is_new_instance,
- preserve_ntfs=self.ds_cfg.get(
- DS_CFG_KEY_PRESERVE_NTFS, False))
+ address_ephemeral_resize(
+ is_new_instance=is_new_instance,
+ preserve_ntfs=self.ds_cfg.get(DS_CFG_KEY_PRESERVE_NTFS, False),
+ )
finally:
- push_log_to_kvp(self.sys_cfg['def_log_file'])
+ push_log_to_kvp(self.sys_cfg["def_log_file"])
return
@property
def availability_zone(self):
- return self.metadata.get(
- 'imds', {}).get('compute', {}).get('platformFaultDomain')
+ return (
+ self.metadata.get("imds", {})
+ .get("compute", {})
+ .get("platformFaultDomain")
+ )
@property
def network_config(self):
"""Generate a network config like net.generate_fallback_network() with
- the following exceptions.
+ the following exceptions.
- 1. Probe the drivers of the net-devices present and inject them in
- the network configuration under params: driver: <driver> value
- 2. Generate a fallback network config that does not include any of
- the blacklisted devices.
+ 1. Probe the drivers of the net-devices present and inject them in
+ the network configuration under params: driver: <driver> value
+ 2. Generate a fallback network config that does not include any of
+ the blacklisted devices.
"""
if not self._network_config or self._network_config == sources.UNSET:
- if self.ds_cfg.get('apply_network_config'):
+ if self.ds_cfg.get("apply_network_config"):
nc_src = self._metadata_imds
else:
nc_src = None
@@ -1343,7 +1547,103 @@ class DataSourceAzure(sources.DataSource):
@property
def region(self):
- return self.metadata.get('imds', {}).get('compute', {}).get('location')
+ return self.metadata.get("imds", {}).get("compute", {}).get("location")
+
+ @azure_ds_telemetry_reporter
+ def validate_imds_network_metadata(self, imds_md: dict) -> bool:
+ """Validate IMDS network config and report telemetry for errors."""
+ local_macs = get_hv_netvsc_macs_normalized()
+
+ try:
+ network_config = imds_md["network"]
+ imds_macs = [
+ normalize_mac_address(i["macAddress"])
+ for i in network_config["interface"]
+ ]
+ except KeyError:
+ report_diagnostic_event(
+ "IMDS network metadata has incomplete configuration: %r"
+ % imds_md.get("network"),
+ logger_func=LOG.warning,
+ )
+ return False
+
+ missing_macs = [m for m in local_macs if m not in imds_macs]
+ if not missing_macs:
+ return True
+
+ report_diagnostic_event(
+ "IMDS network metadata is missing configuration for NICs %r: %r"
+ % (missing_macs, network_config),
+ logger_func=LOG.warning,
+ )
+
+ if not self._ephemeral_dhcp_ctx or not self._ephemeral_dhcp_ctx.iface:
+ # No primary interface to check against.
+ return False
+
+ primary_mac = net.get_interface_mac(self._ephemeral_dhcp_ctx.iface)
+ if not primary_mac or not isinstance(primary_mac, str):
+ # Unexpected data for primary interface.
+ return False
+
+ primary_mac = normalize_mac_address(primary_mac)
+ if primary_mac in missing_macs:
+ report_diagnostic_event(
+ "IMDS network metadata is missing primary NIC %r: %r"
+ % (primary_mac, network_config),
+ logger_func=LOG.warning,
+ )
+
+ return False
+
+
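The validator above boils down to a set difference between the MACs of local hv_netvsc interfaces and the MACs IMDS reports; anything present locally but missing from IMDS is flagged. A hypothetical mismatch it would report:

    local_macs = ["00:0d:3a:aa:bb:cc", "00:0d:3a:dd:ee:ff"]
    imds_macs = ["00:0d:3a:aa:bb:cc"]

    missing = [m for m in local_macs if m not in imds_macs]
    assert missing == ["00:0d:3a:dd:ee:ff"]  # -> telemetry event, return False
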
+def _username_from_imds(imds_data):
+ try:
+ return imds_data["compute"]["osProfile"]["adminUsername"]
+ except KeyError:
+ return None
+
+
+def _userdata_from_imds(imds_data):
+ try:
+ return imds_data["compute"]["userData"]
+ except KeyError:
+ return None
+
+
+def _hostname_from_imds(imds_data):
+ try:
+ return imds_data["compute"]["osProfile"]["computerName"]
+ except KeyError:
+ return None
+
+
+def _disable_password_from_imds(imds_data):
+ try:
+ return (
+ imds_data["compute"]["osProfile"]["disablePasswordAuthentication"]
+ == "true"
+ )
+ except KeyError:
+ return None
+
+
+def _key_is_openssh_formatted(key):
+ """
+ Validate whether or not the key is OpenSSH-formatted.
+ """
+ # See https://bugs.launchpad.net/cloud-init/+bug/1910835
+ if "\r\n" in key.strip():
+ return False
+
+ parser = ssh_util.AuthKeyLineParser()
+ try:
+ akl = parser.parse(key)
+ except TypeError:
+ return False
+
+ return akl.keytype is not None
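
The helper above rejects keys that embed CRLF line endings (the multi-key blob case from LP: #1910835) and anything the authorized-keys line parser cannot assign a key type to. Illustrative inputs and the expected results per those checks (the exact parser behaviour for malformed input is an assumption):

    examples = {
        # well-formed single authorized_keys line -> True
        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5... user@host": True,
        # embedded CRLF, e.g. two keys pasted as one blob -> False
        "ssh-rsa AAAAB3Nza...\r\nssh-rsa AAAAB3Nza... user@host": False,
        # an X.509 certificate body is not an OpenSSH key line -> False
        "-----BEGIN CERTIFICATE-----": False,
    }
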
def _partitions_on_device(devpath, maxnum=16):
@@ -1362,7 +1662,7 @@ def _partitions_on_device(devpath, maxnum=16):
@azure_ds_telemetry_reporter
def _has_ntfs_filesystem(devpath):
ntfs_devices = util.find_devs_with("TYPE=ntfs", no_cache=True)
- LOG.debug('ntfs_devices found = %s', ntfs_devices)
+ LOG.debug("ntfs_devices found = %s", ntfs_devices)
return os.path.realpath(devpath) in ntfs_devices
@@ -1386,24 +1686,29 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
If cloud-init cannot mount the disk to check for data, destruction
will be allowed, unless the dscfg key is set."""
if preserve_ntfs:
- msg = ('config says to never destroy NTFS (%s.%s), skipping checks' %
- (".".join(DS_CFG_PATH), DS_CFG_KEY_PRESERVE_NTFS))
+ msg = "config says to never destroy NTFS (%s.%s), skipping checks" % (
+ ".".join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS,
+ )
return False, msg
if not os.path.exists(devpath):
- return False, 'device %s does not exist' % devpath
+ return False, "device %s does not exist" % devpath
- LOG.debug('Resolving realpath of %s -> %s', devpath,
- os.path.realpath(devpath))
+ LOG.debug(
+ "Resolving realpath of %s -> %s", devpath, os.path.realpath(devpath)
+ )
# devpath of /dev/sd[a-z] or /dev/disk/cloud/azure_resource
# where partitions are "<devpath>1" or "<devpath>-part1" or "<devpath>p1"
partitions = _partitions_on_device(devpath)
if len(partitions) == 0:
- return False, 'device %s was not partitioned' % devpath
+ return False, "device %s was not partitioned" % devpath
elif len(partitions) > 2:
- msg = ('device %s had 3 or more partitions: %s' %
- (devpath, ' '.join([p[1] for p in partitions])))
+ msg = "device %s had 3 or more partitions: %s" % (
+ devpath,
+ " ".join([p[1] for p in partitions]),
+ )
return False, msg
elif len(partitions) == 2:
cand_part, cand_path = partitions[1]
@@ -1411,66 +1716,78 @@ def can_dev_be_reformatted(devpath, preserve_ntfs):
cand_part, cand_path = partitions[0]
if not _has_ntfs_filesystem(cand_path):
- msg = ('partition %s (%s) on device %s was not ntfs formatted' %
- (cand_part, cand_path, devpath))
+ msg = "partition %s (%s) on device %s was not ntfs formatted" % (
+ cand_part,
+ cand_path,
+ devpath,
+ )
return False, msg
@azure_ds_telemetry_reporter
def count_files(mp):
- ignored = set(['dataloss_warning_readme.txt'])
+ ignored = set(["dataloss_warning_readme.txt"])
return len([f for f in os.listdir(mp) if f.lower() not in ignored])
- bmsg = ('partition %s (%s) on device %s was ntfs formatted' %
- (cand_part, cand_path, devpath))
+ bmsg = "partition %s (%s) on device %s was ntfs formatted" % (
+ cand_part,
+ cand_path,
+ devpath,
+ )
with events.ReportEventStack(
name="mount-ntfs-and-count",
description="mount-ntfs-and-count",
- parent=azure_ds_reporter
+ parent=azure_ds_reporter,
) as evt:
try:
- file_count = util.mount_cb(cand_path, count_files, mtype="ntfs",
- update_env_for_mount={'LANG': 'C'})
+ file_count = util.mount_cb(
+ cand_path,
+ count_files,
+ mtype="ntfs",
+ update_env_for_mount={"LANG": "C"},
+ )
except util.MountFailedError as e:
evt.description = "cannot mount ntfs"
if "unknown filesystem type 'ntfs'" in str(e):
- return True, (bmsg + ' but this system cannot mount NTFS,'
- ' assuming there are no important files.'
- ' Formatting allowed.')
- return False, bmsg + ' but mount of %s failed: %s' % (cand_part, e)
+ return (
+ True,
+ (
+ bmsg + " but this system cannot mount NTFS,"
+ " assuming there are no important files."
+ " Formatting allowed."
+ ),
+ )
+ return False, bmsg + " but mount of %s failed: %s" % (cand_part, e)
if file_count != 0:
evt.description = "mounted and counted %d files" % file_count
- LOG.warning("it looks like you're using NTFS on the ephemeral"
- " disk, to ensure that filesystem does not get wiped,"
- " set %s.%s in config", '.'.join(DS_CFG_PATH),
- DS_CFG_KEY_PRESERVE_NTFS)
- return False, bmsg + ' but had %d files on it.' % file_count
+ LOG.warning(
+ "it looks like you're using NTFS on the ephemeral"
+ " disk, to ensure that filesystem does not get wiped,"
+ " set %s.%s in config",
+ ".".join(DS_CFG_PATH),
+ DS_CFG_KEY_PRESERVE_NTFS,
+ )
+ return False, bmsg + " but had %d files on it." % file_count
- return True, bmsg + ' and had no important files. Safe for reformatting.'
+ return True, bmsg + " and had no important files. Safe for reformatting."
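
Summarizing the outcomes of can_dev_be_reformatted (messages abbreviated; see the branches above):

    # preserve_ntfs config set     -> (False, "config says to never destroy NTFS ...")
    # devpath missing              -> (False, "device ... does not exist")
    # no partitions                -> (False, "device ... was not partitioned")
    # three or more partitions     -> (False, "device ... had 3 or more partitions ...")
    # candidate partition not NTFS -> (False, "... was not ntfs formatted")
    # NTFS but cannot mount NTFS   -> (True,  "... assuming there are no important files ...")
    # NTFS, mount failed otherwise -> (False, "... but mount of ... failed ...")
    # NTFS with user files on it   -> (False, "... but had N files on it.")
    # NTFS, effectively empty      -> (True,  "... Safe for reformatting.")
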
@azure_ds_telemetry_reporter
-def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
- is_new_instance=False, preserve_ntfs=False):
- # wait for ephemeral disk to come up
- naplen = .2
- with events.ReportEventStack(
- name="wait-for-ephemeral-disk",
- description="wait for ephemeral disk",
- parent=azure_ds_reporter
- ):
- missing = util.wait_for_files([devpath],
- maxwait=maxwait,
- naplen=naplen,
- log_pre="Azure ephemeral disk: ")
-
- if missing:
- report_diagnostic_event(
- "ephemeral device '%s' did not appear after %d seconds." %
- (devpath, maxwait),
- logger_func=LOG.warning)
- return
+def address_ephemeral_resize(
+ devpath=RESOURCE_DISK_PATH, is_new_instance=False, preserve_ntfs=False
+):
+ if not os.path.exists(devpath):
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' does not exist." % devpath,
+ logger_func=LOG.debug,
+ )
+ return
+ else:
+ report_diagnostic_event(
+ "Ephemeral resource disk '%s' exists." % devpath,
+ logger_func=LOG.debug,
+ )
result = False
msg = None
@@ -1483,94 +1800,32 @@ def address_ephemeral_resize(devpath=RESOURCE_DISK_PATH, maxwait=120,
if not result:
return
- for mod in ['disk_setup', 'mounts']:
- sempath = '/var/lib/cloud/instance/sem/config_' + mod
+ for mod in ["disk_setup", "mounts"]:
+ sempath = "/var/lib/cloud/instance/sem/config_" + mod
bmsg = 'Marker "%s" for module "%s"' % (sempath, mod)
if os.path.exists(sempath):
try:
os.unlink(sempath)
- LOG.debug('%s removed.', bmsg)
+ LOG.debug("%s removed.", bmsg)
except Exception as e:
# python3 throws FileNotFoundError, python2 throws OSError
- LOG.warning('%s: remove failed! (%s)', bmsg, e)
+ LOG.warning("%s: remove failed! (%s)", bmsg, e)
else:
- LOG.debug('%s did not exist.', bmsg)
+ LOG.debug("%s did not exist.", bmsg)
return
@azure_ds_telemetry_reporter
-def perform_hostname_bounce(hostname, cfg, prev_hostname):
- # set the hostname to 'hostname' if it is not already set to that.
- # then, if policy is not off, bounce the interface using command
- # Returns True if the network was bounced, False otherwise.
- command = cfg['command']
- interface = cfg['interface']
- policy = cfg['policy']
-
- msg = ("hostname=%s policy=%s interface=%s" %
- (hostname, policy, interface))
- env = os.environ.copy()
- env['interface'] = interface
- env['hostname'] = hostname
- env['old_hostname'] = prev_hostname
-
- if command == "builtin":
- if util.is_FreeBSD():
- command = BOUNCE_COMMAND_FREEBSD
- elif subp.which('ifup'):
- command = BOUNCE_COMMAND_IFUP
- else:
- LOG.debug(
- "Skipping network bounce: ifupdown utils aren't present.")
- # Don't bounce as networkd handles hostname DDNS updates
- return False
- LOG.debug("pubhname: publishing hostname [%s]", msg)
- shell = not isinstance(command, (list, tuple))
- # capture=False, see comments in bug 1202758 and bug 1206164.
- util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=subp.subp,
- kwargs={'args': command, 'shell': shell, 'capture': False,
- 'env': env})
- return True
-
-
-@azure_ds_telemetry_reporter
-def crtfile_to_pubkey(fname, data=None):
- pipeline = ('openssl x509 -noout -pubkey < "$0" |'
- 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = subp.subp(['sh', '-c', pipeline, fname],
- capture=True, data=data)
- return out.rstrip()
-
-
-@azure_ds_telemetry_reporter
-def pubkeys_from_crt_files(flist):
- pubkeys = []
- errors = []
- for fname in flist:
- try:
- pubkeys.append(crtfile_to_pubkey(fname))
- except subp.ProcessExecutionError:
- errors.append(fname)
-
- if errors:
- report_diagnostic_event(
- "failed to convert the crt files to pubkey: %s" % errors,
- logger_func=LOG.warning)
-
- return pubkeys
-
-
-@azure_ds_telemetry_reporter
def write_files(datadir, files, dirmode=None):
-
def _redact_password(cnt, fname):
"""Azure provides the UserPassword in plain text. So we redact it"""
try:
root = ET.fromstring(cnt)
for elem in root.iter():
- if ('UserPassword' in elem.tag and
- elem.text != DEF_PASSWD_REDACTION):
+ if (
+ "UserPassword" in elem.tag
+ and elem.text != DEF_PASSWD_REDACTION
+ ):
elem.text = DEF_PASSWD_REDACTION
return ET.tostring(root)
except Exception:
@@ -1584,21 +1839,11 @@ def write_files(datadir, files, dirmode=None):
util.ensure_dir(datadir, dirmode)
for (name, content) in files.items():
fname = os.path.join(datadir, name)
- if 'ovf-env.xml' in name:
+ if "ovf-env.xml" in name:
content = _redact_password(content, fname)
util.write_file(filename=fname, content=content, mode=0o600)
-@azure_ds_telemetry_reporter
-def invoke_agent(cmd):
- # this is a function itself to simplify patching it for test
- if cmd:
- LOG.debug("invoking agent: %s", cmd)
- subp.subp(cmd, shell=(not isinstance(cmd, list)))
- else:
- LOG.debug("not invoking agent")
-
-
def find_child(node, filter_func):
ret = []
if not node.hasChildNodes():
@@ -1626,8 +1871,9 @@ def load_azure_ovf_pubkeys(sshnode):
if len(results) == 0:
return []
if len(results) > 1:
- raise BrokenAzureDataSource("Multiple 'PublicKeys'(%s) in SSH node" %
- len(results))
+ raise BrokenAzureDataSource(
+ "Multiple 'PublicKeys'(%s) in SSH node" % len(results)
+ )
pubkeys_node = results[0]
pubkeys = find_child(pubkeys_node, lambda n: n.localName == "PublicKey")
@@ -1642,7 +1888,7 @@ def load_azure_ovf_pubkeys(sshnode):
if not pk_node.hasChildNodes():
continue
- cur = {'fingerprint': "", 'path': "", 'value': ""}
+ cur = {"fingerprint": "", "path": "", "value": ""}
for child in pk_node.childNodes:
if child.nodeType == text_node or not child.localName:
continue
@@ -1652,8 +1898,10 @@ def load_azure_ovf_pubkeys(sshnode):
if name not in cur.keys():
continue
- if (len(child.childNodes) != 1 or
- child.childNodes[0].nodeType != text_node):
+ if (
+ len(child.childNodes) != 1
+ or child.childNodes[0].nodeType != text_node
+ ):
continue
cur[name] = child.childNodes[0].wholeText.strip()
@@ -1671,33 +1919,37 @@ def read_azure_ovf(contents):
report_diagnostic_event(error_str, logger_func=LOG.warning)
raise BrokenAzureDataSource(error_str) from e
- results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
+ results = find_child(
+ dom.documentElement, lambda n: n.localName == "ProvisioningSection"
+ )
if len(results) == 0:
raise NonAzureDataSource("No ProvisioningSection")
if len(results) > 1:
- raise BrokenAzureDataSource("found '%d' ProvisioningSection items" %
- len(results))
+ raise BrokenAzureDataSource(
+ "found '%d' ProvisioningSection items" % len(results)
+ )
provSection = results[0]
- lpcs_nodes = find_child(provSection,
- lambda n:
- n.localName == "LinuxProvisioningConfigurationSet")
+ lpcs_nodes = find_child(
+ provSection,
+ lambda n: n.localName == "LinuxProvisioningConfigurationSet",
+ )
if len(lpcs_nodes) == 0:
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
if len(lpcs_nodes) > 1:
- raise BrokenAzureDataSource("found '%d' %ss" %
- (len(lpcs_nodes),
- "LinuxProvisioningConfigurationSet"))
+ raise BrokenAzureDataSource(
+ "found '%d' %ss"
+ % (len(lpcs_nodes), "LinuxProvisioningConfigurationSet")
+ )
lpcs = lpcs_nodes[0]
if not lpcs.hasChildNodes():
raise BrokenAzureDataSource("no child nodes of configuration set")
- md_props = 'seedfrom'
- md = {'azure_data': {}}
+ md_props = "seedfrom"
+ md: Dict[str, Any] = {"azure_data": {}}
cfg = {}
ud = ""
password = None
@@ -1711,8 +1963,10 @@ def read_azure_ovf(contents):
simple = False
value = ""
- if (len(child.childNodes) == 1 and
- child.childNodes[0].nodeType == dom.TEXT_NODE):
+ if (
+ len(child.childNodes) == 1
+ and child.childNodes[0].nodeType == dom.TEXT_NODE
+ ):
simple = True
value = child.childNodes[0].wholeText
@@ -1721,8 +1975,8 @@ def read_azure_ovf(contents):
# we accept either UserData or CustomData. If both are present
# then behavior is undefined.
if name == "userdata" or name == "customdata":
- if attrs.get('encoding') in (None, "base64"):
- ud = base64.b64decode(''.join(value.split()))
+ if attrs.get("encoding") in (None, "base64"):
+ ud = base64.b64decode("".join(value.split()))
else:
ud = value
elif name == "username":
@@ -1730,36 +1984,36 @@ def read_azure_ovf(contents):
elif name == "userpassword":
password = value
elif name == "hostname":
- md['local-hostname'] = value
+ md["local-hostname"] = value
elif name == "dscfg":
- if attrs.get('encoding') in (None, "base64"):
- dscfg = base64.b64decode(''.join(value.split()))
+ if attrs.get("encoding") in (None, "base64"):
+ dscfg = base64.b64decode("".join(value.split()))
else:
dscfg = value
- cfg['datasource'] = {DS_NAME: util.load_yaml(dscfg, default={})}
+ cfg["datasource"] = {DS_NAME: util.load_yaml(dscfg, default={})}
elif name == "ssh":
- cfg['_pubkeys'] = load_azure_ovf_pubkeys(child)
+ cfg["_pubkeys"] = load_azure_ovf_pubkeys(child)
elif name == "disablesshpasswordauthentication":
- cfg['ssh_pwauth'] = util.is_false(value)
+ cfg["ssh_pwauth"] = util.is_false(value)
elif simple:
if name in md_props:
md[name] = value
else:
- md['azure_data'][name] = value
+ md["azure_data"][name] = value
defuser = {}
if username:
- defuser['name'] = username
+ defuser["name"] = username
if password:
- defuser['lock_passwd'] = False
+ defuser["lock_passwd"] = False
if DEF_PASSWD_REDACTION != password:
- defuser['passwd'] = cfg['password'] = encrypt_pass(password)
+ defuser["passwd"] = cfg["password"] = encrypt_pass(password)
if defuser:
- cfg['system_info'] = {'default_user': defuser}
+ cfg["system_info"] = {"default_user": defuser}
- if 'ssh_pwauth' not in cfg and password:
- cfg['ssh_pwauth'] = True
+ if "ssh_pwauth" not in cfg and password:
+ cfg["ssh_pwauth"] = True
preprovisioning_cfg = _get_preprovisioning_cfgs(dom)
cfg = util.mergemanydict([cfg, preprovisioning_cfg])
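
To make the field mapping above concrete, a minimal hypothetical provisioning set and what the loop extracts from it (namespaces trimmed; the XML is held in a Python string for the sketch):

    ovf_fragment = """\
    <LinuxProvisioningConfigurationSet>
      <HostName>myvm</HostName>
      <UserName>azureuser</UserName>
      <CustomData>IyEvYmluL3NoCg==</CustomData>
      <DisableSshPasswordAuthentication>true</DisableSshPasswordAuthentication>
    </LinuxProvisioningConfigurationSet>"""
    # hostname   -> md["local-hostname"] == "myvm"
    # username   -> cfg["system_info"]["default_user"]["name"] == "azureuser"
    # customdata -> ud == b"#!/bin/sh\n" (base64-decoded)
    # disablesshpasswordauthentication ("true") -> cfg["ssh_pwauth"] is False
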
@@ -1785,20 +2039,18 @@ def _get_preprovisioning_cfgs(dom):
More specifically, this will never happen:
- PreprovisionedVm=True and PreprovisionedVMType=Savable
"""
- cfg = {
- "PreprovisionedVm": False,
- "PreprovisionedVMType": None
- }
+ cfg = {"PreprovisionedVm": False, "PreprovisionedVMType": None}
platform_settings_section = find_child(
- dom.documentElement,
- lambda n: n.localName == "PlatformSettingsSection")
+ dom.documentElement, lambda n: n.localName == "PlatformSettingsSection"
+ )
if not platform_settings_section or len(platform_settings_section) == 0:
LOG.debug("PlatformSettingsSection not found")
return cfg
platform_settings = find_child(
platform_settings_section[0],
- lambda n: n.localName == "PlatformSettings")
+ lambda n: n.localName == "PlatformSettings",
+ )
if not platform_settings or len(platform_settings) == 0:
LOG.debug("PlatformSettings not found")
return cfg
@@ -1807,10 +2059,12 @@ def _get_preprovisioning_cfgs(dom):
# platform has removed PreprovisionedVm and only surfaces
# PreprovisionedVMType.
cfg["PreprovisionedVm"] = _get_preprovisionedvm_cfg_value(
- platform_settings)
+ platform_settings
+ )
cfg["PreprovisionedVMType"] = _get_preprovisionedvmtype_cfg_value(
- platform_settings)
+ platform_settings
+ )
return cfg
@@ -1822,16 +2076,18 @@ def _get_preprovisionedvm_cfg_value(platform_settings):
# platform has removed PreprovisionedVm and only surfaces
# PreprovisionedVMType.
preprovisionedVmVal = find_child(
- platform_settings[0],
- lambda n: n.localName == "PreprovisionedVm")
+ platform_settings[0], lambda n: n.localName == "PreprovisionedVm"
+ )
if not preprovisionedVmVal or len(preprovisionedVmVal) == 0:
LOG.debug("PreprovisionedVm not found")
return preprovisionedVm
preprovisionedVm = util.translate_bool(
- preprovisionedVmVal[0].firstChild.nodeValue)
+ preprovisionedVmVal[0].firstChild.nodeValue
+ )
report_diagnostic_event(
- "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info)
+ "PreprovisionedVm: %s" % preprovisionedVm, logger_func=LOG.info
+ )
return preprovisionedVm
@@ -1850,18 +2106,21 @@ def _get_preprovisionedvmtype_cfg_value(platform_settings):
# Once assigned to customer, the customer-requested nics are
# hot-attached to it and reprovision happens like today.
preprovisionedVMTypeVal = find_child(
- platform_settings[0],
- lambda n: n.localName == "PreprovisionedVMType")
- if (not preprovisionedVMTypeVal or len(preprovisionedVMTypeVal) == 0 or
- preprovisionedVMTypeVal[0].firstChild is None):
+ platform_settings[0], lambda n: n.localName == "PreprovisionedVMType"
+ )
+ if (
+ not preprovisionedVMTypeVal
+ or len(preprovisionedVMTypeVal) == 0
+ or preprovisionedVMTypeVal[0].firstChild is None
+ ):
LOG.debug("PreprovisionedVMType not found")
return preprovisionedVMType
preprovisionedVMType = preprovisionedVMTypeVal[0].firstChild.nodeValue
report_diagnostic_event(
- "PreprovisionedVMType: %s" % preprovisionedVMType,
- logger_func=LOG.info)
+ "PreprovisionedVMType: %s" % preprovisionedVMType, logger_func=LOG.info
+ )
return preprovisionedVMType
@@ -1885,7 +2144,7 @@ def _check_freebsd_cdrom(cdrom_dev):
@azure_ds_telemetry_reporter
def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
"""Return content random seed file if available, otherwise,
- return None."""
+ return None."""
# azure / hyper-v provides random data here
# now update ds_cfg to reflect contents pass in config
if source is None:
@@ -1901,24 +2160,22 @@ def _get_random_seed(source=PLATFORM_ENTROPY_SOURCE):
# string. Same number of bits of entropy, just with 25% more zeroes.
# There's no need to undo this base64-encoding when the random seed is
# actually used in cc_seed_random.py.
- seed = base64.b64encode(seed).decode()
-
- return seed
+ return base64.b64encode(seed).decode() # type: ignore
@azure_ds_telemetry_reporter
-def list_possible_azure_ds_devs():
- devlist = []
+def list_possible_azure_ds(seed, cache_dir):
+ yield seed
+ yield DEFAULT_PROVISIONING_ISO_DEV
if util.is_FreeBSD():
cdrom_dev = "/dev/cd0"
if _check_freebsd_cdrom(cdrom_dev):
- return [cdrom_dev]
+ yield cdrom_dev
else:
for fstype in ("iso9660", "udf"):
- devlist.extend(util.find_devs_with("TYPE=%s" % fstype))
-
- devlist.sort(reverse=True)
- return devlist
+ yield from util.find_devs_with("TYPE=%s" % fstype)
+ if cache_dir:
+ yield cache_dir
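
The generator yields candidates in priority order: the seed directory, the well-known provisioning ISO device, any detected iso9660/udf devices, and finally the cache directory. A caller would typically walk them in order while skipping duplicates, e.g. (paths hypothetical):

    seen = set()
    for src in list_possible_azure_ds("/var/lib/cloud/seed/azure",
                                      "/var/lib/cloud/data"):
        if src and src not in seen:
            seen.add(src)
            print("probing", src)
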
@azure_ds_telemetry_reporter
@@ -1932,7 +2189,7 @@ def load_azure_ds_dir(source_dir):
contents = fp.read()
md, ud, cfg = read_azure_ovf(contents)
- return (md, ud, cfg, {'ovf-env.xml': contents})
+ return (md, ud, cfg, {"ovf-env.xml": contents})
@azure_ds_telemetry_reporter
@@ -1949,12 +2206,14 @@ def parse_network_config(imds_metadata) -> dict:
return _generate_network_config_from_imds_metadata(imds_metadata)
except Exception as e:
LOG.error(
- 'Failed generating network config '
- 'from IMDS network metadata: %s', str(e))
+ "Failed generating network config "
+ "from IMDS network metadata: %s",
+ str(e),
+ )
try:
return _generate_network_config_from_fallback_config()
except Exception as e:
- LOG.error('Failed generating fallback network config: %s', str(e))
+ LOG.error("Failed generating fallback network config: %s", str(e))
return {}
@@ -1966,51 +2225,69 @@ def _generate_network_config_from_imds_metadata(imds_metadata) -> dict:
@param: imds_metadata: Dict of content read from IMDS network service.
@return: Dictionary containing network version 2 standard configuration.
"""
- netconfig = {'version': 2, 'ethernets': {}}
- network_metadata = imds_metadata['network']
- for idx, intf in enumerate(network_metadata['interface']):
+ netconfig: Dict[str, Any] = {"version": 2, "ethernets": {}}
+ network_metadata = imds_metadata["network"]
+ for idx, intf in enumerate(network_metadata["interface"]):
+ has_ip_address = False
# First IPv4 and/or IPv6 address will be obtained via DHCP.
# Any additional IPs of each type will be set as static
# addresses.
- nicname = 'eth{idx}'.format(idx=idx)
- dhcp_override = {'route-metric': (idx + 1) * 100}
- dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
- 'dhcp6': False}
- for addr_type in ('ipv4', 'ipv6'):
- addresses = intf.get(addr_type, {}).get('ipAddress', [])
- if addr_type == 'ipv4':
- default_prefix = '24'
+ nicname = "eth{idx}".format(idx=idx)
+ dhcp_override = {"route-metric": (idx + 1) * 100}
+ dev_config: Dict[str, Any] = {
+ "dhcp4": True,
+ "dhcp4-overrides": dhcp_override,
+ "dhcp6": False,
+ }
+ for addr_type in ("ipv4", "ipv6"):
+ addresses = intf.get(addr_type, {}).get("ipAddress", [])
+ # If there are no available IP addresses, then we don't
+ # want to add this interface to the generated config.
+ if not addresses:
+ LOG.debug("No %s addresses found for: %r", addr_type, intf)
+ continue
+ has_ip_address = True
+ if addr_type == "ipv4":
+ default_prefix = "24"
else:
- default_prefix = '128'
+ default_prefix = "128"
if addresses:
- dev_config['dhcp6'] = True
+ dev_config["dhcp6"] = True
# non-primary interfaces should have a higher
# route-metric (cost) so default routes prefer
# primary nic due to lower route-metric value
- dev_config['dhcp6-overrides'] = dhcp_override
+ dev_config["dhcp6-overrides"] = dhcp_override
for addr in addresses[1:]:
# Append static address config for ip > 1
- netPrefix = intf[addr_type]['subnet'][0].get(
- 'prefix', default_prefix)
- privateIp = addr['privateIpAddress']
- if not dev_config.get('addresses'):
- dev_config['addresses'] = []
- dev_config['addresses'].append(
- '{ip}/{prefix}'.format(
- ip=privateIp, prefix=netPrefix))
- if dev_config:
- mac = ':'.join(re.findall(r'..', intf['macAddress']))
- dev_config.update({
- 'match': {'macaddress': mac.lower()},
- 'set-name': nicname
- })
+ netPrefix = intf[addr_type]["subnet"][0].get(
+ "prefix", default_prefix
+ )
+ privateIp = addr["privateIpAddress"]
+ if not dev_config.get("addresses"):
+ dev_config["addresses"] = []
+ dev_config["addresses"].append(
+ "{ip}/{prefix}".format(ip=privateIp, prefix=netPrefix)
+ )
+ if dev_config and has_ip_address:
+ mac = normalize_mac_address(intf["macAddress"])
+ dev_config.update(
+ {"match": {"macaddress": mac.lower()}, "set-name": nicname}
+ )
# With netvsc, we can get two interfaces that
# share the same MAC, so we need to make sure
# our match condition also contains the driver
driver = device_driver(nicname)
- if driver and driver == 'hv_netvsc':
- dev_config['match']['driver'] = driver
- netconfig['ethernets'][nicname] = dev_config
+ if driver and driver == "hv_netvsc":
+ dev_config["match"]["driver"] = driver
+ netconfig["ethernets"][nicname] = dev_config
+ continue
+
+ LOG.debug(
+ "No configuration for: %s (dev_config=%r) (has_ip_address=%r)",
+ nicname,
+ dev_config,
+ has_ip_address,
+ )
return netconfig
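
As a concrete illustration, a hypothetical IMDS interface entry and the v2 ethernet fragment the loop above would emit for it at idx 0 (values invented; normalize_mac_address is assumed to lower-case and colon-separate the MAC):

    imds_interface = {
        "macAddress": "000D3A123456",
        "ipv4": {
            "subnet": [{"prefix": "24"}],
            "ipAddress": [
                {"privateIpAddress": "10.0.0.4"},  # first address -> DHCP
                {"privateIpAddress": "10.0.0.5"},  # extras -> static
            ],
        },
    }
    expected_eth0 = {
        "dhcp4": True,
        "dhcp4-overrides": {"route-metric": 100},  # (idx + 1) * 100
        "dhcp6": False,                            # no ipv6 addresses above
        "addresses": ["10.0.0.5/24"],
        "match": {"macaddress": "00:0d:3a:12:34:56"},
        "set-name": "eth0",
    }
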
@@ -2020,72 +2297,101 @@ def _generate_network_config_from_fallback_config() -> dict:
@return: Dictionary containing network version 2 standard configuration.
"""
- return net.generate_fallback_config(
- blacklist_drivers=BLACKLIST_DRIVERS, config_driver=True)
+ cfg = net.generate_fallback_config(
+ blacklist_drivers=BLACKLIST_DRIVERS, config_driver=True
+ )
+ if cfg is None:
+ return {}
+ return cfg
@azure_ds_telemetry_reporter
-def get_metadata_from_imds(fallback_nic,
- retries,
- md_type=metadata_type.compute):
+def get_metadata_from_imds(
+ retries,
+ md_type=MetadataType.ALL,
+ api_version=IMDS_VER_MIN,
+ exc_cb=retry_on_url_exc,
+ infinite=False,
+):
"""Query Azure's instance metadata service, returning a dictionary.
- If network is not up, setup ephemeral dhcp on fallback_nic to talk to the
- IMDS. For more info on IMDS:
+ For more info on IMDS:
https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service
- @param fallback_nic: String. The name of the nic which requires active
- network in order to query IMDS.
@param retries: The number of retries of the IMDS_URL.
+ @param md_type: Metadata type for IMDS request.
+ @param api_version: IMDS api-version to use in the request.
@return: A dict of instance metadata containing compute and network
info.
"""
- kwargs = {'logfunc': LOG.debug,
- 'msg': 'Crawl of Azure Instance Metadata Service (IMDS)',
- 'func': _get_metadata_from_imds, 'args': (retries, md_type,)}
- if net.is_up(fallback_nic):
+ kwargs = {
+ "logfunc": LOG.debug,
+ "msg": "Crawl of Azure Instance Metadata Service (IMDS)",
+ "func": _get_metadata_from_imds,
+ "args": (retries, exc_cb, md_type, api_version, infinite),
+ }
+ try:
return util.log_time(**kwargs)
- else:
- try:
- with EphemeralDHCPv4WithReporting(
- azure_ds_reporter, fallback_nic):
- return util.log_time(**kwargs)
- except Exception as e:
- report_diagnostic_event(
- "exception while getting metadata: %s" % e,
- logger_func=LOG.warning)
- raise
+ except Exception as e:
+ report_diagnostic_event(
+ "exception while getting metadata: %s" % e,
+ logger_func=LOG.warning,
+ )
+ raise
@azure_ds_telemetry_reporter
-def _get_metadata_from_imds(retries, md_type=metadata_type.compute):
-
- url = md_type.value
+def _get_metadata_from_imds(
+ retries,
+ exc_cb,
+ md_type=MetadataType.ALL,
+ api_version=IMDS_VER_MIN,
+ infinite=False,
+):
+ url = "{}?api-version={}".format(md_type.value, api_version)
headers = {"Metadata": "true"}
+
+ # support for extended metadata begins with 2021-03-01
+ if api_version >= IMDS_EXTENDED_VER_MIN and md_type == MetadataType.ALL:
+ url = url + "&extended=true"
+
try:
response = readurl(
- url, timeout=IMDS_TIMEOUT_IN_SECONDS, headers=headers,
- retries=retries, exception_cb=retry_on_url_exc)
+ url,
+ timeout=IMDS_TIMEOUT_IN_SECONDS,
+ headers=headers,
+ retries=retries,
+ exception_cb=exc_cb,
+ infinite=infinite,
+ )
except Exception as e:
- report_diagnostic_event(
- 'Ignoring IMDS instance metadata. '
- 'Get metadata from IMDS failed: %s' % e,
- logger_func=LOG.warning)
- return {}
+ # pylint:disable=no-member
+ if isinstance(e, UrlError) and e.code == 400:
+ raise
+ else:
+ report_diagnostic_event(
+ "Ignoring IMDS instance metadata. "
+ "Get metadata from IMDS failed: %s" % e,
+ logger_func=LOG.warning,
+ )
+ return {}
try:
from json.decoder import JSONDecodeError
+
json_decode_error = JSONDecodeError
except ImportError:
json_decode_error = ValueError
try:
- return util.load_json(str(response))
+ return util.load_json(response.contents)
except json_decode_error as e:
report_diagnostic_event(
- 'Ignoring non-json IMDS instance metadata response: %s. '
- 'Loading non-json IMDS response failed: %s' % (str(response), e),
- logger_func=LOG.warning)
+ "Ignoring non-json IMDS instance metadata response: %s. "
+ "Loading non-json IMDS response failed: %s"
+ % (response.contents, e),
+ logger_func=LOG.warning,
+ )
return {}
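
Putting the URL construction together: the api-version query parameter is always appended, and "&extended=true" only when the full instance document is requested with a new-enough version. Hypothetical resulting URLs (the endpoint is the well-known IMDS address; the concrete version strings are assumptions here):

    # MetadataType.ALL with api_version=IMDS_VER_MIN (e.g. "2019-06-01"):
    #   http://169.254.169.254/metadata/instance?api-version=2019-06-01
    # MetadataType.ALL with api_version >= IMDS_EXTENDED_VER_MIN (e.g. "2021-08-01"):
    #   http://169.254.169.254/metadata/instance?api-version=2021-08-01&extended=true
    # MetadataType.NETWORK never gets the extended flag:
    #   http://169.254.169.254/metadata/instance/network?api-version=2019-06-01
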
@@ -2115,10 +2421,11 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None):
if os.path.exists(path):
if not logged:
LOG.info(
- 'Removing Ubuntu extended network scripts because'
- ' cloud-init updates Azure network configuration on the'
- ' following event: %s.',
- EventType.BOOT)
+ "Removing Ubuntu extended network scripts because"
+ " cloud-init updates Azure network configuration on the"
+ " following events: %s.",
+ [EventType.BOOT.value, EventType.BOOT_LEGACY.value],
+ )
logged = True
if os.path.isdir(path):
util.del_dir(path)
@@ -2131,15 +2438,15 @@ def _is_platform_viable(seed_dir):
with events.ReportEventStack(
name="check-platform-viability",
description="found azure asset tag",
- parent=azure_ds_reporter
+ parent=azure_ds_reporter,
) as evt:
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
if asset_tag == AZURE_CHASSIS_ASSET_TAG:
return True
msg = "Non-Azure DMI asset tag '%s' discovered." % asset_tag
evt.description = msg
report_diagnostic_event(msg, logger_func=LOG.debug)
- if os.path.exists(os.path.join(seed_dir, 'ovf-env.xml')):
+ if os.path.exists(os.path.join(seed_dir, "ovf-env.xml")):
return True
return False
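
The DMI check above keys off Azure's well-known chassis asset tag. A quick standalone probe of the same signal (the sysfs path is the conventional Linux location for this field; reading it may require root):

    AZURE_CHASSIS_ASSET_TAG = "7783-7084-3265-9085-8269-3286-77"
    with open("/sys/class/dmi/id/chassis_asset_tag") as f:
        print(f.read().strip() == AZURE_CHASSIS_ASSET_TAG)
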
@@ -2157,7 +2464,7 @@ DataSourceAzureNet = DataSourceAzure
# Used to match classes to dependencies
datasources = [
- (DataSourceAzure, (sources.DEP_FILESYSTEM, )),
+ (DataSourceAzure, (sources.DEP_FILESYSTEM,)),
]
@@ -2165,4 +2472,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index 63435279..426a762e 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -7,14 +7,12 @@
import errno
import json
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
class DataSourceBigstep(sources.DataSource):
- dsname = 'Bigstep'
+ dsname = "Bigstep"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -35,7 +33,7 @@ class DataSourceBigstep(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'metadata (%s)' % get_url_from_file()
+ return "metadata (%s)" % get_url_from_file()
def get_url_from_file():
@@ -61,4 +59,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index f63baf74..de71c3e9 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -4,14 +4,13 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from base64 import b64decode
import re
-
-from cloudinit.cs_utils import Cepko, SERIAL_PORT
+from base64 import b64decode
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import sources
+from cloudinit.cs_utils import SERIAL_PORT, Cepko
LOG = logging.getLogger(__name__)
@@ -24,11 +23,11 @@ class DataSourceCloudSigma(sources.DataSource):
http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
"""
- dsname = 'CloudSigma'
+ dsname = "CloudSigma"
def __init__(self, sys_cfg, distro, paths):
self.cepko = Cepko()
- self.ssh_public_key = ''
+ self.ssh_public_key = ""
sources.DataSource.__init__(self, sys_cfg, distro, paths)
def is_running_in_cloudsigma(self):
@@ -43,7 +42,7 @@ class DataSourceCloudSigma(sources.DataSource):
LOG.debug("system-product-name not available in dmi data")
return False
LOG.debug("detected hypervisor as %s", sys_product_name)
- return 'cloudsigma' in sys_product_name.lower()
+ return "cloudsigma" in sys_product_name.lower()
def _get_data(self):
"""
@@ -56,7 +55,7 @@ class DataSourceCloudSigma(sources.DataSource):
try:
server_context = self.cepko.all().result
- server_meta = server_context['meta']
+ server_meta = server_context["meta"]
except Exception:
# TODO: check for explicit "config on", and then warn
# but since no explicit config is available now, just debug.
@@ -64,41 +63,42 @@ class DataSourceCloudSigma(sources.DataSource):
return False
self.dsmode = self._determine_dsmode(
- [server_meta.get('cloudinit-dsmode')])
+ [server_meta.get("cloudinit-dsmode")]
+ )
         if self.dsmode == sources.DSMODE_DISABLED:
return False
- base64_fields = server_meta.get('base64_fields', '').split(',')
- self.userdata_raw = server_meta.get('cloudinit-user-data', "")
- if 'cloudinit-user-data' in base64_fields:
+ base64_fields = server_meta.get("base64_fields", "").split(",")
+ self.userdata_raw = server_meta.get("cloudinit-user-data", "")
+ if "cloudinit-user-data" in base64_fields:
self.userdata_raw = b64decode(self.userdata_raw)
- if 'cloudinit' in server_context.get('vendor_data', {}):
+ if "cloudinit" in server_context.get("vendor_data", {}):
self.vendordata_raw = server_context["vendor_data"]["cloudinit"]
self.metadata = server_context
- self.ssh_public_key = server_meta['ssh_public_key']
+ self.ssh_public_key = server_meta["ssh_public_key"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'cepko (%s)' % SERIAL_PORT
+ return "cepko (%s)" % SERIAL_PORT
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
"""
Cleans up and uses the server's name if the latter is set. Otherwise
the first part from uuid is being used.
"""
- if re.match(r'^[A-Za-z0-9 -_\.]+$', self.metadata['name']):
- return self.metadata['name'][:61]
+ if re.match(r"^[A-Za-z0-9 -_\.]+$", self.metadata["name"]):
+ return self.metadata["name"][:61]
else:
- return self.metadata['uuid'].split('-')[0]
+ return self.metadata["uuid"].split("-")[0]
def get_public_ssh_keys(self):
return [self.ssh_public_key]
def get_instance_id(self):
- return self.metadata['uuid']
+ return self.metadata["uuid"]
# Legacy: Must be present in case we load an old pkl object
@@ -107,7 +107,7 @@ DataSourceCloudSigmaNet = DataSourceCloudSigma
 # Used to match classes to dependencies. Since this datasource uses the serial
 # port, network is not really required, so it's okay to load without it, too.
datasources = [
- (DataSourceCloudSigma, (sources.DEP_FILESYSTEM, )),
+ (DataSourceCloudSigma, (sources.DEP_FILESYSTEM,)),
]
@@ -117,4 +117,5 @@ def get_datasource_list(depends):
"""
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
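
A self-contained sketch of the base64_fields convention the CloudSigma hunk above decodes, with an illustrative (made-up) server meta dict:

    from base64 import b64decode

    def decode_user_data(server_meta):
        # Keys listed in "base64_fields" are base64-encoded; everything else
        # is plain text, mirroring the logic in the hunk above.
        base64_fields = server_meta.get("base64_fields", "").split(",")
        userdata = server_meta.get("cloudinit-user-data", "")
        if "cloudinit-user-data" in base64_fields:
            userdata = b64decode(userdata)
        return userdata

    print(decode_user_data({
        "base64_fields": "cloudinit-user-data",
        "cloudinit-user-data": "I2Nsb3VkLWNvbmZpZw==",  # "#cloud-config"
    }))  # b'#cloud-config'
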
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 54810439..a742a5e6 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -13,17 +13,16 @@
# This file is part of cloud-init. See LICENSE file for license information.
import os
-from socket import inet_ntoa, getaddrinfo, gaierror
-from struct import pack
import time
+from socket import gaierror, getaddrinfo, inet_ntoa
+from struct import pack
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit.net import dhcp
-from cloudinit import sources
+from cloudinit import sources, subp
from cloudinit import url_helper as uhelp
-from cloudinit import subp
from cloudinit import util
+from cloudinit.net import dhcp
LOG = logging.getLogger(__name__)
@@ -47,27 +46,36 @@ class CloudStackPasswordServerClient(object):
         # The password server was, in the past, a broken HTTP server, but it
         # is now fixed. wget handles this seamlessly, so it's easier to shell
         # out to that than to write our own handling code.
- output, _ = subp.subp([
- 'wget', '--quiet', '--tries', '3', '--timeout', '20',
- '--output-document', '-', '--header',
- 'DomU_Request: {0}'.format(domu_request),
- '{0}:8080'.format(self.virtual_router_address)
- ])
+ output, _ = subp.subp(
+ [
+ "wget",
+ "--quiet",
+ "--tries",
+ "3",
+ "--timeout",
+ "20",
+ "--output-document",
+ "-",
+ "--header",
+ "DomU_Request: {0}".format(domu_request),
+ "{0}:8080".format(self.virtual_router_address),
+ ]
+ )
return output.strip()
def get_password(self):
- password = self._do_request('send_my_password')
- if password in ['', 'saved_password']:
+ password = self._do_request("send_my_password")
+ if password in ["", "saved_password"]:
return None
- if password == 'bad_request':
- raise RuntimeError('Error when attempting to fetch root password.')
- self._do_request('saved_password')
+ if password == "bad_request":
+ raise RuntimeError("Error when attempting to fetch root password.")
+ self._do_request("saved_password")
return password
class DataSourceCloudStack(sources.DataSource):
- dsname = 'CloudStack'
+ dsname = "CloudStack"
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -75,10 +83,10 @@ class DataSourceCloudStack(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'cs')
+ self.seed_dir = os.path.join(paths.seed_dir, "cs")
# Cloudstack has its metadata/userdata URLs located at
# http://<virtual-router-ip>/latest/
- self.api_ver = 'latest'
+ self.api_ver = "latest"
self.vr_addr = get_vr_address()
if not self.vr_addr:
raise RuntimeError("No virtual router found!")
@@ -91,19 +99,28 @@ class DataSourceCloudStack(sources.DataSource):
if url_params.max_wait_seconds <= 0:
return False
- urls = [uhelp.combine_url(self.metadata_address,
- 'latest/meta-data/instance-id')]
+ urls = [
+ uhelp.combine_url(
+ self.metadata_address, "latest/meta-data/instance-id"
+ )
+ ]
start_time = time.time()
url, _response = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning)
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
+ )
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
- LOG.critical(("Giving up on waiting for the metadata from %s"
- " after %s seconds"),
- urls, int(time.time() - start_time))
+ LOG.critical(
+ "Giving up on waiting for the metadata from %s"
+ " after %s seconds",
+ urls,
+ int(time.time() - start_time),
+ )
return bool(url)
@@ -113,8 +130,8 @@ class DataSourceCloudStack(sources.DataSource):
def _get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
- self.userdata_raw = seed_ret['user-data']
- self.metadata = seed_ret['meta-data']
+ self.userdata_raw = seed_ret["user-data"]
+ self.metadata = seed_ret["meta-data"]
LOG.debug("Using seeded cloudstack data from: %s", self.seed_dir)
return True
try:
@@ -122,45 +139,54 @@ class DataSourceCloudStack(sources.DataSource):
return False
start_time = time.time()
self.userdata_raw = ec2.get_instance_userdata(
- self.api_ver, self.metadata_address)
- self.metadata = ec2.get_instance_metadata(self.api_ver,
- self.metadata_address)
- LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ self.api_ver, self.metadata_address
+ )
+ self.metadata = ec2.get_instance_metadata(
+ self.api_ver, self.metadata_address
+ )
+ LOG.debug(
+ "Crawl of metadata service took %s seconds",
+ int(time.time() - start_time),
+ )
password_client = CloudStackPasswordServerClient(self.vr_addr)
try:
set_password = password_client.get_password()
except Exception:
- util.logexc(LOG,
- 'Failed to fetch password from virtual router %s',
- self.vr_addr)
+ util.logexc(
+ LOG,
+ "Failed to fetch password from virtual router %s",
+ self.vr_addr,
+ )
else:
if set_password:
self.cfg = {
- 'ssh_pwauth': True,
- 'password': set_password,
- 'chpasswd': {
- 'expire': False,
+ "ssh_pwauth": True,
+ "password": set_password,
+ "chpasswd": {
+ "expire": False,
},
}
return True
except Exception:
- util.logexc(LOG, 'Failed fetching from metadata service %s',
- self.metadata_address)
+ util.logexc(
+ LOG,
+ "Failed fetching from metadata service %s",
+ self.metadata_address,
+ )
return False
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
@property
def availability_zone(self):
- return self.metadata['availability-zone']
+ return self.metadata["availability-zone"]
def get_data_server():
     # Returns the metadata server from DNS
try:
         addrinfo = getaddrinfo("data-server.", 80)
except gaierror:
LOG.debug("DNS Entry data-server not found")
return None
@@ -183,8 +209,11 @@ def get_default_gateway():
def get_dhclient_d():
# find lease files directory
- supported_dirs = ["/var/lib/dhclient", "/var/lib/dhcp",
- "/var/lib/NetworkManager"]
+ supported_dirs = [
+ "/var/lib/dhclient",
+ "/var/lib/dhcp",
+ "/var/lib/NetworkManager",
+ ]
for d in supported_dirs:
if os.path.exists(d) and len(os.listdir(d)) > 0:
LOG.debug("Using %s lease directory", d)
@@ -233,15 +262,18 @@ def get_vr_address():
# Try data-server DNS entry first
latest_address = get_data_server()
if latest_address:
- LOG.debug("Found metadata server '%s' via data-server DNS entry",
- latest_address)
+ LOG.debug(
+ "Found metadata server '%s' via data-server DNS entry",
+ latest_address,
+ )
return latest_address
# Try networkd second...
- latest_address = dhcp.networkd_get_option_from_leases('SERVER_ADDRESS')
+ latest_address = dhcp.networkd_get_option_from_leases("SERVER_ADDRESS")
if latest_address:
- LOG.debug("Found SERVER_ADDRESS '%s' via networkd_leases",
- latest_address)
+ LOG.debug(
+ "Found SERVER_ADDRESS '%s' via networkd_leases", latest_address
+ )
return latest_address
# Try dhcp lease files next...
@@ -275,4 +307,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
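
A hedged sketch of the password-server exchange that CloudStackPasswordServerClient performs; subprocess stands in for cloudinit.subp, wget must be installed, and the single-use semantics follow the comments in the hunks above:

    import subprocess

    def fetch_vr_password(virtual_router_address):
        # The CloudStack password server speaks plain HTTP on port 8080 and
        # keys off the DomU_Request header; wget is used upstream because old
        # server versions emitted malformed HTTP responses.
        def request(req):
            out = subprocess.run(
                ["wget", "--quiet", "--tries", "3", "--timeout", "20",
                 "--output-document", "-", "--header",
                 "DomU_Request: {0}".format(req),
                 "{0}:8080".format(virtual_router_address)],
                check=True, capture_output=True, text=True,
            )
            return out.stdout.strip()

        password = request("send_my_password")
        if password in ("", "saved_password"):
            return None  # nothing set, or already consumed
        if password == "bad_request":
            raise RuntimeError("Error when attempting to fetch root password.")
        request("saved_password")  # mark consumed: returned only once
        return password
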
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 62756cf7..f7c58b12 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -9,12 +9,9 @@
import os
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import sources, subp, util
+from cloudinit.event import EventScope, EventType
from cloudinit.net import eni
-
from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform
from cloudinit.sources.helpers import openstack
@@ -22,25 +19,35 @@ LOG = logging.getLogger(__name__)
# Various defaults/constants...
DEFAULT_IID = "iid-dsconfigdrive"
-DEFAULT_MODE = 'pass'
+DEFAULT_MODE = "pass"
DEFAULT_METADATA = {
"instance-id": DEFAULT_IID,
}
-FS_TYPES = ('vfat', 'iso9660')
-LABEL_TYPES = ('config-2', 'CONFIG-2')
-POSSIBLE_MOUNTS = ('sr', 'cd')
-OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
+FS_TYPES = ("vfat", "iso9660")
+LABEL_TYPES = ("config-2", "CONFIG-2")
+POSSIBLE_MOUNTS = ("sr", "cd")
+OPTICAL_DEVICES = tuple(
+ ("/dev/%s%s" % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2))
+)
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
- dsname = 'ConfigDrive'
+ dsname = "ConfigDrive"
+
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceConfigDrive, self).__init__(sys_cfg, distro, paths)
self.source = None
- self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+ self.seed_dir = os.path.join(paths.seed_dir, "config_drive")
self.version = None
self.ec2_metadata = None
self._network_config = None
@@ -70,15 +77,16 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, "Failed reading config drive from %s", sdir)
if not found:
- dslist = self.sys_cfg.get('datasource_list')
+ dslist = self.sys_cfg.get("datasource_list")
for dev in find_candidate_devs(dslist=dslist):
mtype = None
if util.is_BSD():
if dev.startswith("/dev/cd"):
mtype = "cd9660"
try:
- results = util.mount_cb(dev, read_config_drive,
- mtype=mtype)
+ results = util.mount_cb(
+ dev, read_config_drive, mtype=mtype
+ )
found = dev
except openstack.NonReadable:
pass
@@ -91,41 +99,49 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if not found:
return False
- md = results.get('metadata', {})
+ md = results.get("metadata", {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.dsmode = self._determine_dsmode(
- [results.get('dsmode'), self.ds_cfg.get('dsmode'),
- sources.DSMODE_PASS if results['version'] == 1 else None])
+ [
+ results.get("dsmode"),
+ self.ds_cfg.get("dsmode"),
+ sources.DSMODE_PASS if results["version"] == 1 else None,
+ ]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
prev_iid = get_previous_iid(self.paths)
- cur_iid = md['instance-id']
+ cur_iid = md["instance-id"]
if prev_iid != cur_iid:
# better would be to handle this centrally, allowing
# the datasource to do something on new instance id
# note, networking is only rendered here if dsmode is DSMODE_PASS
# which means "DISABLED, but render files and networking"
- on_first_boot(results, distro=self.distro,
- network=self.dsmode == sources.DSMODE_PASS)
+ on_first_boot(
+ results,
+ distro=self.distro,
+ network=self.dsmode == sources.DSMODE_PASS,
+ )
# This is legacy and sneaky. If dsmode is 'pass' then do not claim
# the datasource was used, even though we did run on_first_boot above.
if self.dsmode == sources.DSMODE_PASS:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
+ LOG.debug(
+ "%s: not claiming datasource, dsmode=%s", self, self.dsmode
+ )
return False
self.source = found
self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
+ self.ec2_metadata = results.get("ec2-metadata")
+ self.userdata_raw = results.get("userdata")
+ self.version = results["version"]
+ self.files.update(results.get("files", {}))
- vd = results.get('vendordata')
+ vd = results.get("vendordata")
self.vendordata_pure = vd
try:
self.vendordata_raw = sources.convert_vendordata(vd)
@@ -137,7 +153,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
# obsolete compared to networkdata (from network_data.json) but both
# might be present.
self.network_eni = results.get("network_config")
- self.network_json = results.get('networkdata')
+ self.network_json = results.get("networkdata")
return True
def check_instance_id(self, sys_cfg):
@@ -150,7 +166,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
if self.network_json not in (None, sources.UNSET):
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=self.known_macs)
+ self.network_json, known_macs=self.known_macs
+ )
elif self.network_eni is not None:
self._network_config = eni.convert_eni_data(self.network_eni)
LOG.debug("network config provided via converted eni data")
@@ -160,15 +177,15 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@property
def platform(self):
- return 'openstack'
+ return "openstack"
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.source.startswith('/dev'):
- subplatform_type = 'config-disk'
+ if self.source.startswith("/dev"):
+ subplatform_type = "config-disk"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.source)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.source)
def read_config_drive(source_dir):
@@ -190,7 +207,7 @@ def get_previous_iid(paths):
# interestingly, for this purpose the "previous" instance-id is the current
# instance-id. cloud-init hasn't moved them over yet as this datasource
# hasn't declared itself found.
- fname = os.path.join(paths.get_cpath('data'), 'instance-id')
+ fname = os.path.join(paths.get_cpath("data"), "instance-id")
try:
return util.load_file(fname).rstrip("\n")
except IOError:
@@ -200,14 +217,15 @@ def get_previous_iid(paths):
def on_first_boot(data, distro=None, network=True):
"""Performs any first-boot actions using data read from a config-drive."""
if not isinstance(data, dict):
- raise TypeError("Config-drive data expected to be a dict; not %s"
- % (type(data)))
+ raise TypeError(
+ "Config-drive data expected to be a dict; not %s" % (type(data))
+ )
if network:
- net_conf = data.get("network_config", '')
+ net_conf = data.get("network_config", "")
if net_conf and distro:
LOG.warning("Updating network interfaces from config drive")
distro.apply_network_config(eni.convert_eni_data(net_conf))
- write_injected_files(data.get('files'))
+ write_injected_files(data.get("files"))
def write_injected_files(files):
@@ -264,12 +282,13 @@ def find_candidate_devs(probe_optical=True, dslist=None):
# combine list of items by putting by-label items first
# followed by fstype items, but with dupes removed
- candidates = (by_label + [d for d in by_fstype if d not in by_label])
+ candidates = by_label + [d for d in by_fstype if d not in by_label]
# We are looking for a block device or partition with necessary label or
     # an unpartitioned block device (e.g. sda, not sda1)
- devices = [d for d in candidates
- if d in by_label or not util.is_partition(d)]
+ devices = [
+ d for d in candidates if d in by_label or not util.is_partition(d)
+ ]
LOG.debug("devices=%s dslist=%s", devices, dslist)
if devices and "IBMCloud" in dslist:
@@ -277,8 +296,11 @@ def find_candidate_devs(probe_optical=True, dslist=None):
ibm_platform, ibm_path = get_ibm_platform()
if ibm_path in devices:
devices.remove(ibm_path)
- LOG.debug("IBMCloud device '%s' (%s) removed from candidate list",
- ibm_path, ibm_platform)
+ LOG.debug(
+ "IBMCloud device '%s' (%s) removed from candidate list",
+ ibm_path,
+ ibm_platform,
+ )
return devices
@@ -296,4 +318,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
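
A small sketch of the candidate-device ordering that find_candidate_devs applies above, with illustrative device names (the toy predicate stands in for util.is_partition):

    def order_candidates(by_label, by_fstype, is_partition):
        # Label matches take priority; fstype matches follow with duplicates
        # dropped, and partitions lacking the config-2 label are excluded.
        candidates = by_label + [d for d in by_fstype if d not in by_label]
        return [d for d in candidates if d in by_label or not is_partition(d)]

    print(order_candidates(
        by_label=["/dev/sr0"],
        by_fstype=["/dev/sr0", "/dev/vdb", "/dev/vdb1"],
        is_partition=lambda d: d[-1].isdigit(),  # toy stand-in
    ))  # ['/dev/sr0', '/dev/vdb']
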
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 5040ce5b..52d3ad26 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -6,16 +6,14 @@
# DigitalOcean Droplet API:
# https://developers.digitalocean.com/documentation/metadata/
-from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import util
-
import cloudinit.sources.helpers.digitalocean as do_helper
+from cloudinit import log as logging
+from cloudinit import sources, util
LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
- 'metadata_url': 'http://169.254.169.254/metadata/v1.json',
+ "metadata_url": "http://169.254.169.254/metadata/v1.json",
}
 # Wait for up to a minute, retrying the meta-data server
@@ -28,20 +26,25 @@ MD_USE_IPV4LL = True
class DataSourceDigitalOcean(sources.DataSource):
- dsname = 'DigitalOcean'
+ dsname = "DigitalOcean"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "DigitalOcean"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
- self.retries = self.ds_cfg.get('retries', MD_RETRIES)
- self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
- self.use_ip4LL = self.ds_cfg.get('use_ip4LL', MD_USE_IPV4LL)
- self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(
+ sys_cfg, ["datasource", "DigitalOcean"], {}
+ ),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.use_ip4LL = self.ds_cfg.get("use_ip4LL", MD_USE_IPV4LL)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
self._network_config = None
def _get_sysinfo(self):
@@ -54,22 +57,25 @@ class DataSourceDigitalOcean(sources.DataSource):
if not is_do:
return False
- LOG.info("Running on digital ocean. droplet_id=%s", droplet_id)
+ LOG.info("Running on DigitalOcean. droplet_id=%s", droplet_id)
ipv4LL_nic = None
if self.use_ip4LL:
ipv4LL_nic = do_helper.assign_ipv4_link_local(self.distro)
md = do_helper.read_metadata(
- self.metadata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
self.metadata_full = md
- self.metadata['instance-id'] = md.get('droplet_id', droplet_id)
- self.metadata['local-hostname'] = md.get('hostname', droplet_id)
- self.metadata['interfaces'] = md.get('interfaces')
- self.metadata['public-keys'] = md.get('public_keys')
- self.metadata['availability_zone'] = md.get('region', 'default')
+ self.metadata["instance-id"] = md.get("droplet_id", droplet_id)
+ self.metadata["local-hostname"] = md.get("hostname", droplet_id)
+ self.metadata["interfaces"] = md.get("interfaces")
+ self.metadata["public-keys"] = md.get("public_keys")
+ self.metadata["availability_zone"] = md.get("region", "default")
self.vendordata_raw = md.get("vendor_data", None)
self.userdata_raw = md.get("user_data", None)
@@ -80,32 +86,34 @@ class DataSourceDigitalOcean(sources.DataSource):
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
- self.get_instance_id(), 'system-serial-number')
+ self.get_instance_id(), "system-serial-number"
+ )
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
- the IP information may have changed due to snapshot and/or
- migration.
+ the IP information may have changed due to snapshot and/or
+ migration.
"""
if self._network_config:
return self._network_config
- interfaces = self.metadata.get('interfaces')
+ interfaces = self.metadata.get("interfaces")
LOG.debug(interfaces)
if not interfaces:
raise Exception("Unable to get meta-data from server....")
- nameservers = self.metadata_full['dns']['nameservers']
+ nameservers = self.metadata_full["dns"]["nameservers"]
self._network_config = do_helper.convert_network_configuration(
- interfaces, nameservers)
+ interfaces, nameservers
+ )
return self._network_config
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, )),
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM,)),
]
@@ -113,4 +121,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
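
For reference, a sketch of how the droplet metadata JSON maps onto the fields set in _get_data above; the sample document is illustrative, not a verbatim DigitalOcean response:

    sample_md = {
        "droplet_id": 2756294,
        "hostname": "sample-droplet",
        "region": "nyc3",
        "public_keys": ["ssh-rsa AAAA... user@example"],
        "interfaces": {"public": [], "private": []},
    }

    metadata = {
        "instance-id": sample_md.get("droplet_id"),
        "local-hostname": sample_md.get("hostname"),
        "interfaces": sample_md.get("interfaces"),
        "public-keys": sample_md.get("public_keys"),
        "availability_zone": sample_md.get("region", "default"),
    }
    print(metadata["instance-id"], metadata["availability_zone"])
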
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 1930a509..03b3870c 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -8,19 +8,18 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
+import copy
import os
import time
from cloudinit import dmi
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit import sources
+from cloudinit import net, sources
from cloudinit import url_helper as uhelp
-from cloudinit import util
-from cloudinit import warnings
-from cloudinit.event import EventType
+from cloudinit import util, warnings
+from cloudinit.event import EventScope, EventType
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
LOG = logging.getLogger(__name__)
@@ -29,10 +28,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
STRICT_ID_DEFAULT = "warn"
-API_TOKEN_ROUTE = 'latest/api/token'
-AWS_TOKEN_TTL_SECONDS = '21600'
-AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
-AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+API_TOKEN_ROUTE = "latest/api/token"
+AWS_TOKEN_TTL_SECONDS = "21600"
+AWS_TOKEN_PUT_HEADER = "X-aws-ec2-metadata-token"
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + "-ttl-seconds"
AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
@@ -52,18 +51,18 @@ class CloudNames(object):
class DataSourceEc2(sources.DataSource):
- dsname = 'Ec2'
+ dsname = "Ec2"
# Default metadata urls that will be used if none are provided
# They will be checked for 'resolveability' and some of the
# following may be discarded if they do not resolve
metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
# The minimum supported metadata_version from the ec2 metadata apis
- min_metadata_version = '2009-04-04'
+ min_metadata_version = "2009-04-04"
# Priority ordered list of additional metadata versions which will be tried
# for extended metadata content. IPv6 support comes in 2016-09-02
- extended_metadata_versions = ['2018-09-24', '2016-09-02']
+ extended_metadata_versions = ["2018-09-24", "2016-09-02"]
# Setup read_url parameters per get_url_params.
url_max_wait = 120
@@ -75,6 +74,15 @@ class DataSourceEc2(sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
@@ -85,11 +93,18 @@ class DataSourceEc2(sources.DataSource):
def _get_data(self):
strict_mode, _sleep = read_strict_mode(
- util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
- STRICT_ID_DEFAULT), ("warn", None))
-
- LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s",
- strict_mode, self.cloud_name, self.platform)
+ util.get_cfg_by_path(
+ self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT
+ ),
+ ("warn", None),
+ )
+
+ LOG.debug(
+ "strict_mode: %s, cloud_name=%s cloud_platform=%s",
+ strict_mode,
+ self.cloud_name,
+ self.platform,
+ )
if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN:
return False
elif self.cloud_name == CloudNames.NO_EC2_METADATA:
@@ -102,20 +117,27 @@ class DataSourceEc2(sources.DataSource):
try:
with EphemeralDHCPv4(self.fallback_interface):
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
except NoDHCPLeaseError:
return False
else:
self._crawled_metadata = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
if not self._crawled_metadata:
return False
- self.metadata = self._crawled_metadata.get('meta-data', None)
- self.userdata_raw = self._crawled_metadata.get('user-data', None)
- self.identity = self._crawled_metadata.get(
- 'dynamic', {}).get('instance-identity', {}).get('document', {})
+ self.metadata = self._crawled_metadata.get("meta-data", None)
+ self.userdata_raw = self._crawled_metadata.get("user-data", None)
+ self.identity = (
+ self._crawled_metadata.get("dynamic", {})
+ .get("instance-identity", {})
+ .get("document", {})
+ )
return True
def is_classic_instance(self):
@@ -125,9 +147,9 @@ class DataSourceEc2(sources.DataSource):
# network_config where metadata will be present.
# Secondary call site is in packaging postinst script.
return False
- ifaces_md = self.metadata.get('network', {}).get('interfaces', {})
- for _mac, mac_data in ifaces_md.get('macs', {}).items():
- if 'vpc-id' in mac_data:
+ ifaces_md = self.metadata.get("network", {}).get("interfaces", {})
+ for _mac, mac_data in ifaces_md.get("macs", {}).items():
+ if "vpc-id" in mac_data:
return False
return True
@@ -135,12 +157,12 @@ class DataSourceEc2(sources.DataSource):
def launch_index(self):
if not self.metadata:
return None
- return self.metadata.get('ami-launch-index')
+ return self.metadata.get("ami-launch-index")
@property
def platform(self):
# Handle upgrade path of pickled ds
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
self._platform_type = DataSourceEc2.dsname.lower()
if not self._platform_type:
self._platform_type = DataSourceEc2.dsname.lower()
@@ -156,44 +178,47 @@ class DataSourceEc2(sources.DataSource):
min_metadata_version.
"""
# Assumes metadata service is already up
- url_tmpl = '{0}/{1}/meta-data/instance-id'
+ url_tmpl = "{0}/{1}/meta-data/instance-id"
headers = self._get_headers()
for api_ver in self.extended_metadata_versions:
url = url_tmpl.format(self.metadata_address, api_ver)
try:
- resp = uhelp.readurl(url=url, headers=headers,
- headers_redact=AWS_TOKEN_REDACT)
+ resp = uhelp.readurl(
+ url=url, headers=headers, headers_redact=AWS_TOKEN_REDACT
+ )
except uhelp.UrlError as e:
- LOG.debug('url %s raised exception %s', url, e)
+ LOG.debug("url %s raised exception %s", url, e)
else:
if resp.code == 200:
- LOG.debug('Found preferred metadata version %s', api_ver)
+ LOG.debug("Found preferred metadata version %s", api_ver)
return api_ver
elif resp.code == 404:
- msg = 'Metadata api version %s not present. Headers: %s'
+ msg = "Metadata api version %s not present. Headers: %s"
LOG.debug(msg, api_ver, resp.headers)
return self.min_metadata_version
def get_instance_id(self):
if self.cloud_name == CloudNames.AWS:
# Prefer the ID from the instance identity document, but fall back
- if not getattr(self, 'identity', None):
+ if not getattr(self, "identity", None):
             # If re-using cached datasource, its get_data run didn't
# setup self.identity. So we need to do that now.
api_version = self.get_metadata_api_version()
self.identity = ec2.get_instance_identity(
- api_version, self.metadata_address,
+ api_version,
+ self.metadata_address,
headers_cb=self._get_headers,
headers_redact=AWS_TOKEN_REDACT,
- exception_cb=self._refresh_stale_aws_token_cb).get(
- 'document', {})
+ exception_cb=self._refresh_stale_aws_token_cb,
+ ).get("document", {})
return self.identity.get(
- 'instanceId', self.metadata['instance-id'])
+ "instanceId", self.metadata["instance-id"]
+ )
else:
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
- """ Get an API token for EC2 Instance Metadata Service.
+ """Get an API token for EC2 Instance Metadata Service.
On EC2. IMDS will always answer an API token, unless
the instance owner has disabled the IMDS HTTP endpoint or
@@ -205,26 +230,29 @@ class DataSourceEc2(sources.DataSource):
urls = []
url2base = {}
url_path = API_TOKEN_ROUTE
- request_method = 'PUT'
+ request_method = "PUT"
for url in mdurls:
- cur = '{0}/{1}'.format(url, url_path)
+ cur = "{0}/{1}".format(url, url_path)
urls.append(cur)
url2base[cur] = url
# use the self._imds_exception_cb to check for Read errors
- LOG.debug('Fetching Ec2 IMDSv2 API Token')
+ LOG.debug("Fetching Ec2 IMDSv2 API Token")
response = None
url = None
url_params = self.get_url_params()
try:
url, response = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
headers_cb=self._get_headers,
exception_cb=self._imds_exception_cb,
request_method=request_method,
- headers_redact=AWS_TOKEN_REDACT)
+ headers_redact=AWS_TOKEN_REDACT,
+ )
except uhelp.UrlError:
             # We use the raised exception to interrupt the retry loop.
# Nothing else to do here.
@@ -250,8 +278,10 @@ class DataSourceEc2(sources.DataSource):
filtered = [x for x in mdurls if util.is_resolvable_url(x)]
if set(filtered) != set(mdurls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(mdurls) - set(filtered))))
+ LOG.debug(
+ "Removed the following from metadata urls: %s",
+ list((set(mdurls) - set(filtered))),
+ )
if len(filtered):
mdurls = filtered
@@ -269,20 +299,25 @@ class DataSourceEc2(sources.DataSource):
# if we can't get a token, use instance-id path
urls = []
url2base = {}
- url_path = '{ver}/meta-data/instance-id'.format(
- ver=self.min_metadata_version)
- request_method = 'GET'
+ url_path = "{ver}/meta-data/instance-id".format(
+ ver=self.min_metadata_version
+ )
+ request_method = "GET"
for url in mdurls:
- cur = '{0}/{1}'.format(url, url_path)
+ cur = "{0}/{1}".format(url, url_path)
urls.append(cur)
url2base[cur] = url
start_time = time.time()
url, _ = uhelp.wait_for_url(
- urls=urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds, status_cb=LOG.warning,
- headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
- request_method=request_method)
+ urls=urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ status_cb=LOG.warning,
+ headers_redact=AWS_TOKEN_REDACT,
+ headers_cb=self._get_headers,
+ request_method=request_method,
+ )
if url:
metadata_address = url2base[url]
@@ -293,8 +328,11 @@ class DataSourceEc2(sources.DataSource):
elif self.cloud_name == CloudNames.AWS:
LOG.warning("IMDS's HTTP endpoint is probably disabled")
else:
- LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
+ LOG.critical(
+ "Giving up on md from %s after %s seconds",
+ urls,
+ int(time.time() - start_time),
+ )
return bool(metadata_address)
@@ -302,7 +340,7 @@ class DataSourceEc2(sources.DataSource):
# Consult metadata service, that has
# ephemeral0: sdb
# and return 'sdb' for input 'ephemeral0'
- if 'block-device-mapping' not in self.metadata:
+ if "block-device-mapping" not in self.metadata:
return None
# Example:
@@ -311,7 +349,7 @@ class DataSourceEc2(sources.DataSource):
# 'ephemeral0': '/dev/sdb',
# 'root': '/dev/sda1'}
found = None
- bdm = self.metadata['block-device-mapping']
+ bdm = self.metadata["block-device-mapping"]
if not isinstance(bdm, dict):
LOG.debug("block-device-mapping not a dictionary: '%s'", bdm)
return None
@@ -354,17 +392,18 @@ class DataSourceEc2(sources.DataSource):
try:
if self.cloud_name == CloudNames.AWS:
return self.identity.get(
- 'availabilityZone',
- self.metadata['placement']['availability-zone'])
+ "availabilityZone",
+ self.metadata["placement"]["availability-zone"],
+ )
else:
- return self.metadata['placement']['availability-zone']
+ return self.metadata["placement"]["availability-zone"]
except KeyError:
return None
@property
def region(self):
if self.cloud_name == CloudNames.AWS:
- region = self.identity.get('region')
+ region = self.identity.get("region")
# Fallback to trimming the availability zone if region is missing
if self.availability_zone and not region:
region = self.availability_zone[:-1]
@@ -381,7 +420,8 @@ class DataSourceEc2(sources.DataSource):
if self.cloud_name == CloudNames.UNKNOWN:
warn_if_necessary(
util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
- cfg)
+ cfg,
+ )
@property
def network_config(self):
@@ -392,30 +432,39 @@ class DataSourceEc2(sources.DataSource):
if self.metadata is None:
# this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
- "Unexpected call to network_config when metadata is None.")
+ "Unexpected call to network_config when metadata is None."
+ )
return None
result = None
no_network_metadata_on_aws = bool(
- 'network' not in self.metadata and
- self.cloud_name == CloudNames.AWS)
+ "network" not in self.metadata
+ and self.cloud_name == CloudNames.AWS
+ )
if no_network_metadata_on_aws:
- LOG.debug("Metadata 'network' not present:"
- " Refreshing stale metadata from prior to upgrade.")
+ LOG.debug(
+ "Metadata 'network' not present:"
+ " Refreshing stale metadata from prior to upgrade."
+ )
util.log_time(
- logfunc=LOG.debug, msg='Re-crawl of metadata service',
- func=self.get_data)
+ logfunc=LOG.debug,
+ msg="Re-crawl of metadata service",
+ func=self.get_data,
+ )
iface = self.fallback_interface
- net_md = self.metadata.get('network')
+ net_md = self.metadata.get("network")
if isinstance(net_md, dict):
# SRU_BLOCKER: xenial, bionic and eoan should default
# apply_full_imds_network_config to False to retain original
# behavior on those releases.
result = convert_ec2_metadata_network_config(
- net_md, fallback_nic=iface,
+ net_md,
+ fallback_nic=iface,
full_network_config=util.get_cfg_option_bool(
- self.ds_cfg, 'apply_full_imds_network_config', True))
+ self.ds_cfg, "apply_full_imds_network_config", True
+ ),
+ )
# RELEASE_BLOCKER: xenial should drop the below if statement,
# because the issue being addressed doesn't exist pre-netplan.
@@ -426,7 +475,15 @@ class DataSourceEc2(sources.DataSource):
# Non-VPC (aka Classic) Ec2 instances need to rewrite the
# network config file every boot due to MAC address change.
if self.is_classic_instance():
- self.update_events['network'].add(EventType.BOOT)
+ self.default_update_events = copy.deepcopy(
+ self.default_update_events
+ )
+ self.default_update_events[EventScope.NETWORK].add(
+ EventType.BOOT
+ )
+ self.default_update_events[EventScope.NETWORK].add(
+ EventType.BOOT_LEGACY
+ )
else:
LOG.warning("Metadata 'network' key not valid: %s.", net_md)
self._network_config = result
@@ -438,7 +495,7 @@ class DataSourceEc2(sources.DataSource):
if self._fallback_interface is None:
# fallback_nic was used at one point, so restored objects may
# have an attribute there. respect that if found.
- _legacy_fbnic = getattr(self, 'fallback_nic', None)
+ _legacy_fbnic = getattr(self, "fallback_nic", None)
if _legacy_fbnic:
self._fallback_interface = _legacy_fbnic
self.fallback_nic = None
@@ -463,26 +520,37 @@ class DataSourceEc2(sources.DataSource):
else:
exc_cb = exc_cb_ud = None
try:
- crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb_ud)
- crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb)
+ crawled_metadata["user-data"] = ec2.get_instance_userdata(
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb_ud,
+ )
+ crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb,
+ )
if self.cloud_name == CloudNames.AWS:
identity = ec2.get_instance_identity(
- api_version, self.metadata_address,
- headers_cb=self._get_headers, headers_redact=redact,
- exception_cb=exc_cb)
- crawled_metadata['dynamic'] = {'instance-identity': identity}
+ api_version,
+ self.metadata_address,
+ headers_cb=self._get_headers,
+ headers_redact=redact,
+ exception_cb=exc_cb,
+ )
+ crawled_metadata["dynamic"] = {"instance-identity": identity}
except Exception:
util.logexc(
- LOG, "Failed reading from metadata address %s",
- self.metadata_address)
+ LOG,
+ "Failed reading from metadata address %s",
+ self.metadata_address,
+ )
return {}
- crawled_metadata['_metadata_api_version'] = api_version
+ crawled_metadata["_metadata_api_version"] = api_version
return crawled_metadata
def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS):
@@ -495,23 +563,27 @@ class DataSourceEc2(sources.DataSource):
return None
LOG.debug("Refreshing Ec2 metadata API token")
request_header = {AWS_TOKEN_REQ_HEADER: seconds}
- token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
+ token_url = "{}/{}".format(self.metadata_address, API_TOKEN_ROUTE)
try:
- response = uhelp.readurl(token_url, headers=request_header,
- headers_redact=AWS_TOKEN_REDACT,
- request_method="PUT")
+ response = uhelp.readurl(
+ token_url,
+ headers=request_header,
+ headers_redact=AWS_TOKEN_REDACT,
+ request_method="PUT",
+ )
except uhelp.UrlError as e:
LOG.warning(
- 'Unable to get API token: %s raised exception %s',
- token_url, e)
+ "Unable to get API token: %s raised exception %s", token_url, e
+ )
return None
return response.contents
def _skip_or_refresh_stale_aws_token_cb(self, msg, exception):
"""Callback will not retry on SKIP_USERDATA_CODES or if no token
- is available."""
+ is available."""
retry = ec2.skip_retry_on_codes(
- ec2.SKIP_USERDATA_CODES, msg, exception)
+ ec2.SKIP_USERDATA_CODES, msg, exception
+ )
if not retry:
return False # False raises exception
return self._refresh_stale_aws_token_cb(msg, exception)
@@ -541,14 +613,17 @@ class DataSourceEc2(sources.DataSource):
# requests.ConnectionError will have exception.code == None
if exception.code and exception.code >= 400:
if exception.code == 403:
- LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
- 'HTTP endpoint is disabled. Aborting.')
+ LOG.warning(
+ "Ec2 IMDS endpoint returned a 403 error. "
+ "HTTP endpoint is disabled. Aborting."
+ )
else:
- LOG.warning('Fatal error while requesting '
- 'Ec2 IMDSv2 API tokens')
+ LOG.warning(
+ "Fatal error while requesting Ec2 IMDSv2 API tokens"
+ )
raise exception
- def _get_headers(self, url=''):
+ def _get_headers(self, url=""):
"""Return a dict of headers for accessing a url.
If _api_token is unset on AWS, attempt to refresh the token via a PUT
@@ -578,13 +653,17 @@ class DataSourceEc2Local(DataSourceEc2):
metadata service. If the metadata service provides network configuration
then render the network configuration for that instance based on metadata.
"""
+
perform_dhcp_setup = True # Use dhcp before querying metadata
def get_data(self):
supported_platforms = (CloudNames.AWS,)
if self.cloud_name not in supported_platforms:
- LOG.debug("Local Ec2 mode only supported on %s, not %s",
- supported_platforms, self.cloud_name)
+ LOG.debug(
+ "Local Ec2 mode only supported on %s, not %s",
+ supported_platforms,
+ self.cloud_name,
+ )
return False
return super(DataSourceEc2Local, self).get_data()
@@ -602,18 +681,19 @@ def parse_strict_mode(cfgval):
# true, false, warn,[sleep]
# return tuple with string mode (true|false|warn) and sleep.
if cfgval is True:
- return 'true', None
+ return "true", None
if cfgval is False:
- return 'false', None
+ return "false", None
if not cfgval:
- return 'warn', 0
+ return "warn", 0
mode, _, sleep = cfgval.partition(",")
- if mode not in ('true', 'false', 'warn'):
+ if mode not in ("true", "false", "warn"):
raise ValueError(
"Invalid mode '%s' in strict_id setting '%s': "
- "Expected one of 'true', 'false', 'warn'." % (mode, cfgval))
+ "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)
+ )
if sleep:
try:
@@ -639,47 +719,53 @@ def warn_if_necessary(cfgval, cfg):
if mode == "false":
return
- warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep)
+ warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep)
def identify_aws(data):
# data is a dictionary returned by _collect_platform_data.
- if (data['uuid'].startswith('ec2') and
- (data['uuid_source'] == 'hypervisor' or
- data['uuid'] == data['serial'])):
+ if data["uuid"].startswith("ec2") and (
+ data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"]
+ ):
return CloudNames.AWS
return None
def identify_brightbox(data):
- if data['serial'].endswith('.brightbox.com'):
+ if data["serial"].endswith(".brightbox.com"):
return CloudNames.BRIGHTBOX
def identify_zstack(data):
- if data['asset_tag'].endswith('.zstack.io'):
+ if data["asset_tag"].endswith(".zstack.io"):
return CloudNames.ZSTACK
def identify_e24cloud(data):
- if data['vendor'] == 'e24cloud':
+ if data["vendor"] == "e24cloud":
return CloudNames.E24CLOUD
def identify_platform():
# identify the platform and return an entry in CloudNames.
data = _collect_platform_data()
- checks = (identify_aws, identify_brightbox, identify_zstack,
- identify_e24cloud, lambda x: CloudNames.UNKNOWN)
+ checks = (
+ identify_aws,
+ identify_brightbox,
+ identify_zstack,
+ identify_e24cloud,
+ lambda x: CloudNames.UNKNOWN,
+ )
for checker in checks:
try:
result = checker(data)
if result:
return result
except Exception as e:
- LOG.warning("calling %s with %s raised exception: %s",
- checker, data, e)
+ LOG.warning(
+ "calling %s with %s raised exception: %s", checker, data, e
+ )
def _collect_platform_data():
@@ -698,36 +784,36 @@ def _collect_platform_data():
data = {}
try:
uuid = util.load_file("/sys/hypervisor/uuid").strip()
- data['uuid_source'] = 'hypervisor'
+ data["uuid_source"] = "hypervisor"
except Exception:
- uuid = dmi.read_dmi_data('system-uuid')
- data['uuid_source'] = 'dmi'
+ uuid = dmi.read_dmi_data("system-uuid")
+ data["uuid_source"] = "dmi"
if uuid is None:
- uuid = ''
- data['uuid'] = uuid.lower()
+ uuid = ""
+ data["uuid"] = uuid.lower()
- serial = dmi.read_dmi_data('system-serial-number')
+ serial = dmi.read_dmi_data("system-serial-number")
if serial is None:
- serial = ''
+ serial = ""
- data['serial'] = serial.lower()
+ data["serial"] = serial.lower()
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
if asset_tag is None:
- asset_tag = ''
+ asset_tag = ""
- data['asset_tag'] = asset_tag.lower()
+ data["asset_tag"] = asset_tag.lower()
- vendor = dmi.read_dmi_data('system-manufacturer')
- data['vendor'] = (vendor if vendor else '').lower()
+ vendor = dmi.read_dmi_data("system-manufacturer")
+ data["vendor"] = (vendor if vendor else "").lower()
return data
def convert_ec2_metadata_network_config(
- network_md, macs_to_nics=None, fallback_nic=None,
- full_network_config=True):
+ network_md, macs_to_nics=None, fallback_nic=None, full_network_config=True
+):
"""Convert ec2 metadata to network config version 2 data dict.
@param: network_md: 'network' portion of EC2 metadata.
@@ -746,49 +832,55 @@ def convert_ec2_metadata_network_config(
@return A dict of network config version 2 based on the metadata and macs.
"""
- netcfg = {'version': 2, 'ethernets': {}}
+ netcfg = {"version": 2, "ethernets": {}}
if not macs_to_nics:
macs_to_nics = net.get_interfaces_by_mac()
- macs_metadata = network_md['interfaces']['macs']
+ macs_metadata = network_md["interfaces"]["macs"]
if not full_network_config:
for mac, nic_name in macs_to_nics.items():
if nic_name == fallback_nic:
break
- dev_config = {'dhcp4': True,
- 'dhcp6': False,
- 'match': {'macaddress': mac.lower()},
- 'set-name': nic_name}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp6": False,
+ "match": {"macaddress": mac.lower()},
+ "set-name": nic_name,
+ }
nic_metadata = macs_metadata.get(mac)
- if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
- dev_config['dhcp6'] = True
- netcfg['ethernets'][nic_name] = dev_config
+ if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured
+ dev_config["dhcp6"] = True
+ netcfg["ethernets"][nic_name] = dev_config
return netcfg
# Apply network config for all nics and any secondary IPv4/v6 addresses
+ nic_idx = 0
for mac, nic_name in sorted(macs_to_nics.items()):
nic_metadata = macs_metadata.get(mac)
if not nic_metadata:
continue # Not a physical nic represented in metadata
# device-number is zero-indexed, we want it 1-indexed for the
# multiplication on the following line
- nic_idx = int(nic_metadata['device-number']) + 1
- dhcp_override = {'route-metric': nic_idx * 100}
- dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
- 'dhcp6': False,
- 'match': {'macaddress': mac.lower()},
- 'set-name': nic_name}
- if nic_metadata.get('ipv6s'): # Any IPv6 addresses configured
- dev_config['dhcp6'] = True
- dev_config['dhcp6-overrides'] = dhcp_override
- dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
- if not dev_config['addresses']:
- dev_config.pop('addresses') # Since we found none configured
- netcfg['ethernets'][nic_name] = dev_config
+ nic_idx = int(nic_metadata.get("device-number", nic_idx)) + 1
+ dhcp_override = {"route-metric": nic_idx * 100}
+ dev_config = {
+ "dhcp4": True,
+ "dhcp4-overrides": dhcp_override,
+ "dhcp6": False,
+ "match": {"macaddress": mac.lower()},
+ "set-name": nic_name,
+ }
+ if nic_metadata.get("ipv6s"): # Any IPv6 addresses configured
+ dev_config["dhcp6"] = True
+ dev_config["dhcp6-overrides"] = dhcp_override
+ dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac)
+ if not dev_config["addresses"]:
+ dev_config.pop("addresses") # Since we found none configured
+ netcfg["ethernets"][nic_name] = dev_config
# Remove route-metric dhcp overrides if only one nic configured
- if len(netcfg['ethernets']) == 1:
- for nic_name in netcfg['ethernets'].keys():
- netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
- netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
+ if len(netcfg["ethernets"]) == 1:
+ for nic_name in netcfg["ethernets"].keys():
+ netcfg["ethernets"][nic_name].pop("dhcp4-overrides")
+ netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None)
return netcfg
@@ -798,18 +890,22 @@ def get_secondary_addresses(nic_metadata, mac):
:return: List of secondary IPv4 or IPv6 addresses to configure on the
interface
"""
- ipv4s = nic_metadata.get('local-ipv4s')
- ipv6s = nic_metadata.get('ipv6s')
+ ipv4s = nic_metadata.get("local-ipv4s")
+ ipv6s = nic_metadata.get("ipv6s")
addresses = []
# In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP
if bool(isinstance(ipv4s, list) and len(ipv4s) > 1):
addresses.extend(
_get_secondary_addresses(
- nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+ nic_metadata, "subnet-ipv4-cidr-block", mac, ipv4s, "24"
+ )
+ )
if bool(isinstance(ipv6s, list) and len(ipv6s) > 1):
addresses.extend(
_get_secondary_addresses(
- nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+ nic_metadata, "subnet-ipv6-cidr-block", mac, ipv6s, "128"
+ )
+ )
return sorted(addresses)
@@ -822,18 +918,22 @@ def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
addresses = []
cidr = nic_metadata.get(cidr_key)
prefix = default_prefix
- if not cidr or len(cidr.split('/')) != 2:
- ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+ if not cidr or len(cidr.split("/")) != 2:
+ ip_type = "ipv4" if "ipv4" in cidr_key else "ipv6"
LOG.warning(
- 'Could not parse %s %s for mac %s. %s network'
- ' config prefix defaults to /%s',
- cidr_key, cidr, mac, ip_type, prefix)
+ "Could not parse %s %s for mac %s. %s network"
+ " config prefix defaults to /%s",
+ cidr_key,
+ cidr,
+ mac,
+ ip_type,
+ prefix,
+ )
else:
- prefix = cidr.split('/')[1]
+ prefix = cidr.split("/")[1]
     # We know we have > 1 IPs in metadata for this IP type
for ip in ips[1:]:
- addresses.append(
- '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+ addresses.append("{ip}/{prefix}".format(ip=ip, prefix=prefix))
return addresses
@@ -848,4 +948,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
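
The convert_ec2_metadata_network_config hunks add a nic_idx fallback for NIC metadata that lacks device-number; a sketch of the route-metric rule those comments describe (names and MAC are illustrative):

    def route_metric(device_number):
        # device-number is zero-indexed; (n + 1) * 100 keeps the primary
        # NIC's routes preferred, since a lower metric wins.
        return (int(device_number) + 1) * 100

    assert route_metric(0) == 100 and route_metric(1) == 200

    # Resulting netplan v2 shape for one of two NICs (values illustrative);
    # note the hunk above pops the *-overrides keys when only one NIC exists.
    eth0 = {
        "dhcp4": True,
        "dhcp4-overrides": {"route-metric": route_metric(0)},
        "dhcp6": False,
        "match": {"macaddress": "0a:00:00:00:00:01"},
        "set-name": "eth0",
    }
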
diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py
index adee6d79..cc5136d7 100644
--- a/cloudinit/sources/DataSourceExoscale.py
+++ b/cloudinit/sources/DataSourceExoscale.py
@@ -5,11 +5,9 @@
from cloudinit import dmi
from cloudinit import ec2_utils as ec2
-from cloudinit import log as logging
-from cloudinit import sources
from cloudinit import helpers
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import log as logging
+from cloudinit import sources, url_helper, util
LOG = logging.getLogger(__name__)
@@ -25,7 +23,7 @@ EXOSCALE_DMI_NAME = "Exoscale"
class DataSourceExoscale(sources.DataSource):
- dsname = 'Exoscale'
+ dsname = "Exoscale"
url_max_wait = 120
@@ -33,12 +31,13 @@ class DataSourceExoscale(sources.DataSource):
super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths)
LOG.debug("Initializing the Exoscale datasource")
- self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL)
- self.api_version = self.ds_cfg.get('api_version', API_VERSION)
+ self.metadata_url = self.ds_cfg.get("metadata_url", METADATA_URL)
+ self.api_version = self.ds_cfg.get("api_version", API_VERSION)
self.password_server_port = int(
- self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT))
- self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT)
- self.url_retries = self.ds_cfg.get('retries', URL_RETRIES)
+ self.ds_cfg.get("password_server_port", PASSWORD_SERVER_PORT)
+ )
+ self.url_timeout = self.ds_cfg.get("timeout", URL_TIMEOUT)
+ self.url_retries = self.ds_cfg.get("retries", URL_RETRIES)
self.extra_config = {}
def activate(self, cfg, is_new_instance):
@@ -50,23 +49,25 @@ class DataSourceExoscale(sources.DataSource):
# a user has triggered a password reset. So calling that password
# service generally results in no additional cloud-config.
# TODO(Create util functions for overriding merged sys_cfg module freq)
- mod = 'set_passwords'
- sem_path = self.paths.get_ipath_cur('sem')
+ mod = "set_passwords"
+ sem_path = self.paths.get_ipath_cur("sem")
sem_helper = helpers.FileSemaphores(sem_path)
- if sem_helper.clear('config_' + mod, None):
- LOG.debug('Overriding module set-passwords with frequency always')
+ if sem_helper.clear("config_" + mod, None):
+ LOG.debug("Overriding module set-passwords with frequency always")
def wait_for_metadata_service(self):
"""Wait for the metadata service to be reachable."""
metadata_url = "{}/{}/meta-data/instance-id".format(
- self.metadata_url, self.api_version)
+ self.metadata_url, self.api_version
+ )
url, _response = url_helper.wait_for_url(
urls=[metadata_url],
max_wait=self.url_max_wait,
timeout=self.url_timeout,
- status_cb=LOG.critical)
+ status_cb=LOG.critical,
+ )
return bool(url)
@@ -78,15 +79,20 @@ class DataSourceExoscale(sources.DataSource):
"""
metadata_ready = util.log_time(
logfunc=LOG.info,
- msg='waiting for the metadata service',
- func=self.wait_for_metadata_service)
+ msg="waiting for the metadata service",
+ func=self.wait_for_metadata_service,
+ )
if not metadata_ready:
return {}
- return read_metadata(self.metadata_url, self.api_version,
- self.password_server_port, self.url_timeout,
- self.url_retries)
+ return read_metadata(
+ self.metadata_url,
+ self.api_version,
+ self.password_server_port,
+ self.url_timeout,
+ self.url_retries,
+ )
def _get_data(self):
"""Fetch the user data, the metadata and the VM password
@@ -100,15 +106,16 @@ class DataSourceExoscale(sources.DataSource):
data = util.log_time(
logfunc=LOG.debug,
- msg='Crawl of metadata service',
- func=self.crawl_metadata)
+ msg="Crawl of metadata service",
+ func=self.crawl_metadata,
+ )
if not data:
return False
- self.userdata_raw = data['user-data']
- self.metadata = data['meta-data']
- password = data.get('password')
+ self.userdata_raw = data["user-data"]
+ self.metadata = data["meta-data"]
+ password = data.get("password")
password_config = {}
if password:
@@ -119,16 +126,17 @@ class DataSourceExoscale(sources.DataSource):
# leave the password always disabled if no password is ever set, or
# leave the password login enabled if we set it once.
password_config = {
- 'ssh_pwauth': True,
- 'password': password,
- 'chpasswd': {
- 'expire': False,
+ "ssh_pwauth": True,
+ "password": password,
+ "chpasswd": {
+ "expire": False,
},
}
# builtin extra_config overrides password_config
self.extra_config = util.mergemanydict(
- [self.extra_config, password_config])
+ [self.extra_config, password_config]
+ )
return True
@@ -136,8 +144,9 @@ class DataSourceExoscale(sources.DataSource):
return self.extra_config
def _is_platform_viable(self):
- return dmi.read_dmi_data('system-product-name').startswith(
- EXOSCALE_DMI_NAME)
+ return dmi.read_dmi_data("system-product-name").startswith(
+ EXOSCALE_DMI_NAME
+ )
# Used to match classes to dependencies
@@ -151,28 +160,32 @@ def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
-def get_password(metadata_url=METADATA_URL,
- api_version=API_VERSION,
- password_server_port=PASSWORD_SERVER_PORT,
- url_timeout=URL_TIMEOUT,
- url_retries=URL_RETRIES):
+def get_password(
+ metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES,
+):
"""Obtain the VM's password if set.
Once fetched, the password is marked saved. Future calls to this method may
return empty string or 'saved_password'."""
- password_url = "{}:{}/{}/".format(metadata_url, password_server_port,
- api_version)
+ password_url = "{}:{}/{}/".format(
+ metadata_url, password_server_port, api_version
+ )
response = url_helper.read_file_or_url(
password_url,
ssl_details=None,
headers={"DomU_Request": "send_my_password"},
timeout=url_timeout,
- retries=url_retries)
- password = response.contents.decode('utf-8')
+ retries=url_retries,
+ )
+ password = response.contents.decode("utf-8")
# the password is empty or already saved
# Note: the original metadata server would answer an additional
# 'bad_request' status, but the Exoscale implementation does not.
- if password in ['', 'saved_password']:
+ if password in ["", "saved_password"]:
return None
# save the password
url_helper.read_file_or_url(
@@ -180,44 +193,50 @@ def get_password(metadata_url=METADATA_URL,
ssl_details=None,
headers={"DomU_Request": "saved_password"},
timeout=url_timeout,
- retries=url_retries)
+ retries=url_retries,
+ )
return password
-def read_metadata(metadata_url=METADATA_URL,
- api_version=API_VERSION,
- password_server_port=PASSWORD_SERVER_PORT,
- url_timeout=URL_TIMEOUT,
- url_retries=URL_RETRIES):
+def read_metadata(
+ metadata_url=METADATA_URL,
+ api_version=API_VERSION,
+ password_server_port=PASSWORD_SERVER_PORT,
+ url_timeout=URL_TIMEOUT,
+ url_retries=URL_RETRIES,
+):
"""Query the metadata server and return the retrieved data."""
crawled_metadata = {}
- crawled_metadata['_metadata_api_version'] = api_version
+ crawled_metadata["_metadata_api_version"] = api_version
try:
- crawled_metadata['user-data'] = ec2.get_instance_userdata(
- api_version,
- metadata_url,
- timeout=url_timeout,
- retries=url_retries)
- crawled_metadata['meta-data'] = ec2.get_instance_metadata(
- api_version,
- metadata_url,
- timeout=url_timeout,
- retries=url_retries)
+ crawled_metadata["user-data"] = ec2.get_instance_userdata(
+ api_version, metadata_url, timeout=url_timeout, retries=url_retries
+ )
+ crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+ api_version, metadata_url, timeout=url_timeout, retries=url_retries
+ )
except Exception as e:
- util.logexc(LOG, "failed reading from metadata url %s (%s)",
- metadata_url, e)
+ util.logexc(
+ LOG, "failed reading from metadata url %s (%s)", metadata_url, e
+ )
return {}
try:
- crawled_metadata['password'] = get_password(
+ crawled_metadata["password"] = get_password(
api_version=api_version,
metadata_url=metadata_url,
password_server_port=password_server_port,
url_retries=url_retries,
- url_timeout=url_timeout)
+ url_timeout=url_timeout,
+ )
except Exception as e:
- util.logexc(LOG, "failed to read from password server url %s:%s (%s)",
- metadata_url, password_server_port, e)
+ util.logexc(
+ LOG,
+ "failed to read from password server url %s:%s (%s)",
+ metadata_url,
+ password_server_port,
+ e,
+ )
return crawled_metadata
@@ -225,35 +244,40 @@ def read_metadata(metadata_url=METADATA_URL,
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='Query Exoscale Metadata')
+ parser = argparse.ArgumentParser(description="Query Exoscale Metadata")
parser.add_argument(
"--endpoint",
metavar="URL",
help="The url of the metadata service.",
- default=METADATA_URL)
+ default=METADATA_URL,
+ )
parser.add_argument(
"--version",
metavar="VERSION",
help="The version of the metadata endpoint to query.",
- default=API_VERSION)
+ default=API_VERSION,
+ )
parser.add_argument(
"--retries",
metavar="NUM",
type=int,
help="The number of retries querying the endpoint.",
- default=URL_RETRIES)
+ default=URL_RETRIES,
+ )
parser.add_argument(
"--timeout",
metavar="NUM",
type=int,
help="The time in seconds to wait before timing out.",
- default=URL_TIMEOUT)
+ default=URL_TIMEOUT,
+ )
parser.add_argument(
"--password-port",
metavar="PORT",
type=int,
help="The port on which the password endpoint listens",
- default=PASSWORD_SERVER_PORT)
+ default=PASSWORD_SERVER_PORT,
+ )
args = parser.parse_args()
@@ -262,7 +286,8 @@ if __name__ == "__main__":
api_version=args.version,
password_server_port=args.password_port,
url_timeout=args.timeout,
- url_retries=args.retries)
+ url_retries=args.retries,
+ )
print(util.json_dumps(data))
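
As an aside, the password-server handshake used by get_password above is easy to exercise standalone: one GET with a "DomU_Request: send_my_password" header fetches the password, and a second GET with "saved_password" acknowledges it so later reads return 'saved_password'. A minimal sketch using the requests library; the host, port and version values below are placeholders for the module's METADATA_URL, PASSWORD_SERVER_PORT and API_VERSION constants:

    import requests

    METADATA_URL = "http://169.254.169.254"  # placeholder for the module constant
    PASSWORD_SERVER_PORT = 8080              # placeholder value
    API_VERSION = "latest"                   # placeholder value

    def fetch_and_ack_password():
        url = "{}:{}/{}/".format(METADATA_URL, PASSWORD_SERVER_PORT, API_VERSION)
        # First request: ask the password server for the VM password.
        resp = requests.get(
            url, headers={"DomU_Request": "send_my_password"}, timeout=10
        )
        password = resp.text
        if password in ("", "saved_password"):
            return None  # nothing set, or already consumed on a prior boot
        # Second request: mark it saved so it is not re-applied next boot.
        requests.get(url, headers={"DomU_Request": "saved_password"}, timeout=10)
        return password
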
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 746caddb..c470bea8 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -4,39 +4,46 @@
import datetime
import json
-
from base64 import b64decode
+from contextlib import suppress as noop
from cloudinit import dmi
-from cloudinit.distros import ug_util
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
+from cloudinit.distros import ug_util
+from cloudinit.net.dhcp import EphemeralDHCPv4
LOG = logging.getLogger(__name__)
-MD_V1_URL = 'http://metadata.google.internal/computeMetadata/v1/'
-BUILTIN_DS_CONFIG = {'metadata_url': MD_V1_URL}
-REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
-GUEST_ATTRIBUTES_URL = ('http://metadata.google.internal/computeMetadata/'
- 'v1/instance/guest-attributes')
-HOSTKEY_NAMESPACE = 'hostkeys'
-HEADERS = {'Metadata-Flavor': 'Google'}
+MD_V1_URL = "http://metadata.google.internal/computeMetadata/v1/"
+BUILTIN_DS_CONFIG = {"metadata_url": MD_V1_URL}
+REQUIRED_FIELDS = ("instance-id", "availability-zone", "local-hostname")
+GUEST_ATTRIBUTES_URL = (
+ "http://metadata.google.internal/computeMetadata/"
+ "v1/instance/guest-attributes"
+)
+HOSTKEY_NAMESPACE = "hostkeys"
+HEADERS = {"Metadata-Flavor": "Google"}
class GoogleMetadataFetcher(object):
-
- def __init__(self, metadata_address):
+ def __init__(self, metadata_address, num_retries, sec_between_retries):
self.metadata_address = metadata_address
+ self.num_retries = num_retries
+ self.sec_between_retries = sec_between_retries
def get_value(self, path, is_text, is_recursive=False):
value = None
try:
url = self.metadata_address + path
if is_recursive:
- url += '/?recursive=True'
- resp = url_helper.readurl(url=url, headers=HEADERS)
+ url += "/?recursive=True"
+ resp = url_helper.readurl(
+ url=url,
+ headers=HEADERS,
+ retries=self.num_retries,
+ sec_between=self.sec_between_retries,
+ )
except url_helper.UrlError as exc:
msg = "url %s raised exception %s"
LOG.debug(msg, path, exc)
@@ -45,7 +52,7 @@ class GoogleMetadataFetcher(object):
if is_text:
value = util.decode_binary(resp.contents)
else:
- value = resp.contents.decode('utf-8')
+ value = resp.contents.decode("utf-8")
else:
LOG.debug("url %s returned code %s", path, resp.code)
return value
@@ -53,7 +60,8 @@ class GoogleMetadataFetcher(object):
class DataSourceGCE(sources.DataSource):
- dsname = 'GCE'
+ dsname = "GCE"
+ perform_dhcp_setup = False
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -62,24 +70,38 @@ class DataSourceGCE(sources.DataSource):
(users, _groups) = ug_util.normalize_users_groups(sys_cfg, distro)
(self.default_user, _user_config) = ug_util.extract_default(users)
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "GCE"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
def _get_data(self):
- ret = util.log_time(
- LOG.debug, 'Crawl of GCE metadata service',
- read_md, kwargs={'address': self.metadata_address})
-
- if not ret['success']:
- if ret['platform_reports_gce']:
- LOG.warning(ret['reason'])
+ url_params = self.get_url_params()
+ network_context = noop()
+ if self.perform_dhcp_setup:
+ network_context = EphemeralDHCPv4(self.fallback_interface)
+ with network_context:
+ ret = util.log_time(
+ LOG.debug,
+ "Crawl of GCE metadata service",
+ read_md,
+ kwargs={
+ "address": self.metadata_address,
+ "url_params": url_params,
+ },
+ )
+
+ if not ret["success"]:
+ if ret["platform_reports_gce"]:
+ LOG.warning(ret["reason"])
else:
- LOG.debug(ret['reason'])
+ LOG.debug(ret["reason"])
return False
- self.metadata = ret['meta-data']
- self.userdata_raw = ret['user-data']
+ self.metadata = ret["meta-data"]
+ self.userdata_raw = ret["user-data"]
return True
@property
@@ -88,10 +110,10 @@ class DataSourceGCE(sources.DataSource):
return None
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
def get_public_ssh_keys(self):
- public_keys_data = self.metadata['public-keys-data']
+ public_keys_data = self.metadata["public-keys-data"]
return _parse_public_keys(public_keys_data, self.default_user)
def publish_host_keys(self, hostkeys):
@@ -100,26 +122,35 @@ class DataSourceGCE(sources.DataSource):
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
# GCE has long FQDNs and has asked for short hostnames.
- return self.metadata['local-hostname'].split('.')[0]
+ return self.metadata["local-hostname"].split(".")[0]
@property
def availability_zone(self):
- return self.metadata['availability-zone']
+ return self.metadata["availability-zone"]
@property
def region(self):
- return self.availability_zone.rsplit('-', 1)[0]
+ return self.availability_zone.rsplit("-", 1)[0]
+
+
+class DataSourceGCELocal(DataSourceGCE):
+ perform_dhcp_setup = True
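
The "from contextlib import suppress as noop" import above is worth a note: contextlib.suppress() called with no exception types is a do-nothing context manager, which lets _get_data wrap the crawl in EphemeralDHCPv4 only when perform_dhcp_setup is true, without duplicating the crawl body. A small sketch of that pattern, with a hypothetical EphemeralNet class standing in for EphemeralDHCPv4:

    from contextlib import suppress as noop

    class EphemeralNet:
        # hypothetical stand-in for cloud-init's EphemeralDHCPv4
        def __enter__(self):
            print("bring up a temporary DHCP lease")
            return self

        def __exit__(self, *exc):
            print("tear down the temporary lease")

    def crawl(perform_dhcp_setup):
        ctx = noop()  # suppress() with no args: a no-op context manager
        if perform_dhcp_setup:
            ctx = EphemeralNet()
        with ctx:
            return "crawl the metadata service here"

    crawl(True)   # brings networking up and down around the crawl
    crawl(False)  # same body, no network side effects
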
def _write_host_key_to_guest_attributes(key_type, key_value):
- url = '%s/%s/%s' % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
- key_value = key_value.encode('utf-8')
- resp = url_helper.readurl(url=url, data=key_value, headers=HEADERS,
- request_method='PUT', check_status=False)
+ url = "%s/%s/%s" % (GUEST_ATTRIBUTES_URL, HOSTKEY_NAMESPACE, key_type)
+ key_value = key_value.encode("utf-8")
+ resp = url_helper.readurl(
+ url=url,
+ data=key_value,
+ headers=HEADERS,
+ request_method="PUT",
+ check_status=False,
+ )
if resp.ok():
- LOG.debug('Wrote %s host key to guest attributes.', key_type)
+ LOG.debug("Wrote %s host key to guest attributes.", key_type)
else:
- LOG.debug('Unable to write %s host key to guest attributes.', key_type)
+ LOG.debug("Unable to write %s host key to guest attributes.", key_type)
def _has_expired(public_key):
@@ -133,7 +164,7 @@ def _has_expired(public_key):
return False
# Do not expire keys if they do not have the expected schema identifier.
- if schema != 'google-ssh':
+ if schema != "google-ssh":
return False
try:
@@ -142,11 +173,11 @@ def _has_expired(public_key):
return False
# Do not expire keys if there is no expiration timestamp.
- if 'expireOn' not in json_obj:
+ if "expireOn" not in json_obj:
return False
- expire_str = json_obj['expireOn']
- format_str = '%Y-%m-%dT%H:%M:%S+0000'
+ expire_str = json_obj["expireOn"]
+ format_str = "%Y-%m-%dT%H:%M:%S+0000"
try:
expire_time = datetime.datetime.strptime(expire_str, format_str)
except ValueError:
@@ -167,44 +198,49 @@ def _parse_public_keys(public_keys_data, default_user=None):
for public_key in public_keys_data:
if not public_key or not all(ord(c) < 128 for c in public_key):
continue
- split_public_key = public_key.split(':', 1)
+ split_public_key = public_key.split(":", 1)
if len(split_public_key) != 2:
continue
user, key = split_public_key
- if user in ('cloudinit', default_user) and not _has_expired(key):
+ if user in ("cloudinit", default_user) and not _has_expired(key):
public_keys.append(key)
return public_keys
-def read_md(address=None, platform_check=True):
+def read_md(address=None, url_params=None, platform_check=True):
if address is None:
address = MD_V1_URL
- ret = {'meta-data': None, 'user-data': None,
- 'success': False, 'reason': None}
- ret['platform_reports_gce'] = platform_reports_gce()
+ ret = {
+ "meta-data": None,
+ "user-data": None,
+ "success": False,
+ "reason": None,
+ }
+ ret["platform_reports_gce"] = platform_reports_gce()
- if platform_check and not ret['platform_reports_gce']:
- ret['reason'] = "Not running on GCE."
+ if platform_check and not ret["platform_reports_gce"]:
+ ret["reason"] = "Not running on GCE."
return ret
# If we cannot resolve the metadata server, then no point in trying.
if not util.is_resolvable_url(address):
LOG.debug("%s is not resolvable", address)
- ret['reason'] = 'address "%s" is not resolvable' % address
+ ret["reason"] = 'address "%s" is not resolvable' % address
return ret
# url_map: (our-key, path, required, is_text, is_recursive)
url_map = [
- ('instance-id', ('instance/id',), True, True, False),
- ('availability-zone', ('instance/zone',), True, True, False),
- ('local-hostname', ('instance/hostname',), True, True, False),
- ('instance-data', ('instance/attributes',), False, False, True),
- ('project-data', ('project/attributes',), False, False, True),
+ ("instance-id", ("instance/id",), True, True, False),
+ ("availability-zone", ("instance/zone",), True, True, False),
+ ("local-hostname", ("instance/hostname",), True, True, False),
+ ("instance-data", ("instance/attributes",), False, False, True),
+ ("project-data", ("project/attributes",), False, False, True),
]
-
- metadata_fetcher = GoogleMetadataFetcher(address)
+ metadata_fetcher = GoogleMetadataFetcher(
+ address, url_params.num_retries, url_params.sec_between_retries
+ )
md = {}
# Iterate over url_map keys to get metadata items.
for (mkey, paths, required, is_text, is_recursive) in url_map:
@@ -215,56 +251,58 @@ def read_md(address=None, platform_check=True):
value = new_value
if required and value is None:
msg = "required key %s returned nothing. not GCE"
- ret['reason'] = msg % mkey
+ ret["reason"] = msg % mkey
return ret
md[mkey] = value
- instance_data = json.loads(md['instance-data'] or '{}')
- project_data = json.loads(md['project-data'] or '{}')
- valid_keys = [instance_data.get('sshKeys'), instance_data.get('ssh-keys')]
- block_project = instance_data.get('block-project-ssh-keys', '').lower()
- if block_project != 'true' and not instance_data.get('sshKeys'):
- valid_keys.append(project_data.get('ssh-keys'))
- valid_keys.append(project_data.get('sshKeys'))
- public_keys_data = '\n'.join([key for key in valid_keys if key])
- md['public-keys-data'] = public_keys_data.splitlines()
+ instance_data = json.loads(md["instance-data"] or "{}")
+ project_data = json.loads(md["project-data"] or "{}")
+ valid_keys = [instance_data.get("sshKeys"), instance_data.get("ssh-keys")]
+ block_project = instance_data.get("block-project-ssh-keys", "").lower()
+ if block_project != "true" and not instance_data.get("sshKeys"):
+ valid_keys.append(project_data.get("ssh-keys"))
+ valid_keys.append(project_data.get("sshKeys"))
+ public_keys_data = "\n".join([key for key in valid_keys if key])
+ md["public-keys-data"] = public_keys_data.splitlines()
- if md['availability-zone']:
- md['availability-zone'] = md['availability-zone'].split('/')[-1]
+ if md["availability-zone"]:
+ md["availability-zone"] = md["availability-zone"].split("/")[-1]
- if 'user-data' in instance_data:
+ if "user-data" in instance_data:
# instance_data was json, so values are all utf-8 strings.
- ud = instance_data['user-data'].encode("utf-8")
- encoding = instance_data.get('user-data-encoding')
- if encoding == 'base64':
+ ud = instance_data["user-data"].encode("utf-8")
+ encoding = instance_data.get("user-data-encoding")
+ if encoding == "base64":
ud = b64decode(ud)
elif encoding:
- LOG.warning('unknown user-data-encoding: %s, ignoring', encoding)
- ret['user-data'] = ud
+ LOG.warning("unknown user-data-encoding: %s, ignoring", encoding)
+ ret["user-data"] = ud
- ret['meta-data'] = md
- ret['success'] = True
+ ret["meta-data"] = md
+ ret["success"] = True
return ret
def platform_reports_gce():
- pname = dmi.read_dmi_data('system-product-name') or "N/A"
- if pname == "Google Compute Engine":
+ pname = dmi.read_dmi_data("system-product-name") or "N/A"
+ if pname == "Google Compute Engine" or pname == "Google":
return True
# system-product-name is not always guaranteed (LP: #1674861)
- serial = dmi.read_dmi_data('system-serial-number') or "N/A"
+ serial = dmi.read_dmi_data("system-serial-number") or "N/A"
if serial.startswith("GoogleCloud-"):
return True
- LOG.debug("Not running on google cloud. product-name=%s serial=%s",
- pname, serial)
+ LOG.debug(
+ "Not running on google cloud. product-name=%s serial=%s", pname, serial
+ )
return False
# Used to match classes to dependencies.
datasources = [
+ (DataSourceGCELocal, (sources.DEP_FILESYSTEM,)),
(DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
@@ -277,31 +315,38 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
import sys
-
from base64 import b64encode
- parser = argparse.ArgumentParser(description='Query GCE Metadata Service')
- parser.add_argument("--endpoint", metavar="URL",
- help="The url of the metadata service.",
- default=MD_V1_URL)
- parser.add_argument("--no-platform-check", dest="platform_check",
- help="Ignore smbios platform check",
- action='store_false', default=True)
+ parser = argparse.ArgumentParser(description="Query GCE Metadata Service")
+ parser.add_argument(
+ "--endpoint",
+ metavar="URL",
+ help="The url of the metadata service.",
+ default=MD_V1_URL,
+ )
+ parser.add_argument(
+ "--no-platform-check",
+ dest="platform_check",
+ help="Ignore smbios platform check",
+ action="store_false",
+ default=True,
+ )
args = parser.parse_args()
data = read_md(address=args.endpoint, platform_check=args.platform_check)
- if 'user-data' in data:
+ if "user-data" in data:
# user-data is bytes, not string like other things. Handle it specially.
# If it can be represented as utf-8 then do so. Otherwise print base64
# encoded value in the key user-data-b64.
try:
- data['user-data'] = data['user-data'].decode()
+ data["user-data"] = data["user-data"].decode()
except UnicodeDecodeError:
- sys.stderr.write("User-data cannot be decoded. "
- "Writing as base64\n")
- del data['user-data']
+ sys.stderr.write(
+ "User-data cannot be decoded. Writing as base64\n"
+ )
+ del data["user-data"]
# b64encode returns a bytes value. Decode to get the string.
- data['user-data-b64'] = b64encode(data['user-data']).decode()
+ data["user-data-b64"] = b64encode(data["user-data"]).decode()
- print(json.dumps(data, indent=1, sort_keys=True, separators=(',', ': ')))
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(",", ": ")))
# vi: ts=4 expandtab
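
The expiry logic in _has_expired above depends on Google's google-ssh key format, where the trailing comment field of an authorized key carries a JSON blob with an expireOn timestamp. A self-contained sketch of the same checks, run against a made-up key (the key blob and timestamp are fabricated for illustration):

    import datetime
    import json

    def has_expired(public_key):
        # Expected shape: '<type> <base64-blob> google-ssh {"expireOn": ...}'
        parts = public_key.split(None, 3)
        if len(parts) != 4:
            return False
        schema, json_str = parts[2], parts[3]
        if schema != "google-ssh":
            return False
        try:
            json_obj = json.loads(json_str)
        except ValueError:
            return False
        if "expireOn" not in json_obj:
            return False
        fmt = "%Y-%m-%dT%H:%M:%S+0000"
        try:
            expire_time = datetime.datetime.strptime(json_obj["expireOn"], fmt)
        except ValueError:
            return False
        return datetime.datetime.utcnow() > expire_time

    key = (
        "ssh-rsa AAAAB3fabricated google-ssh "
        '{"userName": "demo@example.com", '
        '"expireOn": "2020-01-01T00:00:00+0000"}'
    )
    print(has_expired(key))  # True: the fabricated timestamp is in the past
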
diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py
index c7c88dd7..91a6f9c9 100644
--- a/cloudinit/sources/DataSourceHetzner.py
+++ b/cloudinit/sources/DataSourceHetzner.py
@@ -6,21 +6,19 @@
"""Hetzner Cloud API Documentation
https://docs.hetzner.cloud/"""
+import cloudinit.sources.helpers.hetzner as hc_helper
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import net as cloudnet
-from cloudinit import sources
-from cloudinit import util
-
-import cloudinit.sources.helpers.hetzner as hc_helper
+from cloudinit import net, sources, util
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
LOG = logging.getLogger(__name__)
-BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1'
+BASE_URL_V1 = "http://169.254.169.254/hetzner/v1"
BUILTIN_DS_CONFIG = {
- 'metadata_url': BASE_URL_V1 + '/metadata',
- 'userdata_url': BASE_URL_V1 + '/userdata',
+ "metadata_url": BASE_URL_V1 + "/metadata",
+ "userdata_url": BASE_URL_V1 + "/userdata",
}
MD_RETRIES = 60
@@ -30,21 +28,24 @@ MD_WAIT_RETRY = 2
class DataSourceHetzner(sources.DataSource):
- dsname = 'Hetzner'
+ dsname = "Hetzner"
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.distro = distro
self.metadata = dict()
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
- BUILTIN_DS_CONFIG])
- self.metadata_address = self.ds_cfg['metadata_url']
- self.userdata_address = self.ds_cfg['userdata_url']
- self.retries = self.ds_cfg.get('retries', MD_RETRIES)
- self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT)
- self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY)
- self._network_config = None
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.userdata_address = self.ds_cfg["userdata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
+ self._network_config = sources.UNSET
self.dsmode = sources.DSMODE_NETWORK
def _get_data(self):
@@ -53,15 +54,28 @@ class DataSourceHetzner(sources.DataSource):
if not on_hetzner:
return False
- nic = cloudnet.find_fallback_nic()
- with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16,
- "169.254.255.255"):
- md = hc_helper.read_metadata(
- self.metadata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
- ud = hc_helper.read_userdata(
- self.userdata_address, timeout=self.timeout,
- sec_between=self.wait_retry, retries=self.retries)
+ try:
+ with EphemeralDHCPv4(
+ iface=net.find_fallback_nic(),
+ connectivity_url_data={
+ "url": BASE_URL_V1 + "/metadata/instance-id",
+ },
+ ):
+ md = hc_helper.read_metadata(
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+ ud = hc_helper.read_userdata(
+ self.userdata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+ except NoDHCPLeaseError as e:
+ LOG.error("Bailing, DHCP Exception: %s", e)
+ raise
# Hetzner cloud does not support binary user-data. So here, do a
# base64 decode of the data if we can. The end result being that a
@@ -76,10 +90,10 @@ class DataSourceHetzner(sources.DataSource):
# hostname is name provided by user at launch. The API enforces it is
# a valid hostname, but it is not guaranteed to be resolvable in dns or
# fully qualified.
- self.metadata['instance-id'] = md['instance-id']
- self.metadata['local-hostname'] = md['hostname']
- self.metadata['network-config'] = md.get('network-config', None)
- self.metadata['public-keys'] = md.get('public-keys', None)
+ self.metadata["instance-id"] = md["instance-id"]
+ self.metadata["local-hostname"] = md["hostname"]
+ self.metadata["network-config"] = md.get("network-config", None)
+ self.metadata["public-keys"] = md.get("public-keys", None)
self.vendordata_raw = md.get("vendor_data", None)
# instance-id and serial from SMBIOS should be identical
@@ -92,19 +106,27 @@ class DataSourceHetzner(sources.DataSource):
def check_instance_id(self, sys_cfg):
return sources.instance_id_matches_system_uuid(
- self.get_instance_id(), 'system-serial-number')
+ self.get_instance_id(), "system-serial-number"
+ )
@property
def network_config(self):
"""Configure the networking. This needs to be done each boot, since
- the IP information may have changed due to snapshot and/or
- migration.
+ the IP information may have changed due to snapshot and/or
+ migration.
"""
- if self._network_config:
+ if self._network_config is None:
+ LOG.warning(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
+ self._network_config = sources.UNSET
+
+ if self._network_config != sources.UNSET:
return self._network_config
- _net_config = self.metadata['network-config']
+ _net_config = self.metadata["network-config"]
if not _net_config:
raise Exception("Unable to get meta-data from server....")
@@ -114,7 +136,7 @@ class DataSourceHetzner(sources.DataSource):
def get_hcloud_data():
- vendor_name = dmi.read_dmi_data('system-manufacturer')
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
if vendor_name != "Hetzner":
return (False, None)
@@ -129,7 +151,7 @@ def get_hcloud_data():
# Used to match classes to dependencies
datasources = [
- (DataSourceHetzner, (sources.DEP_FILESYSTEM, )),
+ (DataSourceHetzner, (sources.DEP_FILESYSTEM,)),
]
@@ -137,4 +159,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
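
A note on the instance-id check above: on Hetzner Cloud the SMBIOS system-serial-number carries the instance id, which is why check_instance_id can validate the cached id without touching the network. A rough sketch of reading that serial outside cloud-init; it assumes dmidecode is installed and the process runs as root (cloud-init itself goes through dmi.read_dmi_data):

    import subprocess

    def smbios_serial():
        # Equivalent in spirit to dmi.read_dmi_data("system-serial-number").
        out = subprocess.run(
            ["dmidecode", "-s", "system-serial-number"],
            capture_output=True, text=True, check=True,
        )
        return out.stdout.strip()

    # On a Hetzner guest this prints the same value the metadata service
    # reports as instance-id, so a cached id can be verified offline.
    print("instance-id from SMBIOS:", smbios_serial())
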
diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py
index 8d196185..18c3848f 100644
--- a/cloudinit/sources/DataSourceIBMCloud.py
+++ b/cloudinit/sources/DataSourceIBMCloud.py
@@ -97,10 +97,8 @@ import json
import os
from cloudinit import log as logging
-from cloudinit import sources
+from cloudinit import sources, subp, util
from cloudinit.sources.helpers import openstack
-from cloudinit import subp
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -117,12 +115,13 @@ class Platforms(object):
PROVISIONING = (
Platforms.TEMPLATE_PROVISIONING_METADATA,
- Platforms.TEMPLATE_PROVISIONING_NODATA)
+ Platforms.TEMPLATE_PROVISIONING_NODATA,
+)
class DataSourceIBMCloud(sources.DataSource):
- dsname = 'IBMCloud'
+ dsname = "IBMCloud"
system_uuid = None
def __init__(self, sys_cfg, distro, paths):
@@ -142,14 +141,14 @@ class DataSourceIBMCloud(sources.DataSource):
if results is None:
return False
- self.source = results['source']
- self.platform = results['platform']
- self.metadata = results['metadata']
- self.userdata_raw = results.get('userdata')
- self.network_json = results.get('networkdata')
- vd = results.get('vendordata')
+ self.source = results["source"]
+ self.platform = results["platform"]
+ self.metadata = results["metadata"]
+ self.userdata_raw = results.get("userdata")
+ self.network_json = results.get("networkdata")
+ vd = results.get("vendordata")
self.vendordata_pure = vd
- self.system_uuid = results['system-uuid']
+ self.system_uuid = results["system-uuid"]
try:
self.vendordata_raw = sources.convert_vendordata(vd)
except ValueError as e:
@@ -160,7 +159,7 @@ class DataSourceIBMCloud(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return '%s (%s)' % (self.platform, self.source)
+ return "%s (%s)" % (self.platform, self.source)
def check_instance_id(self, sys_cfg):
"""quickly (local check only) if self.instance_id is still valid
@@ -177,12 +176,13 @@ class DataSourceIBMCloud(sources.DataSource):
if self.platform != Platforms.OS_CODE:
# If deployed from template, an agent in the provisioning
# environment handles networking configuration. Not cloud-init.
- return {'config': 'disabled', 'version': 1}
+ return {"config": "disabled", "version": 1}
if self._network_config is None:
if self.network_json is not None:
LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=None)
+ self.network_json, known_macs=None
+ )
else:
LOG.debug("no network configuration available.")
return self._network_config
@@ -200,22 +200,28 @@ def _is_xen():
def _is_ibm_provisioning(
- prov_cfg="/root/provisioningConfiguration.cfg",
- inst_log="/root/swinstall.log",
- boot_ref="/proc/1/environ"):
+ prov_cfg="/root/provisioningConfiguration.cfg",
+ inst_log="/root/swinstall.log",
+ boot_ref="/proc/1/environ",
+):
"""Return boolean indicating if this boot is ibm provisioning boot."""
if os.path.exists(prov_cfg):
msg = "config '%s' exists." % prov_cfg
result = True
if os.path.exists(inst_log):
if os.path.exists(boot_ref):
- result = (os.stat(inst_log).st_mtime >
- os.stat(boot_ref).st_mtime)
- msg += (" log '%s' from %s boot." %
- (inst_log, "current" if result else "previous"))
+ result = (
+ os.stat(inst_log).st_mtime > os.stat(boot_ref).st_mtime
+ )
+ msg += " log '%s' from %s boot." % (
+ inst_log,
+ "current" if result else "previous",
+ )
else:
- msg += (" log '%s' existed, but no reference file '%s'." %
- (inst_log, boot_ref))
+ msg += " log '%s' existed, but no reference file '%s'." % (
+ inst_log,
+ boot_ref,
+ )
result = False
else:
msg += " log '%s' did not exist." % inst_log
@@ -252,17 +258,26 @@ def get_ibm_platform():
if label not in (label_mdata, label_cfg2):
continue
if label in fslabels:
- LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s",
- label, fslabels[label], data)
+ LOG.warning(
+ "Duplicate fslabel '%s'. existing=%s current=%s",
+ label,
+ fslabels[label],
+ data,
+ )
continue
if label == label_cfg2 and uuid != IBM_CONFIG_UUID:
- LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s",
- dev, label, uuid, data)
+ LOG.debug(
+ "Skipping %s with LABEL=%s due to uuid != %s: %s",
+ dev,
+ label,
+ uuid,
+ data,
+ )
continue
fslabels[label] = data
- metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME')
- cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME')
+ metadata_path = fslabels.get(label_mdata, {}).get("DEVNAME")
+ cfg2_path = fslabels.get(label_cfg2, {}).get("DEVNAME")
if cfg2_path:
return (Platforms.OS_CODE, cfg2_path)
@@ -288,12 +303,14 @@ def read_md():
LOG.debug("This is not an IBMCloud platform.")
return None
elif platform in PROVISIONING:
- LOG.debug("Cloud-init is disabled during provisioning: %s.",
- platform)
+ LOG.debug("Cloud-init is disabled during provisioning: %s.", platform)
return None
- ret = {'platform': platform, 'source': path,
- 'system-uuid': _read_system_uuid()}
+ ret = {
+ "platform": platform,
+ "source": path,
+ "system-uuid": _read_system_uuid(),
+ }
try:
if os.path.isdir(path):
@@ -302,8 +319,8 @@ def read_md():
results = util.mount_cb(path, metadata_from_dir)
except sources.BrokenMetadata as e:
raise RuntimeError(
- "Failed reading IBM config disk (platform=%s path=%s): %s" %
- (platform, path, e)
+ "Failed reading IBM config disk (platform=%s path=%s): %s"
+ % (platform, path, e)
) from e
ret.update(results)
@@ -329,14 +346,14 @@ def metadata_from_dir(source_dir):
return os.path.join("openstack", "latest", fname)
def load_json_bytes(blob):
- return json.loads(blob.decode('utf-8'))
+ return json.loads(blob.decode("utf-8"))
files = [
# tuples of (results_name, path, translator)
- ('metadata_raw', opath('meta_data.json'), load_json_bytes),
- ('userdata', opath('user_data'), None),
- ('vendordata', opath('vendor_data.json'), load_json_bytes),
- ('networkdata', opath('network_data.json'), load_json_bytes),
+ ("metadata_raw", opath("meta_data.json"), load_json_bytes),
+ ("userdata", opath("user_data"), None),
+ ("vendordata", opath("vendor_data.json"), load_json_bytes),
+ ("networkdata", opath("network_data.json"), load_json_bytes),
]
results = {}
@@ -355,28 +372,33 @@ def metadata_from_dir(source_dir):
data = transl(raw)
except Exception as e:
raise sources.BrokenMetadata(
- "Failed decoding %s: %s" % (path, e))
+ "Failed decoding %s: %s" % (path, e)
+ )
results[name] = data
- if results.get('metadata_raw') is None:
+ if results.get("metadata_raw") is None:
raise sources.BrokenMetadata(
- "%s missing required file 'meta_data.json'" % source_dir)
+ "%s missing required file 'meta_data.json'" % source_dir
+ )
- results['metadata'] = {}
+ results["metadata"] = {}
- md_raw = results['metadata_raw']
- md = results['metadata']
- if 'random_seed' in md_raw:
+ md_raw = results["metadata_raw"]
+ md = results["metadata"]
+ if "random_seed" in md_raw:
try:
- md['random_seed'] = base64.b64decode(md_raw['random_seed'])
+ md["random_seed"] = base64.b64decode(md_raw["random_seed"])
except (ValueError, TypeError) as e:
raise sources.BrokenMetadata(
- "Badly formatted metadata random_seed entry: %s" % e)
+ "Badly formatted metadata random_seed entry: %s" % e
+ )
renames = (
- ('public_keys', 'public-keys'), ('hostname', 'local-hostname'),
- ('uuid', 'instance-id'))
+ ("public_keys", "public-keys"),
+ ("hostname", "local-hostname"),
+ ("uuid", "instance-id"),
+ )
for mdname, newname in renames:
if mdname in md_raw:
md[newname] = md_raw[mdname]
@@ -398,7 +420,7 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import argparse
- parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata')
+ parser = argparse.ArgumentParser(description="Query IBM Cloud Metadata")
args = parser.parse_args()
data = read_md()
print(util.json_dumps(data))
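
The _is_ibm_provisioning heuristic above hinges on one detail: /proc/1/environ is recreated on every boot, so an install log with an older mtime must have been written during a previous (provisioning) boot. A self-contained sketch of the comparison, using throwaway files in place of the real paths:

    import os
    import tempfile
    import time

    def log_from_current_boot(inst_log, boot_ref):
        # True when the install log was written after the boot reference
        # file was (re)created, i.e. during this boot.
        return os.stat(inst_log).st_mtime > os.stat(boot_ref).st_mtime

    with tempfile.TemporaryDirectory() as tmp:
        boot_ref = os.path.join(tmp, "boot_ref")  # stands in for /proc/1/environ
        inst_log = os.path.join(tmp, "swinstall.log")
        open(boot_ref, "w").close()
        time.sleep(0.01)
        open(inst_log, "w").close()  # written after "boot"
        print(log_from_current_boot(inst_log, boot_ref))  # True: current boot
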
diff --git a/cloudinit/sources/DataSourceLXD.py b/cloudinit/sources/DataSourceLXD.py
new file mode 100644
index 00000000..071ea87c
--- /dev/null
+++ b/cloudinit/sources/DataSourceLXD.py
@@ -0,0 +1,392 @@
+"""Datasource for LXD, reads /dev/lxd/sock representaton of instance data.
+
+Notes:
+ * This datasource replaces previous NoCloud datasource for LXD.
+ * Older LXD images may not have updates for cloud-init so NoCloud may
+ still be detected on those images.
+ * Detect LXD datasource when /dev/lxd/sock is an active socket file.
+ * Info on dev-lxd API: https://linuxcontainers.org/lxd/docs/master/dev-lxd
+ * TODO( Hotplug support using websockets API 1.0/events )
+"""
+
+import os
+import socket
+import stat
+from json.decoder import JSONDecodeError
+
+import requests
+from requests.adapters import HTTPAdapter
+
+# pylint fails to import the two modules below.
+# These are imported via requests.packages rather than urllib3 because:
+# a.) the provider of the requests package should ensure that urllib3
+# contained in it is consistent/correct.
+# b.) cloud-init does not specifically have a dependency on urllib3
+#
+# For future reference, see:
+# https://github.com/kennethreitz/requests/pull/2375
+# https://github.com/requests/requests/issues/4104
+# pylint: disable=E0401
+from requests.packages.urllib3.connection import HTTPConnection
+from requests.packages.urllib3.connectionpool import HTTPConnectionPool
+
+from cloudinit import log as logging
+from cloudinit import sources, subp, util
+
+LOG = logging.getLogger(__name__)
+
+LXD_SOCKET_PATH = "/dev/lxd/sock"
+LXD_SOCKET_API_VERSION = "1.0"
+
+# Config key mappings to alias as top-level instance data keys
+CONFIG_KEY_ALIASES = {
+ "cloud-init.user-data": "user-data",
+ "cloud-init.network-config": "network-config",
+ "cloud-init.vendor-data": "vendor-data",
+ "user.user-data": "user-data",
+ "user.network-config": "network-config",
+ "user.vendor-data": "vendor-data",
+}
+
+
+def generate_fallback_network_config() -> dict:
+ """Return network config V1 dict representing instance network config."""
+ network_v1 = {
+ "version": 1,
+ "config": [
+ {
+ "type": "physical",
+ "name": "eth0",
+ "subnets": [{"type": "dhcp", "control": "auto"}],
+ }
+ ],
+ }
+ if subp.which("systemd-detect-virt"):
+ try:
+ virt_type, _ = subp.subp(["systemd-detect-virt"])
+ except subp.ProcessExecutionError as err:
+ LOG.warning(
+ "Unable to run systemd-detect-virt: %s."
+ " Rendering default network config.",
+ err,
+ )
+ return network_v1
+ if virt_type.strip() == "kvm": # instance.type VIRTUAL-MACHINE
+ arch = util.system_info()["uname"][4]
+ if arch == "ppc64le":
+ network_v1["config"][0]["name"] = "enp0s5"
+ elif arch == "s390x":
+ network_v1["config"][0]["name"] = "enc9"
+ else:
+ network_v1["config"][0]["name"] = "enp5s0"
+ return network_v1
+
+
+class SocketHTTPConnection(HTTPConnection):
+ def __init__(self, socket_path):
+ super().__init__("localhost")
+ self.socket_path = socket_path
+
+ def connect(self):
+ self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.sock.connect(self.socket_path)
+
+
+class SocketConnectionPool(HTTPConnectionPool):
+ def __init__(self, socket_path):
+ self.socket_path = socket_path
+ super().__init__("localhost")
+
+ def _new_conn(self):
+ return SocketHTTPConnection(self.socket_path)
+
+
+class LXDSocketAdapter(HTTPAdapter):
+ def get_connection(self, url, proxies=None):
+ return SocketConnectionPool(LXD_SOCKET_PATH)
+
+
+def _maybe_remove_top_network(cfg):
+ """If network-config contains top level 'network' key, then remove it.
+
+ Some providers of network configuration may provide a top level
+ 'network' key (LP: #1798117) even though it is not necessary.
+
+ Be friendly and remove it if it really seems so.
+
+ Return the original value if no change or the updated value if changed."""
+ if "network" not in cfg:
+ return cfg
+ network_val = cfg["network"]
+ bmsg = "Top level network key in network-config %s: %s"
+ if not isinstance(network_val, dict):
+ LOG.debug(bmsg, "was not a dict", cfg)
+ return cfg
+ if len(list(cfg.keys())) != 1:
+ LOG.debug(bmsg, "had multiple top level keys", cfg)
+ return cfg
+ if network_val.get("config") == "disabled":
+ LOG.debug(bmsg, "was config/disabled", cfg)
+ elif not all(("config" in network_val, "version" in network_val)):
+ LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
+ return cfg
+ LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
+ return network_val
+
+
+def _raw_instance_data_to_dict(metadata_type: str, metadata_value) -> dict:
+ """Convert raw instance data from str, bytes, YAML to dict
+
+ :param metadata_type: string, one of: meta-data, vendor-data, user-data,
+ network-config
+
+ :param metadata_value: str, bytes or dict representing instance-data.
+
+ :raises: InvalidMetaDataException on invalid instance-data content.
+ """
+ if isinstance(metadata_value, dict):
+ return metadata_value
+ if metadata_value is None:
+ return {}
+ try:
+ parsed_metadata = util.load_yaml(metadata_value)
+ except AttributeError as exc: # not str or bytes
+ raise sources.InvalidMetaDataException(
+ "Invalid {md_type}. Expected str, bytes or dict but found:"
+ " {value}".format(md_type=metadata_type, value=metadata_value)
+ ) from exc
+ if parsed_metadata is None:
+ raise sources.InvalidMetaDataException(
+ "Invalid {md_type} format. Expected YAML but found:"
+ " {value}".format(md_type=metadata_type, value=metadata_value)
+ )
+ return parsed_metadata
+
+
+class DataSourceLXD(sources.DataSource):
+
+ dsname = "LXD"
+
+ _network_config = sources.UNSET
+ _crawled_metadata = sources.UNSET
+
+ sensitive_metadata_keys = (
+ "merged_cfg",
+ "user.meta-data",
+ "user.vendor-data",
+ "user.user-data",
+ )
+
+ def _is_platform_viable(self) -> bool:
+ """Check platform environment to report if this datasource may run."""
+ return is_platform_viable()
+
+ def _get_data(self) -> bool:
+ """Crawl LXD socket API instance data and return True on success"""
+ if not self._is_platform_viable():
+ LOG.debug("Not an LXD datasource: No LXD socket found.")
+ return False
+
+ self._crawled_metadata = util.log_time(
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=read_metadata,
+ )
+ self.metadata = _raw_instance_data_to_dict(
+ "meta-data", self._crawled_metadata.get("meta-data")
+ )
+ config = self._crawled_metadata.get("config", {})
+ user_metadata = config.get("user.meta-data", {})
+ if user_metadata:
+ user_metadata = _raw_instance_data_to_dict(
+ "user.meta-data", user_metadata
+ )
+ if not isinstance(self.metadata, dict):
+ self.metadata = util.mergemanydict(
+ [util.load_yaml(self.metadata), user_metadata]
+ )
+ if "user-data" in self._crawled_metadata:
+ self.userdata_raw = self._crawled_metadata["user-data"]
+ if "network-config" in self._crawled_metadata:
+ self._network_config = _maybe_remove_top_network(
+ _raw_instance_data_to_dict(
+ "network-config", self._crawled_metadata["network-config"]
+ )
+ )
+ if "vendor-data" in self._crawled_metadata:
+ self.vendordata_raw = self._crawled_metadata["vendor-data"]
+ return True
+
+ def _get_subplatform(self) -> str:
+ """Return subplatform details for this datasource"""
+ return "LXD socket API v. {ver} ({socket})".format(
+ ver=LXD_SOCKET_API_VERSION, socket=LXD_SOCKET_PATH
+ )
+
+ def check_instance_id(self, sys_cfg) -> str:
+ """Return True if instance_id unchanged."""
+ response = read_metadata(metadata_only=True)
+ md = response.get("meta-data", {})
+ if not isinstance(md, dict):
+ md = util.load_yaml(md)
+ return md.get("instance-id") == self.metadata.get("instance-id")
+
+ @property
+ def network_config(self) -> dict:
+ """Network config read from LXD socket config/user.network-config.
+
+ If none is present, then we generate fallback configuration.
+ """
+ if self._network_config == sources.UNSET:
+ if self._crawled_metadata.get("network-config"):
+ self._network_config = self._crawled_metadata.get(
+ "network-config"
+ )
+ else:
+ self._network_config = generate_fallback_network_config()
+ return self._network_config
+
+
+def is_platform_viable() -> bool:
+ """Return True when this platform appears to have an LXD socket."""
+ if os.path.exists(LXD_SOCKET_PATH):
+ return stat.S_ISSOCK(os.lstat(LXD_SOCKET_PATH).st_mode)
+ return False
+
+
+def read_metadata(
+ api_version: str = LXD_SOCKET_API_VERSION, metadata_only: bool = False
+) -> dict:
+ """Fetch metadata from the /dev/lxd/socket routes.
+
+ Perform a number of HTTP GETs on known routes on the devlxd socket API.
+ Minimally, all containers must respond to http://lxd/1.0/meta-data when
+ the LXD configuration setting `security.devlxd` is true.
+
+ When `security.devlxd` is false, no /dev/lxd/socket file exists. This
+ datasource will return False from `is_platform_viable` in that case.
+
+ Perform a GET of `<LXD_SOCKET_API_VERSION>/config` and walk all `user.*`
+ configuration keys, storing all keys and values under a dict key
+ LXD_SOCKET_API_VERSION: config {...}.
+
+ In the presence of the following optional user config keys,
+ create top level aliases:
+ - user.user-data -> user-data
+ - user.vendor-data -> vendor-data
+ - user.network-config -> network-config
+
+ :return:
+ A dict with the following mandatory key: meta-data.
+ Optional keys: user-data, vendor-data, network-config, network_mode
+
+ Below <LXD_SOCKET_API_VERSION> is a dict representation of all raw
+ configuration keys and values provided to the container surfaced by
+ the socket under the /1.0/config/ route.
+ """
+ md = {}
+ lxd_url = "http://lxd"
+ version_url = lxd_url + "/" + api_version + "/"
+ with requests.Session() as session:
+ session.mount(version_url, LXDSocketAdapter())
+ # Raw meta-data as text
+ md_route = "{route}meta-data".format(route=version_url)
+ response = session.get(md_route)
+ LOG.debug("[GET] [HTTP:%d] %s", response.status_code, md_route)
+ if not response.ok:
+ raise sources.InvalidMetaDataException(
+ "Invalid HTTP response [{code}] from {route}: {resp}".format(
+ code=response.status_code,
+ route=md_route,
+ resp=response.text,
+ )
+ )
+
+ md["meta-data"] = response.text
+ if metadata_only:
+ return md # Skip network-data, vendor-data, user-data
+
+ md = {
+ "_metadata_api_version": api_version, # Document API version read
+ "config": {},
+ "meta-data": md["meta-data"],
+ }
+
+ config_url = version_url + "config"
+ # Represent all advertised/available config routes under
+ # the dict path {LXD_SOCKET_API_VERSION: {config: {...}}.
+ response = session.get(config_url)
+ LOG.debug("[GET] [HTTP:%d] %s", response.status_code, config_url)
+ if not response.ok:
+ raise sources.InvalidMetaDataException(
+ "Invalid HTTP response [{code}] from {route}: {resp}".format(
+ code=response.status_code,
+ route=config_url,
+ resp=response.text,
+ )
+ )
+ try:
+ config_routes = response.json()
+ except JSONDecodeError as exc:
+ raise sources.InvalidMetaDataException(
+ "Unable to determine cloud-init config from {route}."
+ " Expected JSON but found: {resp}".format(
+ route=config_url, resp=response.text
+ )
+ ) from exc
+
+ # Sorting keys to ensure we always process in alphabetical order.
+ # cloud-init.* keys will sort before user.* keys, which is the
+ # preferred precedence.
+ for config_route in sorted(config_routes):
+ url = "http://lxd{route}".format(route=config_route)
+ response = session.get(url)
+ LOG.debug("[GET] [HTTP:%d] %s", response.status_code, url)
+ if response.ok:
+ cfg_key = config_route.rpartition("/")[-1]
+ # Leave raw data values/format unchanged to represent it in
+ # instance-data.json for cloud-init query or jinja template
+ # use.
+ md["config"][cfg_key] = response.text
+ # Promote common CONFIG_KEY_ALIASES to top-level keys.
+ if cfg_key in CONFIG_KEY_ALIASES:
+ # Due to sort of config_routes, promote cloud-init.*
+ # aliases before user.*. This allows user.* keys to act as
+ # fallback config on old LXD, with new cloud-init images.
+ if CONFIG_KEY_ALIASES[cfg_key] not in md:
+ md[CONFIG_KEY_ALIASES[cfg_key]] = response.text
+ else:
+ LOG.warning(
+ "Ignoring LXD config %s in favor of %s value.",
+ cfg_key,
+ cfg_key.replace("user", "cloud-init", 1),
+ )
+ else:
+ LOG.debug(
+ "Skipping %s on [HTTP:%d]:%s",
+ url,
+ response.status_code,
+ response.text,
+ )
+ return md
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceLXD, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import argparse
+
+ description = """Query LXD metadata and emit a JSON object."""
+ parser = argparse.ArgumentParser(description=description)
+ parser.parse_args()
+ print(util.json_dumps(read_metadata()))
+# vi: ts=4 expandtab
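
The SocketHTTPConnection/SocketConnectionPool/LXDSocketAdapter trio above exists because requests only speaks TCP out of the box; the dev-lxd API itself is ordinary HTTP over a Unix socket. The same GET can be issued with just the standard library, as in this sketch (meant to run inside an LXD guest where /dev/lxd/sock exists):

    import http.client
    import socket

    class UnixHTTPConnection(http.client.HTTPConnection):
        def __init__(self, socket_path):
            super().__init__("lxd")  # host is only used for the Host: header
            self.socket_path = socket_path

        def connect(self):
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.socket_path)

    conn = UnixHTTPConnection("/dev/lxd/sock")
    conn.request("GET", "/1.0/meta-data")
    resp = conn.getresponse()
    print(resp.status, resp.read().decode())  # the raw meta-data crawled above
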
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 9156925f..d554db0d 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -11,20 +11,18 @@ import os
import time
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import sources, url_helper, util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
DS_FIELDS = [
# remote path, location in dictionary, binary data?, optional?
- ("meta-data/instance-id", 'meta-data/instance-id', False, False),
- ("meta-data/local-hostname", 'meta-data/local-hostname', False, False),
- ("meta-data/public-keys", 'meta-data/public-keys', False, True),
- ('meta-data/vendor-data', 'vendor-data', True, True),
- ('user-data', 'user-data', True, True),
+ ("meta-data/instance-id", "meta-data/instance-id", False, False),
+ ("meta-data/local-hostname", "meta-data/local-hostname", False, False),
+ ("meta-data/public-keys", "meta-data/public-keys", False, True),
+ ("meta-data/vendor-data", "vendor-data", True, True),
+ ("user-data", "user-data", True, True),
]
@@ -46,7 +44,7 @@ class DataSourceMAAS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.base_url = None
- self.seed_dir = os.path.join(paths.seed_dir, 'maas')
+ self.seed_dir = os.path.join(paths.seed_dir, "maas")
self.id_hash = get_id_from_ds_cfg(self.ds_cfg)
@property
@@ -72,7 +70,7 @@ class DataSourceMAAS(sources.DataSource):
raise
# If there is no metadata_url, then we're not configured
- url = mcfg.get('metadata_url', None)
+ url = mcfg.get("metadata_url", None)
if not url:
return False
@@ -85,9 +83,14 @@ class DataSourceMAAS(sources.DataSource):
return False
self._set_data(
- url, read_maas_seed_url(
- url, read_file_or_url=self.oauth_helper.readurl,
- paths=self.paths, retries=1))
+ url,
+ read_maas_seed_url(
+ url,
+ read_file_or_url=self.oauth_helper.readurl,
+ paths=self.paths,
+ retries=1,
+ ),
+ )
return True
except Exception:
util.logexc(LOG, "Failed fetching metadata from url %s", url)
@@ -109,7 +112,7 @@ class DataSourceMAAS(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'seed-dir (%s)' % self.base_url
+ return "seed-dir (%s)" % self.base_url
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
@@ -135,13 +138,17 @@ class DataSourceMAAS(sources.DataSource):
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
url, _response = self.oauth_helper.wait_for_url(
- urls=urls, max_wait=max_wait, timeout=timeout)
+ urls=urls, max_wait=max_wait, timeout=timeout
+ )
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
- LOG.critical("Giving up on md from %s after %i seconds",
- urls, int(time.time() - starttime))
+ LOG.critical(
+ "Giving up on md from %s after %i seconds",
+ urls,
+ int(time.time() - starttime),
+ )
return bool(url)
@@ -154,26 +161,26 @@ class DataSourceMAAS(sources.DataSource):
if self.id_hash is None:
return False
ncfg = util.get_cfg_by_path(sys_cfg, ("datasource", self.dsname), {})
- return (self.id_hash == get_id_from_ds_cfg(ncfg))
+ return self.id_hash == get_id_from_ds_cfg(ncfg)
def get_oauth_helper(cfg):
"""Return an oauth helper instance for values in cfg.
- @raises ValueError from OauthUrlHelper if some required fields have
- true-ish values but others do not."""
- keys = ('consumer_key', 'consumer_secret', 'token_key', 'token_secret')
+ @raises ValueError from OauthUrlHelper if some required fields have
+ true-ish values but others do not."""
+ keys = ("consumer_key", "consumer_secret", "token_key", "token_secret")
kwargs = dict([(r, cfg.get(r)) for r in keys])
return url_helper.OauthUrlHelper(**kwargs)
def get_id_from_ds_cfg(ds_cfg):
"""Given a config, generate a unique identifier for this node."""
- fields = ('consumer_key', 'token_key', 'token_secret')
- idstr = '\0'.join([ds_cfg.get(f, "") for f in fields])
+ fields = ("consumer_key", "token_key", "token_secret")
+ idstr = "\0".join([ds_cfg.get(f, "") for f in fields])
# store the encoding version as part of the hash in the event
# that it ever changed we can compute older versions.
- return 'v1:' + hashlib.sha256(idstr.encode('utf-8')).hexdigest()
+ return "v1:" + hashlib.sha256(idstr.encode("utf-8")).hexdigest()
def read_maas_seed_dir(seed_d):
@@ -186,8 +193,14 @@ def read_maas_seed_dir(seed_d):
return read_maas_seed_url("file://%s" % seed_d, version=None)
-def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
- version=MD_VERSION, paths=None, retries=None):
+def read_maas_seed_url(
+ seed_url,
+ read_file_or_url=None,
+ timeout=None,
+ version=MD_VERSION,
+ paths=None,
+ retries=None,
+):
"""
Read the maas datasource at seed_url.
read_file_or_url is a method that should provide an interface
@@ -213,16 +226,20 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
url = "%s/%s/%s" % (seed_url, version, path)
try:
ssl_details = util.fetch_ssl_details(paths)
- resp = read_file_or_url(url, retries=retries, timeout=timeout,
- ssl_details=ssl_details)
+ resp = read_file_or_url(
+ url, retries=retries, timeout=timeout, ssl_details=ssl_details
+ )
if resp.ok():
if binary:
md[path] = resp.contents
else:
md[path] = util.decode_binary(resp.contents)
else:
- LOG.warning(("Fetching from %s resulted in"
- " an invalid http code %s"), url, resp.code)
+ LOG.warning(
+ "Fetching from %s resulted in an invalid http code %s",
+ url,
+ resp.code,
+ )
except url_helper.UrlError as e:
if e.code == 404 and not optional:
raise MAASSeedDirMalformed(
@@ -236,8 +253,8 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
def check_seed_contents(content, seed):
"""Validate if dictionary content valid as a return for a datasource.
- Either return a (userdata, metadata, vendordata) tuple or
- Raise MAASSeedDirMalformed or MAASSeedDirNone
+ Either return a (userdata, metadata, vendordata) tuple or
+ raise MAASSeedDirMalformed or MAASSeedDirNone.
"""
ret = {}
missing = []
@@ -262,14 +279,15 @@ def check_seed_contents(content, seed):
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
vd_data = None
- if ret.get('vendor-data'):
+ if ret.get("vendor-data"):
err = object()
- vd_data = util.load_yaml(ret.get('vendor-data'), default=err,
- allowed=(object))
+ vd_data = util.load_yaml(
+ ret.get("vendor-data"), default=err, allowed=(object)
+ )
if vd_data is err:
raise MAASSeedDirMalformed("vendor-data was not loadable as yaml.")
- return ret.get('user-data'), ret.get('meta-data'), vd_data
+ return ret.get("user-data"), ret.get("meta-data"), vd_data
class MAASSeedDirNone(Exception):
@@ -292,6 +310,7 @@ def get_datasource_list(depends):
if __name__ == "__main__":
+
def main():
"""
Call with single argument of directory or http or https url.
@@ -302,36 +321,66 @@ if __name__ == "__main__":
import pprint
import sys
- parser = argparse.ArgumentParser(description='Interact with MAAS DS')
- parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
- parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
- parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
- parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
- parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
- parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)",
- default=MD_VERSION)
+ parser = argparse.ArgumentParser(description="Interact with MAAS DS")
+ parser.add_argument(
+ "--config",
+ metavar="file",
+ help="specify DS config file",
+ default=None,
+ )
+ parser.add_argument(
+ "--ckey",
+ metavar="key",
+ help="the consumer key to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--tkey",
+ metavar="key",
+ help="the token key to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--csec",
+ metavar="secret",
+ help="the consumer secret (likely '')",
+ default="",
+ )
+ parser.add_argument(
+ "--tsec",
+ metavar="secret",
+ help="the token secret to auth with",
+ default=None,
+ )
+ parser.add_argument(
+ "--apiver",
+ metavar="version",
+ help="the apiver to use ( can be used)",
+ default=MD_VERSION,
+ )
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- for (name, help) in (('crawl', 'crawl the datasource'),
- ('get', 'do a single GET of provided url'),
- ('check-seed', 'read and verify seed at url')):
+ for (name, help) in (
+ ("crawl", "crawl the datasource"),
+ ("get", "do a single GET of provided url"),
+ ("check-seed", "read and verify seed at url"),
+ ):
p = subcmds.add_parser(name, help=help)
- p.add_argument("url", help="the datasource url", nargs='?',
- default=None)
+ p.add_argument(
+ "url", help="the datasource url", nargs="?", default=None
+ )
args = parser.parse_args()
- creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
+ creds = {
+ "consumer_key": args.ckey,
+ "token_key": args.tkey,
+ "token_secret": args.tsec,
+ "consumer_secret": args.csec,
+ }
if args.config is None:
- for fname in ('91_kernel_cmdline_url', '90_dpkg_maas'):
+ for fname in ("91_kernel_cmdline_url", "90_dpkg_maas"):
fpath = "/etc/cloud/cloud.cfg.d/" + fname + ".cfg"
if os.path.exists(fpath) and os.access(fpath, os.R_OK):
sys.stderr.write("Used config in %s.\n" % fpath)
@@ -339,13 +388,13 @@ if __name__ == "__main__":
if args.config:
cfg = util.read_conf(args.config)
- if 'datasource' in cfg:
- cfg = cfg['datasource']['MAAS']
+ if "datasource" in cfg:
+ cfg = cfg["datasource"]["MAAS"]
for key in creds.keys():
if key in cfg and creds[key] is None:
creds[key] = cfg[key]
- if args.url is None and 'metadata_url' in cfg:
- args.url = cfg['metadata_url']
+ if args.url is None and "metadata_url" in cfg:
+ args.url = cfg["metadata_url"]
if args.url is None:
sys.stderr.write("Must provide a url or a config with url.\n")
@@ -380,8 +429,11 @@ if __name__ == "__main__":
(userdata, metadata, vd) = read_maas_seed_dir(args.url)
else:
(userdata, metadata, vd) = read_maas_seed_url(
- args.url, version=args.apiver, read_file_or_url=readurl,
- retries=2)
+ args.url,
+ version=args.apiver,
+ read_file_or_url=readurl,
+ retries=2,
+ )
print("=== user-data ===")
print("N/A" if userdata is None else userdata.decode())
print("=== meta-data ===")
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index a126aad3..56559630 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -13,9 +13,8 @@ import os
from cloudinit import dmi
from cloudinit import log as logging
+from cloudinit import sources, util
from cloudinit.net import eni
-from cloudinit import sources
-from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -27,8 +26,10 @@ class DataSourceNoCloud(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
- os.path.join(paths.seed_dir, 'nocloud-net')]
+ self.seed_dirs = [
+ os.path.join(paths.seed_dir, "nocloud"),
+ os.path.join(paths.seed_dir, "nocloud-net"),
+ ]
self.seed_dir = None
self.supported_seed_starts = ("/", "file://")
@@ -55,17 +56,21 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
- 'network-config': None}
+ mydata = {
+ "meta-data": {},
+ "user-data": "",
+ "vendor-data": "",
+ "network-config": None,
+ }
try:
# Parse the system serial label from dmi. If not empty, try parsing
# like the commandline
md = {}
- serial = dmi.read_dmi_data('system-serial-number')
+ serial = dmi.read_dmi_data("system-serial-number")
if serial and load_cmdline_data(md, serial):
found.append("dmi")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
+ mydata = _merge_new_seed(mydata, {"meta-data": md})
except Exception:
util.logexc(LOG, "Unable to parse dmi data")
return False
@@ -75,14 +80,16 @@ class DataSourceNoCloud(sources.DataSource):
md = {}
if load_cmdline_data(md):
found.append("cmdline")
- mydata = _merge_new_seed(mydata, {'meta-data': md})
+ mydata = _merge_new_seed(mydata, {"meta-data": md})
except Exception:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
- pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data', 'network-config']}
+ pp2d_kwargs = {
+ "required": ["user-data", "meta-data"],
+ "optional": ["vendor-data", "network-config"],
+ }
for path in self.seed_dirs:
try:
@@ -97,31 +104,35 @@ class DataSourceNoCloud(sources.DataSource):
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
# but not over external media
- if self.ds_cfg.get('seedfrom'):
+ if self.ds_cfg.get("seedfrom"):
found.append("ds_config_seedfrom")
- mydata['meta-data']["seedfrom"] = self.ds_cfg['seedfrom']
+ mydata["meta-data"]["seedfrom"] = self.ds_cfg["seedfrom"]
# fields appropriately named can also just come from the datasource
# config (ie, 'user-data', 'meta-data', 'vendor-data' there)
- if 'user-data' in self.ds_cfg and 'meta-data' in self.ds_cfg:
+ if "user-data" in self.ds_cfg and "meta-data" in self.ds_cfg:
mydata = _merge_new_seed(mydata, self.ds_cfg)
found.append("ds_config")
def _pp2d_callback(mp, data):
return util.pathprefix2dict(mp, **data)
- label = self.ds_cfg.get('fs_label', "cidata")
+ label = self.ds_cfg.get("fs_label", "cidata")
if label is not None:
for dev in self._get_devices(label):
try:
LOG.debug("Attempting to use data from %s", dev)
try:
- seeded = util.mount_cb(dev, _pp2d_callback,
- pp2d_kwargs)
+ seeded = util.mount_cb(
+ dev, _pp2d_callback, pp2d_kwargs
+ )
except ValueError:
- LOG.warning("device %s with label=%s not a "
- "valid seed.", dev, label)
+ LOG.warning(
+ "device %s with label=%s not a valid seed.",
+ dev,
+ label,
+ )
continue
mydata = _merge_new_seed(mydata, seeded)
@@ -133,8 +144,9 @@ class DataSourceNoCloud(sources.DataSource):
if e.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for "
- "data", dev)
+ util.logexc(
+ LOG, "Failed to mount %s when looking for data", dev
+ )
# There was no indication on kernel cmdline or data
# in the seeddir suggesting this handler should be used.
@@ -145,8 +157,8 @@ class DataSourceNoCloud(sources.DataSource):
# attempt to seed the userdata / metadata from its value
# its primary value is in allowing the user to type less
# on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
- if "seedfrom" in mydata['meta-data']:
- seedfrom = mydata['meta-data']["seedfrom"]
+ if "seedfrom" in mydata["meta-data"]:
+ seedfrom = mydata["meta-data"]["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
@@ -162,39 +174,43 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Using seeded cache data from %s", seedfrom)
# Values in the command line override those from the seed
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- md_seed])
- mydata['user-data'] = ud
- mydata['vendor-data'] = vd
+ mydata["meta-data"] = util.mergemanydict(
+ [mydata["meta-data"], md_seed]
+ )
+ mydata["user-data"] = ud
+ mydata["vendor-data"] = vd
found.append(seedfrom)
# Now that we have exhausted any other places, merge in the defaults
- mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
- defaults])
+ mydata["meta-data"] = util.mergemanydict(
+ [mydata["meta-data"], defaults]
+ )
self.dsmode = self._determine_dsmode(
- [mydata['meta-data'].get('dsmode')])
+ [mydata["meta-data"].get("dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
- LOG.debug("%s: not claiming datasource, dsmode=%s", self,
- self.dsmode)
+ LOG.debug(
+ "%s: not claiming datasource, dsmode=%s", self, self.dsmode
+ )
return False
self.seed = ",".join(found)
- self.metadata = mydata['meta-data']
- self.userdata_raw = mydata['user-data']
- self.vendordata_raw = mydata['vendor-data']
- self._network_config = mydata['network-config']
- self._network_eni = mydata['meta-data'].get('network-interfaces')
+ self.metadata = mydata["meta-data"]
+ self.userdata_raw = mydata["user-data"]
+ self.vendordata_raw = mydata["vendor-data"]
+ self._network_config = mydata["network-config"]
+ self._network_eni = mydata["meta-data"].get("network-interfaces")
return True
@property
def platform_type(self):
# Handle upgrade path of pickled ds
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
self._platform_type = None
if not self._platform_type:
- self._platform_type = 'lxd' if util.is_lxd() else 'nocloud'
+ self._platform_type = "lxd" if util.is_lxd() else "nocloud"
return self._platform_type
def _get_cloud_name(self):
@@ -203,11 +219,11 @@ class DataSourceNoCloud(sources.DataSource):
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- if self.seed.startswith('/dev'):
- subplatform_type = 'config-disk'
+ if self.seed.startswith("/dev"):
+ subplatform_type = "config-disk"
else:
- subplatform_type = 'seed-dir'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "seed-dir"
+ return "%s (%s)" % (subplatform_type, self.seed)
def check_instance_id(self, sys_cfg):
# quickly (local check only) if self.instance_id is still valid
@@ -218,7 +234,7 @@ class DataSourceNoCloud(sources.DataSource):
# LP: #1568150 need getattr in the case that an old class object
# has been loaded from a pickled file and now executing new source.
- dirs = getattr(self, 'seed_dirs', [self.seed_dir])
+ dirs = getattr(self, "seed_dirs", [self.seed_dir])
quick_id = _quick_read_instance_id(dirs=dirs)
if not quick_id:
return None
@@ -236,7 +252,7 @@ def _quick_read_instance_id(dirs=None):
if dirs is None:
dirs = []
- iid_key = 'instance-id'
+ iid_key = "instance-id"
fill = {}
if load_cmdline_data(fill) and iid_key in fill:
return fill[iid_key]
@@ -245,9 +261,9 @@ def _quick_read_instance_id(dirs=None):
if d is None:
continue
try:
- data = util.pathprefix2dict(d, required=['meta-data'])
- md = util.load_yaml(data['meta-data'])
- if iid_key in md:
+ data = util.pathprefix2dict(d, required=["meta-data"])
+ md = util.load_yaml(data["meta-data"])
+ if md and iid_key in md:
return md[iid_key]
except ValueError:
pass
@@ -256,14 +272,16 @@ def _quick_read_instance_id(dirs=None):
def load_cmdline_data(fill, cmdline=None):
- pairs = [("ds=nocloud", sources.DSMODE_LOCAL),
- ("ds=nocloud-net", sources.DSMODE_NETWORK)]
+ pairs = [
+ ("ds=nocloud", sources.DSMODE_LOCAL),
+ ("ds=nocloud-net", sources.DSMODE_NETWORK),
+ ]
for idstr, dsmode in pairs:
if parse_cmdline_data(idstr, fill, cmdline):
# if dsmode was explicitly in the command line, then
# prefer it to the dsmode based on the command line id
- if 'dsmode' not in fill:
- fill['dsmode'] = dsmode
+ if "dsmode" not in fill:
+ fill["dsmode"] = dsmode
return True
return False
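
parse_cmdline_data itself is not shown in this hunk; as a rough illustration only (this stand-in parser and its key names are assumptions, not the real implementation), the kernel command line pattern it supports looks like this:

    def toy_parse(idstr, fill, cmdline):
        # hypothetical stand-in for parse_cmdline_data: collect "k=v"
        # pairs following "ds=<id>;" on the kernel command line
        for tok in cmdline.split():
            if tok == idstr or tok.startswith(idstr + ";"):
                for kv in tok.split(";")[1:]:
                    k, _, v = kv.partition("=")
                    fill[k] = v
                return True
        return False

    fill = {}
    if toy_parse("ds=nocloud", fill, "ro ds=nocloud;s=http://10.0.0.1/;h=host1"):
        fill.setdefault("dsmode", "local")  # DSMODE_LOCAL, per the pairs above
    print(fill)  # {'s': 'http://10.0.0.1/', 'h': 'host1', 'dsmode': 'local'}
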
@@ -323,19 +341,19 @@ def _maybe_remove_top_network(cfg):
Return the original value if no change or the updated value if changed."""
nullval = object()
- network_val = cfg.get('network', nullval)
+ network_val = cfg.get("network", nullval)
if network_val is nullval:
return cfg
- bmsg = 'Top level network key in network-config %s: %s'
+ bmsg = "Top level network key in network-config %s: %s"
if not isinstance(network_val, dict):
LOG.debug(bmsg, "was not a dict", cfg)
return cfg
if len(list(cfg.keys())) != 1:
LOG.debug(bmsg, "had multiple top level keys", cfg)
return cfg
- if network_val.get('config') == "disabled":
+ if network_val.get("config") == "disabled":
LOG.debug(bmsg, "was config/disabled", cfg)
- elif not all(('config' in network_val, 'version' in network_val)):
+ elif not all(("config" in network_val, "version" in network_val)):
LOG.debug(bmsg, "but missing 'config' or 'version'", cfg)
return cfg
LOG.debug(bmsg, "fixed by removing shifting network.", cfg)
@@ -345,19 +363,20 @@ def _maybe_remove_top_network(cfg):
def _merge_new_seed(cur, seeded):
ret = cur.copy()
- newmd = seeded.get('meta-data', {})
- if not isinstance(seeded['meta-data'], dict):
- newmd = util.load_yaml(seeded['meta-data'])
- ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+ newmd = seeded.get("meta-data", {})
+ if not isinstance(seeded["meta-data"], dict):
+ newmd = util.load_yaml(seeded["meta-data"])
+ ret["meta-data"] = util.mergemanydict([cur["meta-data"], newmd])
- if seeded.get('network-config'):
- ret['network-config'] = _maybe_remove_top_network(
- util.load_yaml(seeded.get('network-config')))
+ if seeded.get("network-config"):
+ ret["network-config"] = _maybe_remove_top_network(
+ util.load_yaml(seeded.get("network-config"))
+ )
- if 'user-data' in seeded:
- ret['user-data'] = seeded['user-data']
- if 'vendor-data' in seeded:
- ret['vendor-data'] = seeded['vendor-data']
+ if "user-data" in seeded:
+ ret["user-data"] = seeded["user-data"]
+ if "vendor-data" in seeded:
+ ret["vendor-data"] = seeded["vendor-data"]
return ret
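
In short: dict meta-data is merged (util.mergemanydict does a deeper merge; plain dict union is shown here as a simplification), while user-data and vendor-data are replaced outright:

    def toy_merge(cur, seeded):
        ret = dict(cur)
        ret["meta-data"] = {**cur["meta-data"], **seeded.get("meta-data", {})}
        for k in ("user-data", "vendor-data"):
            if k in seeded:
                ret[k] = seeded[k]  # replaced, not merged
        return ret

    print(toy_merge(
        {"meta-data": {"instance-id": "iid-1"}, "user-data": ""},
        {"meta-data": {"local-hostname": "demo"}, "user-data": "#cloud-config\n"},
    ))
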
@@ -369,7 +388,7 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
# Used to match classes to dependencies
datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM,)),
(DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
@@ -378,4 +397,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index b7656ac5..036d00b2 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -14,23 +14,23 @@ class DataSourceNone(sources.DataSource):
def __init__(self, sys_cfg, distro, paths, ud_proc=None):
sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
self.metadata = {}
- self.userdata_raw = ''
+ self.userdata_raw = ""
def _get_data(self):
# If the datasource config has any provided 'fallback'
# userdata or metadata, use it...
- if 'userdata_raw' in self.ds_cfg:
- self.userdata_raw = self.ds_cfg['userdata_raw']
- if 'metadata' in self.ds_cfg:
- self.metadata = self.ds_cfg['metadata']
+ if "userdata_raw" in self.ds_cfg:
+ self.userdata_raw = self.ds_cfg["userdata_raw"]
+ if "metadata" in self.ds_cfg:
+ self.metadata = self.ds_cfg["metadata"]
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
- return 'config'
+ return "config"
def get_instance_id(self):
- return 'iid-datasource-none'
+ return "iid-datasource-none"
@property
def is_disconnected(self):
@@ -48,4 +48,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
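
The fallback keys DataSourceNone reads live under datasource/None in system config. A sketch of the parsed cloud.cfg dict that would exercise both branches of _get_data() above (values hypothetical):

    sys_cfg = {
        "datasource": {
            "None": {
                "userdata_raw": "#cloud-config\nruncmd: ['true']\n",
                "metadata": {"local-hostname": "fallback-host"},
            }
        }
    }
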
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 741c140a..0df39824 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -16,37 +16,39 @@ from xml.dom import minidom
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.sources.helpers.vmware.imc.config \
- import Config
-from cloudinit.sources.helpers.vmware.imc.config_custom_script \
- import PreCustomScript, PostCustomScript
-from cloudinit.sources.helpers.vmware.imc.config_file \
- import ConfigFile
-from cloudinit.sources.helpers.vmware.imc.config_nic \
- import NicConfigurator
-from cloudinit.sources.helpers.vmware.imc.config_passwd \
- import PasswordConfigurator
-from cloudinit.sources.helpers.vmware.imc.guestcust_error \
- import GuestCustErrorEnum
-from cloudinit.sources.helpers.vmware.imc.guestcust_event \
- import GuestCustEventEnum as GuestCustEvent
-from cloudinit.sources.helpers.vmware.imc.guestcust_state \
- import GuestCustStateEnum
+from cloudinit import safeyaml, sources, subp, util
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_custom_script import (
+ PostCustomScript,
+ PreCustomScript,
+)
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
+from cloudinit.sources.helpers.vmware.imc.config_passwd import (
+ PasswordConfigurator,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_error import (
+ GuestCustErrorEnum,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_event import (
+ GuestCustEventEnum as GuestCustEvent,
+)
+from cloudinit.sources.helpers.vmware.imc.guestcust_state import (
+ GuestCustStateEnum,
+)
from cloudinit.sources.helpers.vmware.imc.guestcust_util import (
enable_nics,
get_nics_to_enable,
- set_customization_status,
get_tools_config,
- set_gc_status
+ set_customization_status,
+ set_gc_status,
)
LOG = logging.getLogger(__name__)
CONFGROUPNAME_GUESTCUSTOMIZATION = "deployPkg"
GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS = "enable-custom-scripts"
+VMWARE_IMC_DIR = "/var/run/vmware-imc"
class DataSourceOVF(sources.DataSource):
@@ -56,7 +58,7 @@ class DataSourceOVF(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
+ self.seed_dir = os.path.join(paths.seed_dir, "ovf")
self.environment = None
self.cfg = {}
self.supported_seed_starts = ("/", "file://")
@@ -94,64 +96,152 @@ class DataSourceOVF(sources.DataSource):
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
- elif system_type and 'vmware' in system_type.lower():
+ elif system_type and "vmware" in system_type.lower():
LOG.debug("VMware Virtualization Platform found")
+ allow_vmware_cust = False
+ allow_raw_data = False
if not self.vmware_customization_supported:
- LOG.debug("Skipping the check for "
- "VMware Customization support")
- elif not util.get_cfg_option_bool(
- self.sys_cfg, "disable_vmware_customization", True):
-
+ LOG.debug(
+ "Skipping the check for VMware Customization support"
+ )
+ else:
+ allow_vmware_cust = not util.get_cfg_option_bool(
+ self.sys_cfg, "disable_vmware_customization", True
+ )
+ allow_raw_data = util.get_cfg_option_bool(
+ self.ds_cfg, "allow_raw_data", True
+ )
+
+ if not (allow_vmware_cust or allow_raw_data):
+ LOG.debug("Customization for VMware platform is disabled.")
+ else:
search_paths = (
- "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools",
- "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools")
+ "/usr/lib/vmware-tools",
+ "/usr/lib64/vmware-tools",
+ "/usr/lib/open-vm-tools",
+ "/usr/lib64/open-vm-tools",
+ "/usr/lib/x86_64-linux-gnu/open-vm-tools",
+ "/usr/lib/aarch64-linux-gnu/open-vm-tools",
+ )
plugin = "libdeployPkgPlugin.so"
deployPkgPluginPath = None
for path in search_paths:
deployPkgPluginPath = search_file(path, plugin)
if deployPkgPluginPath:
- LOG.debug("Found the customization plugin at %s",
- deployPkgPluginPath)
+ LOG.debug(
+ "Found the customization plugin at %s",
+ deployPkgPluginPath,
+ )
break
if deployPkgPluginPath:
# When the VM is powered on, the "VMware Tools" daemon
# copies the customization specification file to
# /var/run/vmware-imc directory. cloud-init code needs
- # to search for the file in that directory.
+ # to search for the file in that directory which indicates
+ # that required metadata and userdata files are now
+ # present.
max_wait = get_max_wait_from_cfg(self.ds_cfg)
vmwareImcConfigFilePath = util.log_time(
logfunc=LOG.debug,
msg="waiting for configuration file",
func=wait_for_imc_cfg_file,
- args=("cust.cfg", max_wait))
+ args=("cust.cfg", max_wait),
+ )
else:
LOG.debug("Did not find the customization plugin.")
+ md_path = None
if vmwareImcConfigFilePath:
- LOG.debug("Found VMware Customization Config File at %s",
- vmwareImcConfigFilePath)
- nicspath = wait_for_imc_cfg_file(
- filename="nics.txt", maxwait=10, naplen=5)
+ imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
+ cf = ConfigFile(vmwareImcConfigFilePath)
+ self._vmware_cust_conf = Config(cf)
+ LOG.debug(
+ "Found VMware Customization Config File at %s",
+ vmwareImcConfigFilePath,
+ )
+ try:
+ (md_path, ud_path, nicspath) = collect_imc_file_paths(
+ self._vmware_cust_conf
+ )
+ except FileNotFoundError as e:
+ _raise_error_status(
+ "File(s) missing in directory",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf,
+ )
+ # Don't handle the customization in the following 2 cases:
+ # 1. meta data is found, allow_raw_data is False.
+ # 2. no meta data is found, allow_vmware_cust is False.
+ if md_path and not allow_raw_data:
+ LOG.debug("Customization using raw data is disabled.")
+ # reset vmwareImcConfigFilePath to None to avoid
+ # customization for VMware platform
+ vmwareImcConfigFilePath = None
+ if md_path is None and not allow_vmware_cust:
+ LOG.debug(
+ "Customization using VMware config is disabled."
+ )
+ vmwareImcConfigFilePath = None
else:
LOG.debug("Did not find VMware Customization Config File")
- else:
- LOG.debug("Customization for VMware platform is disabled.")
- if vmwareImcConfigFilePath:
+ use_raw_data = bool(vmwareImcConfigFilePath and md_path)
+ if use_raw_data:
+ set_gc_status(self._vmware_cust_conf, "Started")
+ LOG.debug("Start to load cloud-init meta data and user data")
+ try:
+ (md, ud, cfg, network) = load_cloudinit_data(md_path, ud_path)
+
+ if network:
+ self._network_config = network
+ else:
+ self._network_config = (
+ self.distro.generate_fallback_config()
+ )
+
+ except safeyaml.YAMLError as e:
+ _raise_error_status(
+ "Error parsing the cloud-init meta data",
+ e,
+ GuestCustErrorEnum.GUESTCUST_ERROR_WRONG_META_FORMAT,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf,
+ )
+ except Exception as e:
+ _raise_error_status(
+ "Error loading cloud-init configuration",
+ e,
+ GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
+ vmwareImcConfigFilePath,
+ self._vmware_cust_conf,
+ )
+
+ self._vmware_cust_found = True
+ found.append("vmware-tools")
+
+ util.del_dir(imcdirpath)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_DONE,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
+ set_gc_status(self._vmware_cust_conf, "Successful")
+
+ elif vmwareImcConfigFilePath:
+ # Load configuration from vmware_imc
self._vmware_nics_to_enable = ""
try:
- cf = ConfigFile(vmwareImcConfigFilePath)
- self._vmware_cust_conf = Config(cf)
set_gc_status(self._vmware_cust_conf, "Started")
(md, ud, cfg) = read_vmware_imc(self._vmware_cust_conf)
self._vmware_nics_to_enable = get_nics_to_enable(nicspath)
- imcdirpath = os.path.dirname(vmwareImcConfigFilePath)
product_marker = self._vmware_cust_conf.marker_id
hasmarkerfile = check_marker_exists(
- product_marker, os.path.join(self.paths.cloud_dir, 'data'))
+ product_marker, os.path.join(self.paths.cloud_dir, "data")
+ )
special_customization = product_marker and not hasmarkerfile
customscript = self._vmware_cust_conf.custom_script_name
@@ -169,7 +259,8 @@ class DataSourceOVF(sources.DataSource):
custScriptConfig = get_tools_config(
CONFGROUPNAME_GUESTCUSTOMIZATION,
GUESTCUSTOMIZATION_ENABLE_CUST_SCRIPTS,
- defVal)
+ defVal,
+ )
if custScriptConfig.lower() != "true":
# Update the customization status if custom script
# is disabled
@@ -177,19 +268,21 @@ class DataSourceOVF(sources.DataSource):
LOG.debug(msg)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SCRIPT_DISABLED,
+ )
raise RuntimeError(msg)
ccScriptsDir = os.path.join(
- self.paths.get_cpath("scripts"),
- "per-instance")
+ self.paths.get_cpath("scripts"), "per-instance"
+ )
except Exception as e:
_raise_error_status(
"Error parsing the customization Config File",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if special_customization:
if customscript:
@@ -202,22 +295,22 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
try:
LOG.debug("Preparing the Network configuration")
self._network_config = get_network_config_from_conf(
- self._vmware_cust_conf,
- True,
- True,
- self.distro.osfamily)
+ self._vmware_cust_conf, True, True, self.distro.osfamily
+ )
except Exception as e:
_raise_error_status(
"Error preparing Network Configuration",
e,
GuestCustEvent.GUESTCUST_EVENT_NETWORK_SETUP_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if special_customization:
LOG.debug("Applying password customization")
@@ -226,8 +319,9 @@ class DataSourceOVF(sources.DataSource):
try:
resetpwd = self._vmware_cust_conf.reset_password
if adminpwd or resetpwd:
- pwdConfigurator.configure(adminpwd, resetpwd,
- self.distro)
+ pwdConfigurator.configure(
+ adminpwd, resetpwd, self.distro
+ )
else:
LOG.debug("Changing password is not needed")
except Exception as e:
@@ -236,13 +330,14 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if customscript:
try:
- postcust = PostCustomScript(customscript,
- imcdirpath,
- ccScriptsDir)
+ postcust = PostCustomScript(
+ customscript, imcdirpath, ccScriptsDir
+ )
postcust.execute()
except Exception as e:
_raise_error_status(
@@ -250,23 +345,26 @@ class DataSourceOVF(sources.DataSource):
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
if product_marker:
try:
setup_marker_files(
product_marker,
- os.path.join(self.paths.cloud_dir, 'data'))
+ os.path.join(self.paths.cloud_dir, "data"),
+ )
except Exception as e:
_raise_error_status(
"Error creating marker files",
e,
GuestCustEvent.GUESTCUST_EVENT_CUSTOMIZE_FAILED,
vmwareImcConfigFilePath,
- self._vmware_cust_conf)
+ self._vmware_cust_conf,
+ )
self._vmware_cust_found = True
- found.append('vmware-tools')
+ found.append("vmware-tools")
# TODO: Need to set the status to DONE only when the
# customization is done successfully.
@@ -274,36 +372,40 @@ class DataSourceOVF(sources.DataSource):
enable_nics(self._vmware_nics_to_enable)
set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_DONE,
- GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS,
+ )
set_gc_status(self._vmware_cust_conf, "Successful")
else:
- np = [('com.vmware.guestInfo', transport_vmware_guestinfo),
- ('iso', transport_iso9660)]
+ np = [
+ ("com.vmware.guestInfo", transport_vmware_guestinfo),
+ ("iso", transport_iso9660),
+ ]
name = None
for name, transfunc in np:
contents = transfunc()
if contents:
break
if contents:
- (md, ud, cfg) = read_ovf_environment(contents)
+ (md, ud, cfg) = read_ovf_environment(contents, True)
self.environment = contents
+ if "network-config" in md and md["network-config"]:
+ self._network_config = md["network-config"]
found.append(name)
# There was no OVF transports found
if len(found) == 0:
return False
- if 'seedfrom' in md and md['seedfrom']:
- seedfrom = md['seedfrom']
+ if "seedfrom" in md and md["seedfrom"]:
+ seedfrom = md["seedfrom"]
seedfound = False
for proto in self.supported_seed_starts:
if seedfrom.startswith(proto):
seedfound = proto
break
if not seedfound:
- LOG.debug("Seed from %s not supported by %s",
- seedfrom, self)
+ LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
(md_seed, ud, vd) = util.read_seeded(seedfrom, timeout=None)
@@ -324,14 +426,14 @@ class DataSourceOVF(sources.DataSource):
def _get_subplatform(self):
system_type = dmi.read_dmi_data("system-product-name").lower()
- if system_type == 'vmware':
- return 'vmware (%s)' % self.seed
- return 'ovf (%s)' % self.seed
+ if system_type == "vmware":
+ return "vmware (%s)" % self.seed
+ return "ovf (%s)" % self.seed
def get_public_ssh_keys(self):
- if 'public-keys' not in self.metadata:
+ if "public-keys" not in self.metadata:
return []
- pks = self.metadata['public-keys']
+ pks = self.metadata["public-keys"]
if isinstance(pks, (list)):
return pks
else:
@@ -351,14 +453,14 @@ class DataSourceOVF(sources.DataSource):
class DataSourceOVFNet(DataSourceOVF):
def __init__(self, sys_cfg, distro, paths):
DataSourceOVF.__init__(self, sys_cfg, distro, paths)
- self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
+ self.seed_dir = os.path.join(paths.seed_dir, "ovf-net")
self.supported_seed_starts = ("http://", "https://")
self.vmware_customization_supported = False
def get_max_wait_from_cfg(cfg):
- default_max_wait = 90
- max_wait_cfg_option = 'vmware_cust_file_max_wait'
+ default_max_wait = 15
+ max_wait_cfg_option = "vmware_cust_file_max_wait"
max_wait = default_max_wait
if not cfg:
@@ -367,20 +469,30 @@ def get_max_wait_from_cfg(cfg):
try:
max_wait = int(cfg.get(max_wait_cfg_option, default_max_wait))
except ValueError:
- LOG.warning("Failed to get '%s', using %s",
- max_wait_cfg_option, default_max_wait)
-
- if max_wait <= 0:
- LOG.warning("Invalid value '%s' for '%s', using '%s' instead",
- max_wait, max_wait_cfg_option, default_max_wait)
+ LOG.warning(
+ "Failed to get '%s', using %s",
+ max_wait_cfg_option,
+ default_max_wait,
+ )
+
+ if max_wait < 0:
+ LOG.warning(
+ "Invalid value '%s' for '%s', using '%s' instead",
+ max_wait,
+ max_wait_cfg_option,
+ default_max_wait,
+ )
max_wait = default_max_wait
return max_wait
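
Note the behavior change above: the default drops from 90 to 15 seconds, and the guard becomes max_wait < 0 rather than <= 0, so an explicit 0 is now a legal "don't wait" value. A condensed restatement:

    def toy_max_wait(cfg, default=15, key="vmware_cust_file_max_wait"):
        if not cfg:
            return default
        try:
            val = int(cfg.get(key, default))
        except ValueError:
            return default
        return default if val < 0 else val

    assert toy_max_wait({}) == 15
    assert toy_max_wait({"vmware_cust_file_max_wait": "0"}) == 0
    assert toy_max_wait({"vmware_cust_file_max_wait": "-1"}) == 15
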
-def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
- dirpath="/var/run/vmware-imc"):
+def wait_for_imc_cfg_file(
+ filename, maxwait=180, naplen=5, dirpath="/var/run/vmware-imc"
+):
waited = 0
+ if maxwait <= naplen:
+ naplen = 1
while waited < maxwait:
fileFullPath = os.path.join(dirpath, filename)
@@ -392,24 +504,26 @@ def wait_for_imc_cfg_file(filename, maxwait=180, naplen=5,
return None
-def get_network_config_from_conf(config, use_system_devices=True,
- configure=False, osfamily=None):
+def get_network_config_from_conf(
+ config, use_system_devices=True, configure=False, osfamily=None
+):
nicConfigurator = NicConfigurator(config.nics, use_system_devices)
nics_cfg_list = nicConfigurator.generate(configure, osfamily)
- return get_network_config(nics_cfg_list,
- config.name_servers,
- config.dns_suffixes)
+ return get_network_config(
+ nics_cfg_list, config.name_servers, config.dns_suffixes
+ )
def get_network_config(nics=None, nameservers=None, search=None):
config_list = nics
if nameservers or search:
- config_list.append({'type': 'nameserver', 'address': nameservers,
- 'search': search})
+ config_list.append(
+ {"type": "nameserver", "address": nameservers, "search": search}
+ )
- return {'version': 1, 'config': config_list}
+ return {"version": 1, "config": config_list}
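
For example, given one NIC entry plus nameservers, get_network_config yields a version-1 config with a trailing nameserver entry; restated standalone:

    def toy_get_network_config(nics, nameservers=None, search=None):
        config_list = list(nics)
        if nameservers or search:
            config_list.append(
                {"type": "nameserver", "address": nameservers, "search": search}
            )
        return {"version": 1, "config": config_list}

    print(toy_get_network_config(
        [{"type": "physical", "name": "eth0", "subnets": [{"type": "dhcp"}]}],
        nameservers=["10.0.0.2"],
        search=["example.invalid"],
    ))
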
# This will return a dict with some content
@@ -420,33 +534,40 @@ def read_vmware_imc(config):
ud = None
if config.host_name:
if config.domain_name:
- md['local-hostname'] = config.host_name + "." + config.domain_name
+ md["local-hostname"] = config.host_name + "." + config.domain_name
else:
- md['local-hostname'] = config.host_name
+ md["local-hostname"] = config.host_name
if config.timezone:
- cfg['timezone'] = config.timezone
+ cfg["timezone"] = config.timezone
- md['instance-id'] = "iid-vmware-imc"
+ md["instance-id"] = "iid-vmware-imc"
return (md, ud, cfg)
# This will return a dict with some content
# meta-data, user-data, some config
-def read_ovf_environment(contents):
+def read_ovf_environment(contents, read_network=False):
props = get_properties(contents)
md = {}
cfg = {}
ud = None
- cfg_props = ['password']
- md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
+ cfg_props = ["password"]
+ md_props = ["seedfrom", "local-hostname", "public-keys", "instance-id"]
+ network_props = ["network-config"]
for (prop, val) in props.items():
- if prop == 'hostname':
+ if prop == "hostname":
prop = "local-hostname"
if prop in md_props:
md[prop] = val
elif prop in cfg_props:
cfg[prop] = val
+ elif prop in network_props and read_network:
+ try:
+ network_config = base64.b64decode(val.encode())
+ md[prop] = safeload_yaml_or_dict(network_config).get("network")
+ except Exception:
+ LOG.debug("Ignoring network-config in invalid format")
elif prop == "user-data":
try:
ud = base64.b64decode(val.encode())
@@ -516,12 +637,12 @@ def transport_iso9660(require_iso=True):
# Go through mounts to see if it was already mounted
mounts = util.mounts()
for (dev, info) in mounts.items():
- fstype = info['fstype']
+ fstype = info["fstype"]
if fstype != "iso9660" and require_iso:
continue
if not maybe_cdrom_device(dev):
continue
- mp = info['mountpoint']
+ mp = info["mountpoint"]
(_fname, contents) = get_ovf_env(mp)
if contents is not False:
return contents
@@ -532,9 +653,11 @@ def transport_iso9660(require_iso=True):
mtype = None
# generate a list of devices with mtype filesystem, filter by regex
- devs = [dev for dev in
- util.find_devs_with("TYPE=%s" % mtype if mtype else None)
- if maybe_cdrom_device(dev)]
+ devs = [
+ dev
+ for dev in util.find_devs_with("TYPE=%s" % mtype if mtype else None)
+ if maybe_cdrom_device(dev)
+ ]
for dev in devs:
try:
(_fname, contents) = util.mount_cb(dev, get_ovf_env, mtype=mtype)
@@ -589,15 +712,17 @@ def get_properties(contents):
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
- propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
+ propSections = find_child(
+ dom.documentElement, lambda n: n.localName == "PropertySection"
+ )
if len(propSections) == 0:
raise XmlError("No 'PropertySection's")
props = {}
- propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
+ propElems = find_child(
+ propSections[0], (lambda n: n.localName == "Property")
+ )
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
@@ -624,7 +749,7 @@ class XmlError(Exception):
# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM,)),
(DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
@@ -668,7 +793,7 @@ def setup_marker_files(markerid, marker_dir):
for fname in os.listdir(marker_dir):
if fname.startswith(".markerfile"):
util.del_file(os.path.join(marker_dir, fname))
- open(markerfile, 'w').close()
+ open(markerfile, "w").close()
def _raise_error_status(prefix, error, event, config_file, conf):
@@ -676,12 +801,90 @@ def _raise_error_status(prefix, error, event, config_file, conf):
Raise error and send customization status to the underlying VMware
Virtualization Platform. Also, cleanup the imc directory.
"""
- LOG.debug('%s: %s', prefix, error)
- set_customization_status(
- GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
- event)
+ LOG.debug("%s: %s", prefix, error)
+ set_customization_status(GuestCustStateEnum.GUESTCUST_STATE_RUNNING, event)
set_gc_status(conf, prefix)
util.del_dir(os.path.dirname(config_file))
raise error
+
+def load_cloudinit_data(md_path, ud_path):
+ """
+ Load the cloud-init meta data, user data, cfg and network from the
+ given files
+
+ @return: 4-tuple of configuration
+ metadata, userdata, cfg={}, network
+
+ @raises: FileNotFoundError if md_path or ud_path are absent
+ """
+ LOG.debug("load meta data from %s, user data from %s", md_path, ud_path)
+ md = {}
+ ud = None
+ network = None
+
+ md = safeload_yaml_or_dict(util.load_file(md_path))
+
+ if "network" in md:
+ network = md["network"]
+
+ if ud_path:
+ ud = util.load_file(ud_path).replace("\r", "")
+ return md, ud, {}, network
+
+
+def safeload_yaml_or_dict(data):
+ """
+ The meta data could be JSON or YAML. Since YAML is a strict superset of
+ JSON, we will unmarshal the data as YAML. If data is None then a new
+ dictionary is returned.
+ """
+ if not data:
+ return {}
+ return safeyaml.load(data)
+
+
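
Since YAML 1.2 parses any JSON document, one loader covers both formats. A sketch of the same contract, using PyYAML's safe_load as a stand-in for cloudinit.safeyaml.load (that substitution is an assumption of this sketch):

    import yaml  # stand-in for cloudinit.safeyaml here

    def toy_safeload(data):
        return yaml.safe_load(data) if data else {}

    assert toy_safeload('{"instance-id": "iid-1"}') == {"instance-id": "iid-1"}
    assert toy_safeload("instance-id: iid-1") == {"instance-id": "iid-1"}
    assert toy_safeload(None) == {}
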
+def collect_imc_file_paths(cust_conf):
+ """
+ Collect the paths of the other IMC files (besides cust.cfg).
+
+ metadata is preferred to nics.txt configuration data.
+
+ If metadata file exists because it is specified in customization
+ configuration, then metadata is required and userdata is optional.
+
+ @return a 3-tuple containing desired configuration file paths if present
+ Expected returns:
+ 1. user-provided metadata and userdata (md_path, ud_path, None)
+ 2. user-provided metadata (md_path, None, None)
+ 3. user-provided network config (None, None, nics_path)
+ 4. no config found (None, None, None)
+ """
+ md_path = None
+ ud_path = None
+ nics_path = None
+ md_file = cust_conf.meta_data_name
+ if md_file:
+ md_path = os.path.join(VMWARE_IMC_DIR, md_file)
+ if not os.path.exists(md_path):
+ raise FileNotFoundError(
+ "meta data file not found: %s" % md_path
+ )
+
+ ud_file = cust_conf.user_data_name
+ if ud_file:
+ ud_path = os.path.join(VMWARE_IMC_DIR, ud_file)
+ if not os.path.exists(ud_path):
+ raise FileNotFoundError(
+ "user data file not found: %s" % ud_path
+ )
+ else:
+ nics_path = os.path.join(VMWARE_IMC_DIR, "nics.txt")
+ if not os.path.exists(nics_path):
+ LOG.debug("%s does not exist.", nics_path)
+ nics_path = None
+
+ return md_path, ud_path, nics_path
+
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 730ec586..e46f920d 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -20,16 +20,12 @@ import re
import string
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-
+from cloudinit import net, sources, subp, util
LOG = logging.getLogger(__name__)
DEFAULT_IID = "iid-dsopennebula"
-DEFAULT_PARSEUSER = 'nobody'
+DEFAULT_PARSEUSER = "nobody"
CONTEXT_DISK_FILES = ["context.sh"]
@@ -40,7 +36,7 @@ class DataSourceOpenNebula(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.seed = None
- self.seed_dir = os.path.join(paths.seed_dir, 'opennebula')
+ self.seed_dir = os.path.join(paths.seed_dir, "opennebula")
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -53,8 +49,8 @@ class DataSourceOpenNebula(sources.DataSource):
# decide parseuser for context.sh shell reader
parseuser = DEFAULT_PARSEUSER
- if 'parseuser' in self.ds_cfg:
- parseuser = self.ds_cfg.get('parseuser')
+ if "parseuser" in self.ds_cfg:
+ parseuser = self.ds_cfg.get("parseuser")
candidates = [self.seed_dir]
candidates.extend(find_candidate_devs())
@@ -90,29 +86,30 @@ class DataSourceOpenNebula(sources.DataSource):
return False
# merge fetched metadata with datasource defaults
- md = results['metadata']
+ md = results["metadata"]
md = util.mergemanydict([md, defaults])
# check for valid user specified dsmode
self.dsmode = self._determine_dsmode(
- [results.get('DSMODE'), self.ds_cfg.get('dsmode')])
+ [results.get("DSMODE"), self.ds_cfg.get("dsmode")]
+ )
if self.dsmode == sources.DSMODE_DISABLED:
return False
self.seed = seed
- self.network = results.get('network-interfaces')
+ self.network = results.get("network-interfaces")
self.metadata = md
- self.userdata_raw = results.get('userdata')
+ self.userdata_raw = results.get("userdata")
return True
def _get_subplatform(self):
"""Return the subplatform metadata source details."""
if self.seed_dir in self.seed:
- subplatform_type = 'seed-dir'
+ subplatform_type = "seed-dir"
else:
- subplatform_type = 'config-disk'
- return '%s (%s)' % (subplatform_type, self.seed)
+ subplatform_type = "config-disk"
+ return "%s (%s)" % (subplatform_type, self.seed)
@property
def network_config(self):
@@ -144,19 +141,25 @@ class OpenNebulaNetwork(object):
if system_nics_by_mac is None:
system_nics_by_mac = get_physical_nics_by_mac(distro)
self.ifaces = collections.OrderedDict(
- [k for k in sorted(system_nics_by_mac.items(),
- key=lambda k: net.natural_sort_key(k[1]))])
+ [
+ k
+ for k in sorted(
+ system_nics_by_mac.items(),
+ key=lambda k: net.natural_sort_key(k[1]),
+ )
+ ]
+ )
# OpenNebula 4.14+ provides the macaddr for ETHX in variable ETHX_MAC.
# context_devname provides {mac.lower():ETHX, mac2.lower():ETHX}
self.context_devname = {}
for k, v in context.items():
- m = re.match(r'^(.+)_MAC$', k)
+ m = re.match(r"^(.+)_MAC$", k)
if m:
self.context_devname[v.lower()] = m.group(1)
def mac2ip(self, mac):
- return '.'.join([str(int(c, 16)) for c in mac.split(':')[2:]])
+ return ".".join([str(int(c, 16)) for c in mac.split(":")[2:]])
def mac2network(self, mac):
return self.mac2ip(mac).rpartition(".")[0] + ".0"
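
mac2ip derives the guest IPv4 address from the low four octets of the MAC, read as decimal; mac2network then zeroes the host octet. Worked example:

    mac = "02:00:0a:00:00:05"
    ip = ".".join(str(int(c, 16)) for c in mac.split(":")[2:])
    assert ip == "10.0.0.5"
    assert ip.rpartition(".")[0] + ".0" == "10.0.0.0"
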
@@ -164,12 +167,12 @@ class OpenNebulaNetwork(object):
def get_nameservers(self, dev):
nameservers = {}
dns = self.get_field(dev, "dns", "").split()
- dns.extend(self.context.get('DNS', "").split())
+ dns.extend(self.context.get("DNS", "").split())
if dns:
- nameservers['addresses'] = dns
+ nameservers["addresses"] = dns
search_domain = self.get_field(dev, "search_domain", "").split()
if search_domain:
- nameservers['search'] = search_domain
+ nameservers["search"] = search_domain
return nameservers
def get_mtu(self, dev):
@@ -195,7 +198,12 @@ class OpenNebulaNetwork(object):
return self.get_field(dev, "gateway")
def get_gateway6(self, dev):
- return self.get_field(dev, "gateway6")
+ # OpenNebula 6.1.80 introduced new context parameter ETHx_IP6_GATEWAY
+ # to replace old ETHx_GATEWAY6. Old ETHx_GATEWAY6 will be removed in
+ # OpenNebula 6.4.0 (https://github.com/OpenNebula/one/issues/5536).
+ return self.get_field(
+ dev, "ip6_gateway", self.get_field(dev, "gateway6")
+ )
def get_mask(self, dev):
return self.get_field(dev, "mask", "255.255.255.0")
@@ -208,14 +216,21 @@ class OpenNebulaNetwork(object):
context stores <dev>_<NAME> (example: eth0_DOMAIN).
an empty string for value will return default."""
- val = self.context.get('_'.join((dev, name,)).upper())
+ val = self.context.get("_".join((dev, name)).upper())
# allow empty string to return the default.
return default if val in (None, "") else val
def gen_conf(self):
netconf = {}
- netconf['version'] = 2
- netconf['ethernets'] = {}
+ netconf["version"] = 2
+ netconf["ethernets"] = {}
ethernets = {}
for mac, dev in self.ifaces.items():
@@ -228,46 +243,46 @@ class OpenNebulaNetwork(object):
devconf = {}
# Set MAC address
- devconf['match'] = {'macaddress': mac}
+ devconf["match"] = {"macaddress": mac}
# Set IPv4 address
- devconf['addresses'] = []
+ devconf["addresses"] = []
mask = self.get_mask(c_dev)
- prefix = str(net.mask_to_net_prefix(mask))
- devconf['addresses'].append(
- self.get_ip(c_dev, mac) + '/' + prefix)
+ prefix = str(net.ipv4_mask_to_net_prefix(mask))
+ devconf["addresses"].append(self.get_ip(c_dev, mac) + "/" + prefix)
# Set IPv6 Global and ULA address
addresses6 = self.get_ip6(c_dev)
if addresses6:
prefix6 = self.get_ip6_prefix(c_dev)
- devconf['addresses'].extend(
- [i + '/' + prefix6 for i in addresses6])
+ devconf["addresses"].extend(
+ [i + "/" + prefix6 for i in addresses6]
+ )
# Set IPv4 default gateway
gateway = self.get_gateway(c_dev)
if gateway:
- devconf['gateway4'] = gateway
+ devconf["gateway4"] = gateway
# Set IPv6 default gateway
gateway6 = self.get_gateway6(c_dev)
if gateway6:
- devconf['gateway6'] = gateway6
+ devconf["gateway6"] = gateway6
# Set DNS servers and search domains
nameservers = self.get_nameservers(c_dev)
if nameservers:
- devconf['nameservers'] = nameservers
+ devconf["nameservers"] = nameservers
# Set MTU size
mtu = self.get_mtu(c_dev)
if mtu:
- devconf['mtu'] = mtu
+ devconf["mtu"] = mtu
ethernets[dev] = devconf
- netconf['ethernets'] = ethernets
- return(netconf)
+ netconf["ethernets"] = ethernets
+ return netconf
def find_candidate_devs():
@@ -275,7 +290,7 @@ def find_candidate_devs():
Return a list of devices that may contain the context disk.
"""
combined = []
- for f in ('LABEL=CONTEXT', 'LABEL=CDROM', 'TYPE=iso9660'):
+ for f in ("LABEL=CONTEXT", "LABEL=CDROM", "TYPE=iso9660"):
devs = util.find_devs_with(f)
devs.sort()
for d in devs:
@@ -286,16 +301,17 @@ def find_candidate_devs():
def switch_user_cmd(user):
- return ['sudo', '-u', user]
+ return ["sudo", "-u", user]
-def parse_shell_config(content, keylist=None, bash=None, asuser=None,
- switch_user_cb=None):
+def parse_shell_config(
+ content, keylist=None, bash=None, asuser=None, switch_user_cb=None
+):
if isinstance(bash, str):
bash = [bash]
elif bash is None:
- bash = ['bash', '-e']
+ bash = ["bash", "-e"]
if switch_user_cb is None:
switch_user_cb = switch_user_cmd
@@ -309,17 +325,24 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
keylist = allvars
keylist_in = []
- setup = '\n'.join(('__v="";', '',))
+ setup = "\n".join(('__v="";', ""))
def varprinter(vlist):
# output '_start_\0' followed by 'key=value\0' for each var in vlist
- return '\n'.join((
- 'printf "%s\\0" _start_',
- 'for __v in %s; do' % ' '.join(vlist),
- ' printf "%s=%s\\0" "$__v" "${!__v}";',
- 'done',
- ''
- ))
+ return "\n".join(
+ (
+ 'printf "%s\\0" _start_',
+ "for __v in %s; do" % " ".join(vlist),
+ ' printf "%s=%s\\0" "$__v" "${!__v}";',
+ "done",
+ "",
+ )
+ )
# the rendered 'bcmd' is bash syntax that does
# setup: declare variables we use (so they show up in 'all')
@@ -332,12 +355,15 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# key=value (for each preset variable)
# literal '_start_'
# key=value (for each post set variable)
- bcmd = ('unset IFS\n' +
- setup +
- varprinter(allvars) +
- '{\n%s\n\n:\n} > /dev/null\n' % content +
- 'unset IFS\n' +
- varprinter(keylist) + "\n")
+ bcmd = (
+ "unset IFS\n"
+ + setup
+ + varprinter(allvars)
+ + "{\n%s\n\n:\n} > /dev/null\n" % content
+ + "unset IFS\n"
+ + varprinter(keylist)
+ + "\n"
+ )
cmd = []
if asuser is not None:
@@ -349,8 +375,14 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# exclude vars in bash that change on their own or that we used
excluded = (
- "EPOCHREALTIME", "EPOCHSECONDS", "RANDOM", "LINENO", "SECONDS", "_",
- "SRANDOM", "__v",
+ "EPOCHREALTIME",
+ "EPOCHSECONDS",
+ "RANDOM",
+ "LINENO",
+ "SECONDS",
+ "_",
+ "SRANDOM",
+ "__v",
)
preset = {}
ret = {}
@@ -364,8 +396,9 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
(key, val) = line.split("=", 1)
if target is preset:
preset[key] = val
- elif (key not in excluded and
- (key in keylist_in or preset.get(key) != val)):
+ elif key not in excluded and (
+ key in keylist_in or preset.get(key) != val
+ ):
ret[key] = val
except ValueError:
if line != "_start_":
@@ -394,7 +427,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
raise NonContextDiskDir("%s: %s" % (source_dir, "no files found"))
context = {}
- results = {'userdata': None, 'metadata': {}}
+ results = {"userdata": None, "metadata": {}}
if "context.sh" in found:
if asuser is not None:
@@ -403,10 +436,11 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
except KeyError as e:
raise BrokenContextDiskDir(
"configured user '{user}' does not exist".format(
- user=asuser)
+ user=asuser
+ )
) from e
try:
- path = os.path.join(source_dir, 'context.sh')
+ path = os.path.join(source_dir, "context.sh")
content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
except subp.ProcessExecutionError as e:
@@ -423,7 +457,7 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
if not context:
return results
- results['metadata'] = context
+ results["metadata"] = context
# process single or multiple SSH keys
ssh_key_var = None
@@ -434,40 +468,41 @@ def read_context_disk_dir(source_dir, distro, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
- results['metadata']['public-keys'] = [
+ results["metadata"]["public-keys"] = [
line for line in lines if len(line) and not line.startswith("#")
]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
- for k in ('HOSTNAME', 'PUBLIC_IP', 'IP_PUBLIC', 'ETH0_IP'):
+ for k in ("SET_HOSTNAME", "HOSTNAME", "PUBLIC_IP", "IP_PUBLIC", "ETH0_IP"):
if k in context:
- results['metadata']['local-hostname'] = context[k]
+ results["metadata"]["local-hostname"] = context[k]
break
# raw user data
if "USER_DATA" in context:
- results['userdata'] = context["USER_DATA"]
+ results["userdata"] = context["USER_DATA"]
elif "USERDATA" in context:
- results['userdata'] = context["USERDATA"]
+ results["userdata"] = context["USERDATA"]
# b64decode user data if necessary (default)
- if 'userdata' in results:
- encoding = context.get('USERDATA_ENCODING',
- context.get('USER_DATA_ENCODING'))
+ if "userdata" in results:
+ encoding = context.get(
+ "USERDATA_ENCODING", context.get("USER_DATA_ENCODING")
+ )
if encoding == "base64":
try:
- results['userdata'] = util.b64d(results['userdata'])
+ results["userdata"] = util.b64d(results["userdata"])
except TypeError:
LOG.warning("Failed base64 decoding of userdata")
# generate Network Configuration v2
# only if there are any required context variables
# http://docs.opennebula.org/5.4/operation/references/template.html#context-section
- ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)]
+ ipaddr_keys = [k for k in context if re.match(r"^ETH\d+_IP.*$", k)]
if ipaddr_keys:
onet = OpenNebulaNetwork(context, distro)
- results['network-interfaces'] = onet.gen_conf()
+ results["network-interfaces"] = onet.gen_conf()
return results
@@ -484,7 +519,7 @@ DataSourceOpenNebulaNet = DataSourceOpenNebula
# Used to match classes to dependencies
datasources = [
- (DataSourceOpenNebula, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOpenNebula, (sources.DEP_FILESYSTEM,)),
]
@@ -492,4 +527,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
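
For orientation, a hypothetical context.sh payload and the dict that read_context_disk_dir() would distill from it via parse_shell_config (which sources the file in bash and diffs the variables):

    CONTEXT_SH = """\
    HOSTNAME='demo'
    ETH0_IP='10.0.0.5'
    ETH0_MAC='02:00:0a:00:00:05'
    USER_DATA='#cloud-config'
    """
    # expected parse result, per the handling above:
    expected = {
        "HOSTNAME": "demo",
        "ETH0_IP": "10.0.0.5",
        "ETH0_MAC": "02:00:0a:00:00:05",
        "USER_DATA": "#cloud-config",
    }
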
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index b3406c67..6878528d 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -8,13 +8,11 @@ import time
from cloudinit import dmi
from cloudinit import log as logging
+from cloudinit import sources, url_helper, util
+from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-
-from cloudinit.sources.helpers import openstack
from cloudinit.sources import DataSourceOracle as oracle
+from cloudinit.sources.helpers import openstack
LOG = logging.getLogger(__name__)
@@ -26,13 +24,13 @@ DEFAULT_METADATA = {
}
# OpenStack DMI constants
-DMI_PRODUCT_NOVA = 'OpenStack Nova'
-DMI_PRODUCT_COMPUTE = 'OpenStack Compute'
+DMI_PRODUCT_NOVA = "OpenStack Nova"
+DMI_PRODUCT_COMPUTE = "OpenStack Compute"
VALID_DMI_PRODUCT_NAMES = [DMI_PRODUCT_NOVA, DMI_PRODUCT_COMPUTE]
-DMI_ASSET_TAG_OPENTELEKOM = 'OpenTelekomCloud'
+DMI_ASSET_TAG_OPENTELEKOM = "OpenTelekomCloud"
# See github.com/sapcc/helm-charts/blob/master/openstack/nova/values.yaml
# -> compute.defaults.vmware.smbios_asset_tag for this value
-DMI_ASSET_TAG_SAPCCLOUD = 'SAP CCloud VM'
+DMI_ASSET_TAG_SAPCCLOUD = "SAP CCloud VM"
VALID_DMI_ASSET_TAGS = VALID_DMI_PRODUCT_NAMES
VALID_DMI_ASSET_TAGS += [DMI_ASSET_TAG_OPENTELEKOM, DMI_ASSET_TAG_SAPCCLOUD]
@@ -46,6 +44,15 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# Whether we want to get network configuration from the metadata service.
perform_dhcp_setup = False
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
+
def __init__(self, sys_cfg, distro, paths):
super(DataSourceOpenStack, self).__init__(sys_cfg, distro, paths)
self.metadata_address = None
@@ -64,8 +71,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
urls = self.ds_cfg.get("metadata_urls", [DEF_MD_URL])
filtered = [x for x in urls if util.is_resolvable_url(x)]
if set(filtered) != set(urls):
- LOG.debug("Removed the following from metadata urls: %s",
- list((set(urls) - set(filtered))))
+ LOG.debug(
+ "Removed the following from metadata urls: %s",
+ list((set(urls) - set(filtered))),
+ )
if len(filtered):
urls = filtered
else:
@@ -75,20 +84,25 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
md_urls = []
url2base = {}
for url in urls:
- md_url = url_helper.combine_url(url, 'openstack')
+ md_url = url_helper.combine_url(url, "openstack")
md_urls.append(md_url)
url2base[md_url] = url
url_params = self.get_url_params()
start_time = time.time()
avail_url, _response = url_helper.wait_for_url(
- urls=md_urls, max_wait=url_params.max_wait_seconds,
- timeout=url_params.timeout_seconds)
+ urls=md_urls,
+ max_wait=url_params.max_wait_seconds,
+ timeout=url_params.timeout_seconds,
+ )
if avail_url:
LOG.debug("Using metadata source: '%s'", url2base[avail_url])
else:
- LOG.debug("Giving up on OpenStack md from %s after %s seconds",
- md_urls, int(time.time() - start_time))
+ LOG.debug(
+ "Giving up on OpenStack md from %s after %s seconds",
+ md_urls,
+ int(time.time() - start_time),
+ )
self.metadata_address = url2base.get(avail_url)
return bool(avail_url)
@@ -106,18 +120,20 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
# RELEASE_BLOCKER: SRUs to Xenial and Artful should not provide
# network_config by default unless configured in /etc/cloud/cloud.cfg*.
# Patch Xenial and Artful before release to default to False.
- if util.is_false(self.ds_cfg.get('apply_network_config', True)):
+ if util.is_false(self.ds_cfg.get("apply_network_config", True)):
self._network_config = None
return self._network_config
if self.network_json == sources.UNSET:
# this would happen if get_data hadn't been called. leave as UNSET
LOG.warning(
- 'Unexpected call to network_config when network_json is None.')
+ "Unexpected call to network_config when network_json is None."
+ )
return None
- LOG.debug('network config provided via network_json')
+ LOG.debug("network config provided via network_json")
self._network_config = openstack.convert_net_json(
- self.network_json, known_macs=None)
+ self.network_json, known_macs=None
+ )
return self._network_config
def _get_data(self):
@@ -127,7 +143,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
False when unable to contact metadata service or when metadata
format is invalid or disabled.
"""
- oracle_considered = 'Oracle' in self.sys_cfg.get('datasource_list')
+ oracle_considered = "Oracle" in self.sys_cfg.get("datasource_list")
if not detect_openstack(accept_oracle=not oracle_considered):
return False
@@ -135,8 +151,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
with EphemeralDHCPv4(self.fallback_interface):
results = util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self._crawl_metadata,
+ )
except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
util.logexc(LOG, str(e))
return False
@@ -147,19 +165,19 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
util.logexc(LOG, str(e))
return False
- self.dsmode = self._determine_dsmode([results.get('dsmode')])
+ self.dsmode = self._determine_dsmode([results.get("dsmode")])
if self.dsmode == sources.DSMODE_DISABLED:
return False
- md = results.get('metadata', {})
+ md = results.get("metadata", {})
md = util.mergemanydict([md, DEFAULT_METADATA])
self.metadata = md
- self.ec2_metadata = results.get('ec2-metadata')
- self.network_json = results.get('networkdata')
- self.userdata_raw = results.get('userdata')
- self.version = results['version']
- self.files.update(results.get('files', {}))
+ self.ec2_metadata = results.get("ec2-metadata")
+ self.network_json = results.get("networkdata")
+ self.userdata_raw = results.get("userdata")
+ self.version = results["version"]
+ self.files.update(results.get("files", {}))
- vd = results.get('vendordata')
+ vd = results.get("vendordata")
self.vendordata_pure = vd
try:
self.vendordata_raw = sources.convert_vendordata(vd)
@@ -167,6 +185,14 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
LOG.warning("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
+ vd2 = results.get("vendordata2")
+ self.vendordata2_pure = vd2
+ try:
+ self.vendordata2_raw = sources.convert_vendordata(vd2)
+ except ValueError as e:
+ LOG.warning("Invalid content in vendor-data2: %s", e)
+ self.vendordata2_raw = None
+
return True
def _crawl_metadata(self):
@@ -179,26 +205,35 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
try:
if not self.wait_for_metadata_service():
raise sources.InvalidMetaDataException(
- 'No active metadata service found')
+ "No active metadata service found"
+ )
except IOError as e:
raise sources.InvalidMetaDataException(
- 'IOError contacting metadata service: {error}'.format(
- error=str(e)))
+ "IOError contacting metadata service: {error}".format(
+ error=str(e)
+ )
+ )
url_params = self.get_url_params()
try:
result = util.log_time(
- LOG.debug, 'Crawl of openstack metadata service',
- read_metadata_service, args=[self.metadata_address],
- kwargs={'ssl_details': self.ssl_details,
- 'retries': url_params.num_retries,
- 'timeout': url_params.timeout_seconds})
+ LOG.debug,
+ "Crawl of openstack metadata service",
+ read_metadata_service,
+ args=[self.metadata_address],
+ kwargs={
+ "ssl_details": self.ssl_details,
+ "retries": url_params.num_retries,
+ "timeout": url_params.timeout_seconds,
+ },
+ )
except openstack.NonReadable as e:
raise sources.InvalidMetaDataException(str(e))
except (openstack.BrokenMetadata, IOError) as e:
- msg = 'Broken metadata address {addr}'.format(
- addr=self.metadata_address)
+ msg = "Broken metadata address {addr}".format(
+ addr=self.metadata_address
+ )
raise sources.InvalidMetaDataException(msg) from e
return result
@@ -215,10 +250,10 @@ class DataSourceOpenStackLocal(DataSourceOpenStack):
perform_dhcp_setup = True # Get metadata network config if present
-def read_metadata_service(base_url, ssl_details=None,
- timeout=5, retries=5):
- reader = openstack.MetadataReader(base_url, ssl_details=ssl_details,
- timeout=timeout, retries=retries)
+def read_metadata_service(base_url, ssl_details=None, timeout=5, retries=5):
+ reader = openstack.MetadataReader(
+ base_url, ssl_details=ssl_details, timeout=timeout, retries=retries
+ )
return reader.read_v2()
@@ -226,14 +261,14 @@ def detect_openstack(accept_oracle=False):
"""Return True when a potential OpenStack platform is detected."""
if not util.is_x86():
return True # Non-Intel cpus don't properly report dmi product names
- product_name = dmi.read_dmi_data('system-product-name')
+ product_name = dmi.read_dmi_data("system-product-name")
if product_name in VALID_DMI_PRODUCT_NAMES:
return True
- elif dmi.read_dmi_data('chassis-asset-tag') in VALID_DMI_ASSET_TAGS:
+ elif dmi.read_dmi_data("chassis-asset-tag") in VALID_DMI_ASSET_TAGS:
return True
elif accept_oracle and oracle._is_platform_viable():
return True
- elif util.get_proc_env(1).get('product_name') == DMI_PRODUCT_NOVA:
+ elif util.get_proc_env(1).get("product_name") == DMI_PRODUCT_NOVA:
return True
return False
@@ -249,4 +284,5 @@ datasources = [
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
# vi: ts=4 expandtab
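
Condensed, the DMI identification above accepts these values (ignoring the non-x86 early return and the Oracle and /proc/1/environ branches):

    def toy_detect(product_name, asset_tag):
        names = ["OpenStack Nova", "OpenStack Compute"]
        tags = names + ["OpenTelekomCloud", "SAP CCloud VM"]
        return product_name in names or asset_tag in tags

    assert toy_detect("OpenStack Nova", None)
    assert toy_detect(None, "SAP CCloud VM")
    assert not toy_detect("Standard PC (i440FX + PIIX, 1996)", "")
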
diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py
index bf81b10b..6d81be1e 100644
--- a/cloudinit/sources/DataSourceOracle.py
+++ b/cloudinit/sources/DataSourceOracle.py
@@ -32,7 +32,7 @@ LOG = logging.getLogger(__name__)
BUILTIN_DS_CONFIG = {
# Don't use IMDS to configure secondary NICs by default
- 'configure_secondary_nics': False,
+ "configure_secondary_nics": False,
}
CHASSIS_ASSET_TAG = "OracleCloud.com"
METADATA_ROOT = "http://169.254.169.254/opc/v{version}/"
@@ -40,6 +40,7 @@ METADATA_PATTERN = METADATA_ROOT + "{path}/"
# https://docs.cloud.oracle.com/iaas/Content/Network/Troubleshoot/connectionhang.htm#Overview,
# indicates that an MTU of 9000 is used within OCI
MTU = 9000
+V2_HEADERS = {"Authorization": "Bearer Oracle"}
OpcMetadata = namedtuple("OpcMetadata", "version instance_data vnics_data")
@@ -60,50 +61,52 @@ def _ensure_netfailover_safe(network_config):
"""
# ignore anything that's not an actual network-config
- if 'version' not in network_config:
+ if "version" not in network_config:
return
- if network_config['version'] not in [1, 2]:
- LOG.debug('Ignoring unknown network config version: %s',
- network_config['version'])
+ if network_config["version"] not in [1, 2]:
+ LOG.debug(
+ "Ignoring unknown network config version: %s",
+ network_config["version"],
+ )
return
mac_to_name = get_interfaces_by_mac()
- if network_config['version'] == 1:
- for cfg in [c for c in network_config['config'] if 'type' in c]:
- if cfg['type'] == 'physical':
- if 'mac_address' in cfg:
- mac = cfg['mac_address']
+ if network_config["version"] == 1:
+ for cfg in [c for c in network_config["config"] if "type" in c]:
+ if cfg["type"] == "physical":
+ if "mac_address" in cfg:
+ mac = cfg["mac_address"]
cur_name = mac_to_name.get(mac)
if not cur_name:
continue
elif is_netfail_master(cur_name):
- del cfg['mac_address']
+ del cfg["mac_address"]
- elif network_config['version'] == 2:
- for _, cfg in network_config.get('ethernets', {}).items():
- if 'match' in cfg:
- macaddr = cfg.get('match', {}).get('macaddress')
+ elif network_config["version"] == 2:
+ for _, cfg in network_config.get("ethernets", {}).items():
+ if "match" in cfg:
+ macaddr = cfg.get("match", {}).get("macaddress")
if macaddr:
cur_name = mac_to_name.get(macaddr)
if not cur_name:
continue
elif is_netfail_master(cur_name):
- del cfg['match']['macaddress']
- del cfg['set-name']
- cfg['match']['name'] = cur_name
+ del cfg["match"]["macaddress"]
+ del cfg["set-name"]
+ cfg["match"]["name"] = cur_name
class DataSourceOracle(sources.DataSource):
- dsname = 'Oracle'
+ dsname = "Oracle"
system_uuid = None
vendordata_pure = None
network_config_sources = (
sources.NetworkConfigSource.cmdline,
+ sources.NetworkConfigSource.system_cfg,
sources.NetworkConfigSource.ds,
sources.NetworkConfigSource.initramfs,
- sources.NetworkConfigSource.system_cfg,
)
_network_config = sources.UNSET
@@ -112,9 +115,12 @@ class DataSourceOracle(sources.DataSource):
super(DataSourceOracle, self).__init__(sys_cfg, *args, **kwargs)
self._vnics_data = None
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ['datasource', self.dsname], {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", self.dsname], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
def _is_platform_viable(self):
"""Check platform environment to report if this datasource may run."""
@@ -129,12 +135,18 @@ class DataSourceOracle(sources.DataSource):
# network may be configured if iscsi root. If that is the case
# then read_initramfs_config will return non-None.
fetch_vnics_data = self.ds_cfg.get(
- 'configure_secondary_nics',
- BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
)
network_context = noop()
if not _is_iscsi_root():
- network_context = dhcp.EphemeralDHCPv4(net.find_fallback_nic())
+ network_context = dhcp.EphemeralDHCPv4(
+ iface=net.find_fallback_nic(),
+ connectivity_url_data={
+ "url": METADATA_PATTERN.format(version=2, path="instance"),
+ "headers": V2_HEADERS,
+ },
+ )
with network_context:
fetched_metadata = read_opc_metadata(
fetch_vnics_data=fetch_vnics_data
@@ -172,7 +184,7 @@ class DataSourceOracle(sources.DataSource):
return sources.instance_id_matches_system_uuid(self.system_uuid)
def get_public_ssh_keys(self):
- return sources.normalize_pubkey_data(self.metadata.get('public_keys'))
+ return sources.normalize_pubkey_data(self.metadata.get("public_keys"))
@property
def network_config(self):
@@ -189,8 +201,8 @@ class DataSourceOracle(sources.DataSource):
self._network_config = self.distro.generate_fallback_config()
if self.ds_cfg.get(
- 'configure_secondary_nics',
- BUILTIN_DS_CONFIG["configure_secondary_nics"]
+ "configure_secondary_nics",
+ BUILTIN_DS_CONFIG["configure_secondary_nics"],
):
try:
# Mutate self._network_config to include secondary
@@ -198,8 +210,8 @@ class DataSourceOracle(sources.DataSource):
self._add_network_config_from_opc_imds()
except Exception:
util.logexc(
- LOG,
- "Failed to parse secondary network configuration!")
+ LOG, "Failed to parse secondary network configuration!"
+ )
# we need to verify that the nic selected is not a netfail over
# device and, if it is a netfail master, then we need to avoid
@@ -223,11 +235,10 @@ class DataSourceOracle(sources.DataSource):
(if the IMDS returns valid JSON with unexpected contents).
"""
if self._vnics_data is None:
- LOG.warning(
- "Secondary NIC data is UNSET but should not be")
+ LOG.warning("Secondary NIC data is UNSET but should not be")
return
- if 'nicIndex' in self._vnics_data[0]:
+ if "nicIndex" in self._vnics_data[0]:
# TODO: Once configure_secondary_nics defaults to True, lower the
# level of this log message. (Currently, if we're running this
# code at all, someone has explicitly opted-in to secondary
@@ -236,8 +247,8 @@ class DataSourceOracle(sources.DataSource):
# Metal Machine launch, which means INFO or DEBUG would be more
# appropriate.)
LOG.warning(
- 'VNIC metadata indicates this is a bare metal machine; '
- 'skipping secondary VNIC configuration.'
+ "VNIC metadata indicates this is a bare metal machine; "
+ "skipping secondary VNIC configuration."
)
return
@@ -247,39 +258,45 @@ class DataSourceOracle(sources.DataSource):
# We skip the first entry in the response because the primary
# interface is already configured by iSCSI boot; applying
# configuration from the IMDS is not required.
- mac_address = vnic_dict['macAddr'].lower()
+ mac_address = vnic_dict["macAddr"].lower()
if mac_address not in interfaces_by_mac:
- LOG.debug('Interface with MAC %s not found; skipping',
- mac_address)
+ LOG.debug(
+ "Interface with MAC %s not found; skipping", mac_address
+ )
continue
name = interfaces_by_mac[mac_address]
- if self._network_config['version'] == 1:
+ if self._network_config["version"] == 1:
subnet = {
- 'type': 'static',
- 'address': vnic_dict['privateIp'],
+ "type": "static",
+ "address": vnic_dict["privateIp"],
+ }
+ self._network_config["config"].append(
+ {
+ "name": name,
+ "type": "physical",
+ "mac_address": mac_address,
+ "mtu": MTU,
+ "subnets": [subnet],
+ }
+ )
+ elif self._network_config["version"] == 2:
+ self._network_config["ethernets"][name] = {
+ "addresses": [vnic_dict["privateIp"]],
+ "mtu": MTU,
+ "dhcp4": False,
+ "dhcp6": False,
+ "match": {"macaddress": mac_address},
}
- self._network_config['config'].append({
- 'name': name,
- 'type': 'physical',
- 'mac_address': mac_address,
- 'mtu': MTU,
- 'subnets': [subnet],
- })
- elif self._network_config['version'] == 2:
- self._network_config['ethernets'][name] = {
- 'addresses': [vnic_dict['privateIp']],
- 'mtu': MTU, 'dhcp4': False, 'dhcp6': False,
- 'match': {'macaddress': mac_address}}
def _read_system_uuid():
- sys_uuid = dmi.read_dmi_data('system-uuid')
+ sys_uuid = dmi.read_dmi_data("system-uuid")
return None if sys_uuid is None else sys_uuid.lower()
def _is_platform_viable():
- asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+ asset_tag = dmi.read_dmi_data("chassis-asset-tag")
return asset_tag == CHASSIS_ASSET_TAG
@@ -304,11 +321,9 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
retries = 2
def _fetch(metadata_version: int, path: str) -> dict:
- headers = {
- "Authorization": "Bearer Oracle"} if metadata_version > 1 else None
return readurl(
url=METADATA_PATTERN.format(version=metadata_version, path=path),
- headers=headers,
+ headers=V2_HEADERS if metadata_version > 1 else None,
retries=retries,
)._response.json()
@@ -324,8 +339,9 @@ def read_opc_metadata(*, fetch_vnics_data: bool = False):
try:
vnics_data = _fetch(metadata_version, path="vnics")
except UrlError:
- util.logexc(LOG,
- "Failed to fetch secondary network configuration!")
+ util.logexc(
+ LOG, "Failed to fetch secondary network configuration!"
+ )
return OpcMetadata(metadata_version, instance_data, vnics_data)
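# Editor's sketch (hedged, not part of the original diff) of a caller using
# the OpcMetadata namedtuple returned above; _fetch may raise UrlError:
#
#     md = read_opc_metadata(fetch_vnics_data=True)
#     LOG.debug("IMDS v%d returned %d vnics",
#               md.version, len(md.vnics_data or []))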
diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py
index 0b8994bf..14ac77e4 100644
--- a/cloudinit/sources/DataSourceRbxCloud.py
+++ b/cloudinit/sources/DataSourceRbxCloud.py
@@ -14,32 +14,34 @@ import os
import os.path
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.event import EventType
+from cloudinit import sources, subp, util
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
-ETC_HOSTS = '/etc/hosts'
+ETC_HOSTS = "/etc/hosts"
def get_manage_etc_hosts():
hosts = util.load_file(ETC_HOSTS, quiet=True)
if hosts:
- LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False')
+ LOG.debug("/etc/hosts exists - setting manage_etc_hosts to False")
return False
- LOG.debug('/etc/hosts does not exists - setting manage_etc_hosts to True')
+ LOG.debug("/etc/hosts does not exists - setting manage_etc_hosts to True")
return True
def ip2int(addr):
- parts = addr.split('.')
- return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \
- (int(parts[2]) << 8) + int(parts[3])
+ parts = addr.split(".")
+ return (
+ (int(parts[0]) << 24)
+ + (int(parts[1]) << 16)
+ + (int(parts[2]) << 8)
+ + int(parts[3])
+ )
def int2ip(addr):
- return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
+ return ".".join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]])
def _sub_arp(cmd):
@@ -48,33 +50,35 @@ def _sub_arp(cmd):
and runs arping. Breaking this to a separate function
for later use in mocking and unittests
"""
- return subp.subp(['arping'] + cmd)
+ return subp.subp(["arping"] + cmd)
def gratuitous_arp(items, distro):
- source_param = '-S'
- if distro.name in ['fedora', 'centos', 'rhel']:
- source_param = '-s'
+ source_param = "-S"
+ if distro.name in ["fedora", "centos", "rhel"]:
+ source_param = "-s"
for item in items:
try:
- _sub_arp([
- '-c', '2',
- source_param, item['source'],
- item['destination']
- ])
+ _sub_arp(
+ ["-c", "2", source_param, item["source"], item["destination"]]
+ )
except subp.ProcessExecutionError as error:
# warn rather than fail: the system can keep functioning even if
# arping fails - a stale ARP entry elsewhere may simply have to
# wait out its expiration
- LOG.warning('Failed to arping from "%s" to "%s": %s',
- item['source'], item['destination'], error)
+ LOG.warning(
+ 'Failed to arping from "%s" to "%s": %s',
+ item["source"],
+ item["destination"],
+ error,
+ )
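# Editor's illustration (hypothetical addresses): a single item makes
# gratuitous_arp() run `arping -c 2 -S 192.0.2.10 192.0.2.1` (with -s
# instead of -S on fedora/centos/rhel):
#
#     gratuitous_arp(
#         [{"source": "192.0.2.10", "destination": "192.0.2.1"}], distro
#     )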
def get_md():
"""Returns False (not found or error) or a dictionary with metadata."""
devices = set(
- util.find_devs_with('LABEL=CLOUDMD') +
- util.find_devs_with('LABEL=cloudmd')
+ util.find_devs_with("LABEL=CLOUDMD")
+ + util.find_devs_with("LABEL=cloudmd")
)
if not devices:
return False
@@ -83,7 +87,7 @@ def get_md():
rbx_data = util.mount_cb(
device=device,
callback=read_user_data_callback,
- mtype=['vfat', 'fat', 'msdosfs']
+ mtype=["vfat", "fat", "msdosfs"],
)
if rbx_data:
return rbx_data
@@ -91,11 +95,13 @@ def get_md():
if err.errno != errno.ENOENT:
raise
except util.MountFailedError:
- util.logexc(LOG, "Failed to mount %s when looking for user "
- "data", device)
+ util.logexc(
+ LOG, "Failed to mount %s when looking for user data", device
+ )
- LOG.debug("Did not find RbxCloud data, searched devices: %s",
- ",".join(devices))
+ LOG.debug(
+ "Did not find RbxCloud data, searched devices: %s", ",".join(devices)
+ )
return False
@@ -107,25 +113,28 @@ def generate_network_config(netadps):
@returns: A dict containing network config
"""
return {
- 'version': 1,
- 'config': [
+ "version": 1,
+ "config": [
{
- 'type': 'physical',
- 'name': 'eth{}'.format(str(i)),
- 'mac_address': netadp['macaddress'].lower(),
- 'subnets': [
+ "type": "physical",
+ "name": "eth{}".format(str(i)),
+ "mac_address": netadp["macaddress"].lower(),
+ "subnets": [
{
- 'type': 'static',
- 'address': ip['address'],
- 'netmask': netadp['network']['netmask'],
- 'control': 'auto',
- 'gateway': netadp['network']['gateway'],
- 'dns_nameservers': netadp['network']['dns'][
- 'nameservers']
- } for ip in netadp['ip']
+ "type": "static",
+ "address": ip["address"],
+ "netmask": netadp["network"]["netmask"],
+ "control": "auto",
+ "gateway": netadp["network"]["gateway"],
+ "dns_nameservers": netadp["network"]["dns"][
+ "nameservers"
+ ],
+ }
+ for ip in netadp["ip"]
],
- } for i, netadp in enumerate(netadps)
- ]
+ }
+ for i, netadp in enumerate(netadps)
+ ],
}
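# Editor's sketch (hypothetical netadp entry) of the conversion above:
#
#     generate_network_config([{
#         "macaddress": "AA:BB:CC:DD:EE:FF",
#         "ip": [{"address": "192.0.2.10"}],
#         "network": {"netmask": "255.255.255.0", "gateway": "192.0.2.1",
#                     "dns": {"nameservers": ["192.0.2.53"]}},
#     }])
#
# yields a version-1 config with one physical "eth0" (MAC lowercased)
# carrying one static subnet for 192.0.2.10.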
@@ -140,65 +149,60 @@ def read_user_data_callback(mount_dir):
"""
meta_data = util.load_json(
text=util.load_file(
- fname=os.path.join(mount_dir, 'cloud.json'),
- decode=False
+ fname=os.path.join(mount_dir, "cloud.json"), decode=False
)
)
user_data = util.load_file(
- fname=os.path.join(mount_dir, 'user.data'),
- quiet=True
+ fname=os.path.join(mount_dir, "user.data"), quiet=True
)
- if 'vm' not in meta_data or 'netadp' not in meta_data:
+ if "vm" not in meta_data or "netadp" not in meta_data:
util.logexc(LOG, "Failed to load metadata. Invalid format.")
return None
- username = meta_data.get('additionalMetadata', {}).get('username')
- ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', [])
+ username = meta_data.get("additionalMetadata", {}).get("username")
+ ssh_keys = meta_data.get("additionalMetadata", {}).get("sshKeys", [])
hash = None
- if meta_data.get('additionalMetadata', {}).get('password'):
- hash = meta_data['additionalMetadata']['password']['sha512']
+ if meta_data.get("additionalMetadata", {}).get("password"):
+ hash = meta_data["additionalMetadata"]["password"]["sha512"]
- network = generate_network_config(meta_data['netadp'])
+ network = generate_network_config(meta_data["netadp"])
data = {
- 'userdata': user_data,
- 'metadata': {
- 'instance-id': meta_data['vm']['_id'],
- 'local-hostname': meta_data['vm']['name'],
- 'public-keys': []
+ "userdata": user_data,
+ "metadata": {
+ "instance-id": meta_data["vm"]["_id"],
+ "local-hostname": meta_data["vm"]["name"],
+ "public-keys": [],
},
- 'gratuitous_arp': [
- {
- "source": ip["address"],
- "destination": target
- }
- for netadp in meta_data['netadp']
- for ip in netadp['ip']
+ "gratuitous_arp": [
+ {"source": ip["address"], "destination": target}
+ for netadp in meta_data["netadp"]
+ for ip in netadp["ip"]
for target in [
- netadp['network']["gateway"],
- int2ip(ip2int(netadp['network']["gateway"]) + 2),
- int2ip(ip2int(netadp['network']["gateway"]) + 3)
+ netadp["network"]["gateway"],
+ int2ip(ip2int(netadp["network"]["gateway"]) + 2),
+ int2ip(ip2int(netadp["network"]["gateway"]) + 3),
]
],
- 'cfg': {
- 'ssh_pwauth': True,
- 'disable_root': True,
- 'system_info': {
- 'default_user': {
- 'name': username,
- 'gecos': username,
- 'sudo': ['ALL=(ALL) NOPASSWD:ALL'],
- 'passwd': hash,
- 'lock_passwd': False,
- 'ssh_authorized_keys': ssh_keys,
+ "cfg": {
+ "ssh_pwauth": True,
+ "disable_root": True,
+ "system_info": {
+ "default_user": {
+ "name": username,
+ "gecos": username,
+ "sudo": ["ALL=(ALL) NOPASSWD:ALL"],
+ "passwd": hash,
+ "lock_passwd": False,
+ "ssh_authorized_keys": ssh_keys,
}
},
- 'network_config': network,
- 'manage_etc_hosts': get_manage_etc_hosts(),
+ "network_config": network,
+ "manage_etc_hosts": get_manage_etc_hosts(),
},
}
- LOG.debug('returning DATA object:')
+ LOG.debug("returning DATA object:")
LOG.debug(data)
return data
@@ -206,10 +210,13 @@ def read_user_data_callback(mount_dir):
class DataSourceRbxCloud(sources.DataSource):
dsname = "RbxCloud"
- update_events = {'network': [
- EventType.BOOT_NEW_INSTANCE,
- EventType.BOOT
- ]}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -227,18 +234,18 @@ class DataSourceRbxCloud(sources.DataSource):
rbx_data = get_md()
if rbx_data is False:
return False
- self.userdata_raw = rbx_data['userdata']
- self.metadata = rbx_data['metadata']
- self.gratuitous_arp = rbx_data['gratuitous_arp']
- self.cfg = rbx_data['cfg']
+ self.userdata_raw = rbx_data["userdata"]
+ self.metadata = rbx_data["metadata"]
+ self.gratuitous_arp = rbx_data["gratuitous_arp"]
+ self.cfg = rbx_data["cfg"]
return True
@property
def network_config(self):
- return self.cfg['network_config']
+ return self.cfg["network_config"]
def get_public_ssh_keys(self):
- return self.metadata['public-keys']
+ return self.metadata["public-keys"]
def get_userdata_raw(self):
return self.userdata_raw
diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py
index 41be7665..8e5dd82c 100644
--- a/cloudinit/sources/DataSourceScaleway.py
+++ b/cloudinit/sources/DataSourceScaleway.py
@@ -27,21 +27,18 @@ from requests.packages.urllib3.poolmanager import PoolManager
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import sources
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import net
+from cloudinit import net, sources, url_helper, util
+from cloudinit.event import EventScope, EventType
from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit.event import EventType
LOG = logging.getLogger(__name__)
-DS_BASE_URL = 'http://169.254.42.42'
+DS_BASE_URL = "http://169.254.42.42"
BUILTIN_DS_CONFIG = {
- 'metadata_url': DS_BASE_URL + '/conf?format=json',
- 'userdata_url': DS_BASE_URL + '/user_data/cloud-init',
- 'vendordata_url': DS_BASE_URL + '/vendor_data/cloud-init'
+ "metadata_url": DS_BASE_URL + "/conf?format=json",
+ "userdata_url": DS_BASE_URL + "/user_data/cloud-init",
+ "vendordata_url": DS_BASE_URL + "/vendor_data/cloud-init",
}
DEF_MD_RETRIES = 5
@@ -57,15 +54,15 @@ def on_scaleway():
* the initrd created the file /var/run/scaleway.
* "scaleway" is in the kernel cmdline.
"""
- vendor_name = dmi.read_dmi_data('system-manufacturer')
- if vendor_name == 'Scaleway':
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name == "Scaleway":
return True
- if os.path.exists('/var/run/scaleway'):
+ if os.path.exists("/var/run/scaleway"):
return True
cmdline = util.get_cmdline()
- if 'scaleway' in cmdline:
+ if "scaleway" in cmdline:
return True
return False
@@ -75,6 +72,7 @@ class SourceAddressAdapter(requests.adapters.HTTPAdapter):
"""
Adapter for requests to choose the local address to bind to.
"""
+
def __init__(self, source_address, **kwargs):
self.source_address = source_address
super(SourceAddressAdapter, self).__init__(**kwargs)
@@ -83,11 +81,13 @@ class SourceAddressAdapter(requests.adapters.HTTPAdapter):
socket_options = HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
]
- self.poolmanager = PoolManager(num_pools=connections,
- maxsize=maxsize,
- block=block,
- source_address=self.source_address,
- socket_options=socket_options)
+ self.poolmanager = PoolManager(
+ num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ source_address=self.source_address,
+ socket_options=socket_options,
+ )
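# Editor's note: query_data_api() below mounts this adapter so that each
# retry binds a specific privileged local source port (hypothetical
# sketch):
#
#     session = requests.Session()
#     session.mount("http://", SourceAddressAdapter(("0.0.0.0", 1)))
#
# presumably because the metadata API only serves user data to requests
# originating from privileged ports.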
def query_data_api_once(api_address, timeout, requests_session):
@@ -117,9 +117,10 @@ def query_data_api_once(api_address, timeout, requests_session):
session=requests_session,
# If the error is a HTTP/404 or a ConnectionError, go into raise
# block below and don't bother retrying.
- exception_cb=lambda _, exc: exc.code != 404 and (
+ exception_cb=lambda _, exc: exc.code != 404
+ and (
not isinstance(exc.cause, requests.exceptions.ConnectionError)
- )
+ ),
)
return util.decode_binary(resp.contents)
except url_helper.UrlError as exc:
@@ -143,25 +144,22 @@ def query_data_api(api_type, api_address, retries, timeout):
for port in range(1, max(retries, 2)):
try:
LOG.debug(
- 'Trying to get %s data (bind on port %d)...',
- api_type, port
+ "Trying to get %s data (bind on port %d)...", api_type, port
)
requests_session = requests.Session()
requests_session.mount(
- 'http://',
- SourceAddressAdapter(source_address=('0.0.0.0', port))
+ "http://",
+ SourceAddressAdapter(source_address=("0.0.0.0", port)),
)
data = query_data_api_once(
- api_address,
- timeout=timeout,
- requests_session=requests_session
+ api_address, timeout=timeout, requests_session=requests_session
)
- LOG.debug('%s-data downloaded', api_type)
+ LOG.debug("%s-data downloaded", api_type)
return data
except url_helper.UrlError as exc:
# Local port already in use or HTTP/429.
- LOG.warning('Error while trying to get %s data: %s', api_type, exc)
+ LOG.warning("Error while trying to get %s data: %s", api_type, exc)
time.sleep(5)
last_exc = exc
continue
@@ -172,38 +170,44 @@ def query_data_api(api_type, api_address, retries, timeout):
class DataSourceScaleway(sources.DataSource):
dsname = "Scaleway"
- update_events = {'network': [EventType.BOOT_NEW_INSTANCE, EventType.BOOT]}
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
- BUILTIN_DS_CONFIG
- ])
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
- self.metadata_address = self.ds_cfg['metadata_url']
- self.userdata_address = self.ds_cfg['userdata_url']
- self.vendordata_address = self.ds_cfg['vendordata_url']
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.userdata_address = self.ds_cfg["userdata_url"]
+ self.vendordata_address = self.ds_cfg["vendordata_url"]
- self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES))
- self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT))
+ self.retries = int(self.ds_cfg.get("retries", DEF_MD_RETRIES))
+ self.timeout = int(self.ds_cfg.get("timeout", DEF_MD_TIMEOUT))
self._fallback_interface = None
self._network_config = sources.UNSET
def _crawl_metadata(self):
- resp = url_helper.readurl(self.metadata_address,
- timeout=self.timeout,
- retries=self.retries)
+ resp = url_helper.readurl(
+ self.metadata_address, timeout=self.timeout, retries=self.retries
+ )
self.metadata = json.loads(util.decode_binary(resp.contents))
self.userdata_raw = query_data_api(
- 'user-data', self.userdata_address,
- self.retries, self.timeout
+ "user-data", self.userdata_address, self.retries, self.timeout
)
self.vendordata_raw = query_data_api(
- 'vendor-data', self.vendordata_address,
- self.retries, self.timeout
+ "vendor-data", self.vendordata_address, self.retries, self.timeout
)
def _get_data(self):
@@ -215,8 +219,10 @@ class DataSourceScaleway(sources.DataSource):
try:
with EphemeralDHCPv4(self._fallback_interface):
util.log_time(
- logfunc=LOG.debug, msg='Crawl of metadata service',
- func=self._crawl_metadata)
+ logfunc=LOG.debug,
+ msg="Crawl of metadata service",
+ func=self._crawl_metadata,
+ )
except (NoDHCPLeaseError) as e:
util.logexc(LOG, str(e))
return False
@@ -229,8 +235,10 @@ class DataSourceScaleway(sources.DataSource):
metadata API.
"""
if self._network_config is None:
- LOG.warning('Found None as cached _network_config. '
- 'Resetting to %s', sources.UNSET)
+ LOG.warning(
+ "Found None as cached _network_config. Resetting to %s",
+ sources.UNSET,
+ )
self._network_config = sources.UNSET
if self._network_config != sources.UNSET:
@@ -239,16 +247,19 @@ class DataSourceScaleway(sources.DataSource):
if self._fallback_interface is None:
self._fallback_interface = net.find_fallback_nic()
- netcfg = {'type': 'physical', 'name': '%s' % self._fallback_interface}
- subnets = [{'type': 'dhcp4'}]
- if self.metadata['ipv6']:
- subnets += [{'type': 'static',
- 'address': '%s' % self.metadata['ipv6']['address'],
- 'gateway': '%s' % self.metadata['ipv6']['gateway'],
- 'netmask': '%s' % self.metadata['ipv6']['netmask'],
- }]
- netcfg['subnets'] = subnets
- self._network_config = {'version': 1, 'config': [netcfg]}
+ netcfg = {"type": "physical", "name": "%s" % self._fallback_interface}
+ subnets = [{"type": "dhcp4"}]
+ if self.metadata["ipv6"]:
+ subnets += [
+ {
+ "type": "static",
+ "address": "%s" % self.metadata["ipv6"]["address"],
+ "gateway": "%s" % self.metadata["ipv6"]["gateway"],
+ "netmask": "%s" % self.metadata["ipv6"]["netmask"],
+ }
+ ]
+ netcfg["subnets"] = subnets
+ self._network_config = {"version": 1, "config": [netcfg]}
return self._network_config
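# Editor's sketch (hypothetical values): with metadata such as
#     {"ipv6": {"address": "2001:db8::10", "gateway": "2001:db8::1",
#               "netmask": "64"}}
# the fallback nic gets a dhcp4 subnet plus that static IPv6 subnet.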
@property
@@ -256,14 +267,14 @@ class DataSourceScaleway(sources.DataSource):
return None
def get_instance_id(self):
- return self.metadata['id']
+ return self.metadata["id"]
def get_public_ssh_keys(self):
- ssh_keys = [key['key'] for key in self.metadata['ssh_public_keys']]
+ ssh_keys = [key["key"] for key in self.metadata["ssh_public_keys"]]
akeypre = "AUTHORIZED_KEY="
plen = len(akeypre)
- for tag in self.metadata.get('tags', []):
+ for tag in self.metadata.get("tags", []):
if not tag.startswith(akeypre):
continue
ssh_keys.append(tag[plen:].replace("_", " "))
@@ -271,7 +282,7 @@ class DataSourceScaleway(sources.DataSource):
return ssh_keys
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
- return self.metadata['hostname']
+ return self.metadata["hostname"]
@property
def availability_zone(self):
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index fd292baa..40f915fa 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -32,55 +32,51 @@ import socket
from cloudinit import dmi
from cloudinit import log as logging
-from cloudinit import serial
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import util
-from cloudinit.event import EventType
+from cloudinit import serial, sources, subp, util
+from cloudinit.event import EventScope, EventType
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
- 'instance-id': ('sdc:uuid', True),
- 'local-hostname': ('hostname', True),
- 'public-keys': ('root_authorized_keys', True),
- 'user-script': ('user-script', False),
- 'legacy-user-data': ('user-data', False),
- 'user-data': ('cloud-init:user-data', False),
- 'iptables_disable': ('iptables_disable', True),
- 'motd_sys_info': ('motd_sys_info', True),
- 'availability_zone': ('sdc:datacenter_name', True),
- 'vendor-data': ('sdc:vendor-data', False),
- 'operator-script': ('sdc:operator-script', False),
- 'hostname': ('sdc:hostname', True),
- 'dns_domain': ('sdc:dns_domain', True),
+ "instance-id": ("sdc:uuid", True),
+ "local-hostname": ("hostname", True),
+ "public-keys": ("root_authorized_keys", True),
+ "user-script": ("user-script", False),
+ "legacy-user-data": ("user-data", False),
+ "user-data": ("cloud-init:user-data", False),
+ "iptables_disable": ("iptables_disable", True),
+ "motd_sys_info": ("motd_sys_info", True),
+ "availability_zone": ("sdc:datacenter_name", True),
+ "vendor-data": ("sdc:vendor-data", False),
+ "operator-script": ("sdc:operator-script", False),
+ "hostname": ("sdc:hostname", True),
+ "dns_domain": ("sdc:dns_domain", True),
}
SMARTOS_ATTRIB_JSON = {
# Cloud-init Key : (SmartOS Key known JSON)
- 'network-data': 'sdc:nics',
- 'dns_servers': 'sdc:resolvers',
- 'routes': 'sdc:routes',
+ "network-data": "sdc:nics",
+ "dns_servers": "sdc:resolvers",
+ "routes": "sdc:routes",
}
SMARTOS_ENV_LX_BRAND = "lx-brand"
SMARTOS_ENV_KVM = "kvm"
-DS_NAME = 'SmartOS'
-DS_CFG_PATH = ['datasource', DS_NAME]
+DS_NAME = "SmartOS"
+DS_CFG_PATH = ["datasource", DS_NAME]
NO_BASE64_DECODE = [
- 'iptables_disable',
- 'motd_sys_info',
- 'root_authorized_keys',
- 'sdc:datacenter_name',
- 'sdc:uuid'
- 'user-data',
- 'user-script',
+ "iptables_disable",
+ "motd_sys_info",
+ "root_authorized_keys",
+ "sdc:datacenter_name",
+ "sdc:uuiduser-data",
+ "user-script",
]
-METADATA_SOCKFILE = '/native/.zonecontrol/metadata.sock'
-SERIAL_DEVICE = '/dev/ttyS1'
+METADATA_SOCKFILE = "/native/.zonecontrol/metadata.sock"
+SERIAL_DEVICE = "/dev/ttyS1"
SERIAL_TIMEOUT = 60
# BUILT-IN DATASOURCE CONFIGURATION
@@ -98,24 +94,26 @@ SERIAL_TIMEOUT = 60
# fs_setup: describes how to format the ephemeral drive
#
BUILTIN_DS_CONFIG = {
- 'serial_device': SERIAL_DEVICE,
- 'serial_timeout': SERIAL_TIMEOUT,
- 'metadata_sockfile': METADATA_SOCKFILE,
- 'no_base64_decode': NO_BASE64_DECODE,
- 'base64_keys': [],
- 'base64_all': False,
- 'disk_aliases': {'ephemeral0': '/dev/vdb'},
+ "serial_device": SERIAL_DEVICE,
+ "serial_timeout": SERIAL_TIMEOUT,
+ "metadata_sockfile": METADATA_SOCKFILE,
+ "no_base64_decode": NO_BASE64_DECODE,
+ "base64_keys": [],
+ "base64_all": False,
+ "disk_aliases": {"ephemeral0": "/dev/vdb"},
}
BUILTIN_CLOUD_CONFIG = {
- 'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': False,
- 'overwrite': False}
+ "disk_setup": {
+ "ephemeral0": {
+ "table_type": "mbr",
+ "layout": False,
+ "overwrite": False,
+ }
},
- 'fs_setup': [{'label': 'ephemeral0',
- 'filesystem': 'ext4',
- 'device': 'ephemeral0'}],
+ "fs_setup": [
+ {"label": "ephemeral0", "filesystem": "ext4", "device": "ephemeral0"}
+ ],
}
# builtin vendor-data is a boothook that writes a script into
@@ -170,18 +168,27 @@ class DataSourceSmartOS(sources.DataSource):
smartos_type = sources.UNSET
md_client = sources.UNSET
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ }
+ }
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
- self.ds_cfg = util.mergemanydict([
- self.ds_cfg,
- util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
- BUILTIN_DS_CONFIG])
+ self.ds_cfg = util.mergemanydict(
+ [
+ self.ds_cfg,
+ util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
self.metadata = {}
self.network_data = None
self._network_config = None
- self.update_events['network'].add(EventType.BOOT)
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
@@ -200,25 +207,28 @@ class DataSourceSmartOS(sources.DataSource):
if self.md_client == sources.UNSET:
self.md_client = jmc_client_factory(
smartos_type=self.smartos_type,
- metadata_sockfile=self.ds_cfg['metadata_sockfile'],
- serial_device=self.ds_cfg['serial_device'],
- serial_timeout=self.ds_cfg['serial_timeout'])
+ metadata_sockfile=self.ds_cfg["metadata_sockfile"],
+ serial_device=self.ds_cfg["serial_device"],
+ serial_timeout=self.ds_cfg["serial_timeout"],
+ )
def _set_provisioned(self):
- '''Mark the instance provisioning state as successful.
+ """Mark the instance provisioning state as successful.
When run in a zone, the host OS will look for /var/svc/provisioning
to be renamed as /var/svc/provision_success. This should be done
after meta-data is successfully retrieved and from this point
the host considers the provision of the zone to be a success and
keeps the zone running.
- '''
+ """
- LOG.debug('Instance provisioning state set as successful')
- svc_path = '/var/svc'
- if os.path.exists('/'.join([svc_path, 'provisioning'])):
- os.rename('/'.join([svc_path, 'provisioning']),
- '/'.join([svc_path, 'provision_success']))
+ LOG.debug("Instance provisioning state set as successful")
+ svc_path = "/var/svc"
+ if os.path.exists("/".join([svc_path, "provisioning"])):
+ os.rename(
+ "/".join([svc_path, "provisioning"]),
+ "/".join([svc_path, "provision_success"]),
+ )
def _get_data(self):
self._init()
@@ -231,8 +241,10 @@ class DataSourceSmartOS(sources.DataSource):
return False
if not self.md_client.exists():
- LOG.debug("No metadata device '%r' found for SmartOS datasource",
- self.md_client)
+ LOG.debug(
+ "No metadata device '%r' found for SmartOS datasource",
+ self.md_client,
+ )
return False
# Open once for many requests, rather than once for each request
@@ -255,24 +267,33 @@ class DataSourceSmartOS(sources.DataSource):
# We write 'user-script' and 'operator-script' into the
# instance/data directory. The default vendor-data then handles
# executing them later.
- data_d = os.path.join(self.paths.get_cpath(), 'instances',
- md['instance-id'], 'data')
- user_script = os.path.join(data_d, 'user-script')
+ data_d = os.path.join(
+ self.paths.get_cpath(), "instances", md["instance-id"], "data"
+ )
+ user_script = os.path.join(data_d, "user-script")
u_script_l = "%s/user-script" % LEGACY_USER_D
- write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0o700)
-
- operator_script = os.path.join(data_d, 'operator-script')
- write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False,
- mode=0o700)
+ write_boot_content(
+ md.get("user-script"),
+ content_f=user_script,
+ link=u_script_l,
+ shebang=True,
+ mode=0o700,
+ )
+
+ operator_script = os.path.join(data_d, "operator-script")
+ write_boot_content(
+ md.get("operator-script"),
+ content_f=operator_script,
+ shebang=False,
+ mode=0o700,
+ )
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
# to the phase that runs user-script. This file is not to be executed.
# This allows a configuration file of some kind to be injected into
# the machine to be consumed by the user-script when it runs.
- u_data = md.get('legacy-user-data')
+ u_data = md.get("legacy-user-data")
u_data_f = "%s/mdata-user-data" % LEGACY_USER_D
write_boot_content(u_data, u_data_f)
@@ -280,38 +301,39 @@ class DataSourceSmartOS(sources.DataSource):
# The hostname may or may not be qualified with the local domain name.
# This follows section 3.14 of RFC 2132.
- if not md['local-hostname']:
- if md['hostname']:
- md['local-hostname'] = md['hostname']
+ if not md["local-hostname"]:
+ if md["hostname"]:
+ md["local-hostname"] = md["hostname"]
else:
- md['local-hostname'] = md['instance-id']
+ md["local-hostname"] = md["instance-id"]
ud = None
- if md['user-data']:
- ud = md['user-data']
-
- if not md['vendor-data']:
- md['vendor-data'] = BUILTIN_VENDOR_DATA % {
- 'user_script': user_script,
- 'operator_script': operator_script,
- 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"),
- 'per-boot'),
+ if md["user-data"]:
+ ud = md["user-data"]
+
+ if not md["vendor-data"]:
+ md["vendor-data"] = BUILTIN_VENDOR_DATA % {
+ "user_script": user_script,
+ "operator_script": operator_script,
+ "per_boot_d": os.path.join(
+ self.paths.get_cpath("scripts"), "per-boot"
+ ),
}
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
- self.vendordata_raw = md['vendor-data']
- self.network_data = md['network-data']
- self.routes_data = md['routes']
+ self.vendordata_raw = md["vendor-data"]
+ self.network_data = md["network-data"]
+ self.routes_data = md["routes"]
self._set_provisioned()
return True
def _get_subplatform(self):
- return 'serial (%s)' % SERIAL_DEVICE
+ return "serial (%s)" % SERIAL_DEVICE
def device_name_to_device(self, name):
- return self.ds_cfg['disk_aliases'].get(name)
+ return self.ds_cfg["disk_aliases"].get(name)
def get_config_obj(self):
if self.smartos_type == SMARTOS_ENV_KVM:
@@ -319,7 +341,7 @@ class DataSourceSmartOS(sources.DataSource):
return {}
def get_instance_id(self):
- return self.metadata['instance-id']
+ return self.metadata["instance-id"]
@property
def network_config(self):
@@ -329,12 +351,12 @@ class DataSourceSmartOS(sources.DataSource):
if self._network_config is None:
if self.network_data is not None:
- self._network_config = (
- convert_smartos_network_data(
- network_data=self.network_data,
- dns_servers=self.metadata['dns_servers'],
- dns_domain=self.metadata['dns_domain'],
- routes=self.routes_data))
+ self._network_config = convert_smartos_network_data(
+ network_data=self.network_data,
+ dns_servers=self.metadata["dns_servers"],
+ dns_domain=self.metadata["dns_domain"],
+ routes=self.routes_data,
+ )
return self._network_config
@@ -353,10 +375,12 @@ class JoyentMetadataClient(object):
The full specification can be found at
http://eng.joyent.com/mdata/protocol.html
"""
+
line_regex = re.compile(
- r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
- r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
- r'( (?P<payload>.+))?)')
+ r"V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)"
+ r" (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)"
+ r"( (?P<payload>.+))?)"
+ )
def __init__(self, smartos_type=None, fp=None):
if smartos_type is None:
@@ -365,43 +389,50 @@ class JoyentMetadataClient(object):
self.fp = fp
def _checksum(self, body):
- return '{0:08x}'.format(
- binascii.crc32(body.encode('utf-8')) & 0xffffffff)
+ return "{0:08x}".format(
+ binascii.crc32(body.encode("utf-8")) & 0xFFFFFFFF
+ )
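    # Editor's note (illustrative): a V2 frame on the wire looks like
    #     V2 <len> <crc32> <request_id> <SUCCESS|NOTFOUND> [<payload>]
    # where <len> counts the body (request id onward), <crc32> is
    # _checksum(body), and <payload> is base64 - e.g. "aGVsbG8=" decodes
    # to "hello".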
def _get_value_from_frame(self, expected_request_id, frame):
frame_data = self.line_regex.match(frame).groupdict()
- if int(frame_data['length']) != len(frame_data['body']):
+ if int(frame_data["length"]) != len(frame_data["body"]):
raise JoyentMetadataFetchException(
- 'Incorrect frame length given ({0} != {1}).'.format(
- frame_data['length'], len(frame_data['body'])))
- expected_checksum = self._checksum(frame_data['body'])
- if frame_data['checksum'] != expected_checksum:
+ "Incorrect frame length given ({0} != {1}).".format(
+ frame_data["length"], len(frame_data["body"])
+ )
+ )
+ expected_checksum = self._checksum(frame_data["body"])
+ if frame_data["checksum"] != expected_checksum:
raise JoyentMetadataFetchException(
- 'Invalid checksum (expected: {0}; got {1}).'.format(
- expected_checksum, frame_data['checksum']))
- if frame_data['request_id'] != expected_request_id:
+ "Invalid checksum (expected: {0}; got {1}).".format(
+ expected_checksum, frame_data["checksum"]
+ )
+ )
+ if frame_data["request_id"] != expected_request_id:
raise JoyentMetadataFetchException(
- 'Request ID mismatch (expected: {0}; got {1}).'.format(
- expected_request_id, frame_data['request_id']))
- if not frame_data.get('payload', None):
- LOG.debug('No value found.')
+ "Request ID mismatch (expected: {0}; got {1}).".format(
+ expected_request_id, frame_data["request_id"]
+ )
+ )
+ if not frame_data.get("payload", None):
+ LOG.debug("No value found.")
return None
- value = util.b64d(frame_data['payload'])
+ value = util.b64d(frame_data["payload"])
LOG.debug('Value "%s" found.', value)
return value
def _readline(self):
"""
- Reads a line a byte at a time until \n is encountered. Returns an
- ascii string with the trailing newline removed.
+ Reads a line a byte at a time until \n is encountered. Returns an
+ ascii string with the trailing newline removed.
- If a timeout (per-byte) is set and it expires, a
- JoyentMetadataFetchException will be thrown.
+ If a timeout (per-byte) is set and it expires, a
+ JoyentMetadataFetchException will be thrown.
"""
response = []
def as_ascii():
- return b''.join(response).decode('ascii')
+ return b"".join(response).decode("ascii")
msg = "Partial response: '%s'"
while True:
@@ -409,7 +440,7 @@ class JoyentMetadataClient(object):
byte = self.fp.read(1)
if len(byte) == 0:
raise JoyentMetadataTimeoutException(msg % as_ascii())
- if byte == b'\n':
+ if byte == b"\n":
return as_ascii()
response.append(byte)
except OSError as exc:
@@ -420,26 +451,33 @@ class JoyentMetadataClient(object):
raise
def _write(self, msg):
- self.fp.write(msg.encode('ascii'))
+ self.fp.write(msg.encode("ascii"))
self.fp.flush()
def _negotiate(self):
- LOG.debug('Negotiating protocol V2')
- self._write('NEGOTIATE V2\n')
+ LOG.debug("Negotiating protocol V2")
+ self._write("NEGOTIATE V2\n")
response = self._readline()
LOG.debug('read "%s"', response)
- if response != 'V2_OK':
+ if response != "V2_OK":
raise JoyentMetadataFetchException(
- 'Invalid response "%s" to "NEGOTIATE V2"' % response)
- LOG.debug('Negotiation complete')
+ 'Invalid response "%s" to "NEGOTIATE V2"' % response
+ )
+ LOG.debug("Negotiation complete")
def request(self, rtype, param=None):
- request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
- message_body = ' '.join((request_id, rtype,))
+ request_id = "{0:08x}".format(random.randint(0, 0xFFFFFFFF))
+ message_body = " ".join(
+ (
+ request_id,
+ rtype,
+ )
+ )
if param:
- message_body += ' ' + base64.b64encode(param.encode()).decode()
- msg = 'V2 {0} {1} {2}\n'.format(
- len(message_body), self._checksum(message_body), message_body)
+ message_body += " " + base64.b64encode(param.encode()).decode()
+ msg = "V2 {0} {1} {2}\n".format(
+ len(message_body), self._checksum(message_body), message_body
+ )
LOG.debug('Writing "%s" to metadata transport.', msg)
need_close = False
@@ -454,14 +492,14 @@ class JoyentMetadataClient(object):
LOG.debug('Read "%s" from metadata transport.', response)
- if 'SUCCESS' not in response:
+ if "SUCCESS" not in response:
return None
value = self._get_value_from_frame(request_id, response)
return value
def get(self, key, default=None, strip=False):
- result = self.request(rtype='GET', param=key)
+ result = self.request(rtype="GET", param=key)
if result is None:
return default
if result and strip:
@@ -475,18 +513,19 @@ class JoyentMetadataClient(object):
return json.loads(result)
def list(self):
- result = self.request(rtype='KEYS')
+ result = self.request(rtype="KEYS")
if not result:
return []
- return result.split('\n')
+ return result.split("\n")
def put(self, key, val):
- param = b' '.join([base64.b64encode(i.encode())
- for i in (key, val)]).decode()
- return self.request(rtype='PUT', param=param)
+ param = b" ".join(
+ [base64.b64encode(i.encode()) for i in (key, val)]
+ ).decode()
+ return self.request(rtype="PUT", param=param)
def delete(self, key):
- return self.request(rtype='DELETE', param=key)
+ return self.request(rtype="DELETE", param=key)
def close_transport(self):
if self.fp:
@@ -515,7 +554,7 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
def open_transport(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(self.socketpath)
- self.fp = sock.makefile('rwb')
+ self.fp = sock.makefile("rwb")
self._negotiate()
def exists(self):
@@ -526,8 +565,9 @@ class JoyentMetadataSocketClient(JoyentMetadataClient):
class JoyentMetadataSerialClient(JoyentMetadataClient):
- def __init__(self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM,
- fp=None):
+ def __init__(
+ self, device, timeout=10, smartos_type=SMARTOS_ENV_KVM, fp=None
+ ):
super(JoyentMetadataSerialClient, self).__init__(smartos_type, fp)
self.device = device
self.timeout = timeout
@@ -546,7 +586,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self._negotiate()
def _flush(self):
- LOG.debug('Flushing input')
+ LOG.debug("Flushing input")
# Read any pending data
timeout = self.fp.timeout
self.fp.timeout = 0.1
@@ -555,7 +595,7 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self._readline()
except JoyentMetadataTimeoutException:
break
- LOG.debug('Input empty')
+ LOG.debug("Input empty")
# Send a newline and expect "invalid command". Keep trying until
# successful. Retry rather frequently so that the "Is the host
@@ -567,24 +607,29 @@ class JoyentMetadataSerialClient(JoyentMetadataClient):
self.fp.timeout = timeout
while True:
LOG.debug('Writing newline, expecting "invalid command"')
- self._write('\n')
+ self._write("\n")
try:
response = self._readline()
- if response == 'invalid command':
+ if response == "invalid command":
break
- if response == 'FAILURE':
+ if response == "FAILURE":
LOG.debug('Got "FAILURE". Retrying.')
continue
LOG.warning('Unexpected response "%s" during flush', response)
except JoyentMetadataTimeoutException:
- LOG.warning('Timeout while initializing metadata client. '
- 'Is the host metadata service running?')
+ LOG.warning(
+ "Timeout while initializing metadata client. "
+ "Is the host metadata service running?"
+ )
LOG.debug('Got "invalid command". Flush complete.')
self.fp.timeout = timeout
def __repr__(self):
return "%s(device=%s, timeout=%s)" % (
- self.__class__.__name__, self.device, self.timeout)
+ self.__class__.__name__,
+ self.device,
+ self.timeout,
+ )
class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
@@ -616,7 +661,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
keys = None
if self.base64_all is None:
keys = self.list()
- if 'base64_all' in keys:
+ if "base64_all" in keys:
self.base64_all = util.is_true(self._get("base64_all"))
else:
self.base64_all = False
@@ -629,7 +674,7 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
if keys is None:
keys = self.list()
b64_keys = set()
- if 'base64_keys' in keys:
+ if "base64_keys" in keys:
b64_keys = set(self._get("base64_keys").split(","))
# now add any b64-<keyname> that has a true value
@@ -643,8 +688,9 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
self.base64_keys = b64_keys
def _get(self, key, default=None, strip=False):
- return (super(JoyentMetadataLegacySerialClient, self).
- get(key, default=default, strip=strip))
+ return super(JoyentMetadataLegacySerialClient, self).get(
+ key, default=default, strip=strip
+ )
def is_b64_encoded(self, key, reset=False):
if key in NO_BASE64_DECODE:
@@ -676,9 +722,12 @@ class JoyentMetadataLegacySerialClient(JoyentMetadataSerialClient):
def jmc_client_factory(
- smartos_type=None, metadata_sockfile=METADATA_SOCKFILE,
- serial_device=SERIAL_DEVICE, serial_timeout=SERIAL_TIMEOUT,
- uname_version=None):
+ smartos_type=None,
+ metadata_sockfile=METADATA_SOCKFILE,
+ serial_device=SERIAL_DEVICE,
+ serial_timeout=SERIAL_TIMEOUT,
+ uname_version=None,
+):
if smartos_type is None:
smartos_type = get_smartos_environ(uname_version)
@@ -687,11 +736,14 @@ def jmc_client_factory(
return None
elif smartos_type == SMARTOS_ENV_KVM:
return JoyentMetadataLegacySerialClient(
- device=serial_device, timeout=serial_timeout,
- smartos_type=smartos_type)
+ device=serial_device,
+ timeout=serial_timeout,
+ smartos_type=smartos_type,
+ )
elif smartos_type == SMARTOS_ENV_LX_BRAND:
- return JoyentMetadataSocketClient(socketpath=metadata_sockfile,
- smartos_type=smartos_type)
+ return JoyentMetadataSocketClient(
+ socketpath=metadata_sockfile, smartos_type=smartos_type
+ )
raise ValueError("Unknown value for smartos_type: %s" % smartos_type)
@@ -704,12 +756,14 @@ def identify_file(content_f):
LOG.debug("script %s mime type is %s", content_f, f_type)
except subp.ProcessExecutionError as e:
util.logexc(
- LOG, ("Failed to identify script type for %s" % content_f, e))
+ LOG, ("Failed to identify script type for %s" % content_f, e)
+ )
return None if f_type is None else f_type.strip()
-def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0o400):
+def write_boot_content(
+ content, content_f, link=None, shebang=False, mode=0o400
+):
"""
Write the content to content_f. Under the following rules:
1. If no content, remove the file
@@ -743,7 +797,8 @@ def write_boot_content(content, content_f, link=None, shebang=False,
f_type = identify_file(content_f)
if f_type == "text/plain":
util.write_file(
- content_f, "\n".join(["#!/bin/bash", content]), mode=mode)
+ content_f, "\n".join(["#!/bin/bash", content]), mode=mode
+ )
LOG.debug("added shebang to file %s", content_f)
if link:
@@ -764,7 +819,7 @@ def get_smartos_environ(uname_version=None, product_name=None):
# report 'BrandZ virtual linux' as the kernel version
if uname_version is None:
uname_version = uname[3]
- if uname_version == 'BrandZ virtual linux':
+ if uname_version == "BrandZ virtual linux":
return SMARTOS_ENV_LX_BRAND
if product_name is None:
@@ -772,16 +827,16 @@ def get_smartos_environ(uname_version=None, product_name=None):
else:
system_type = product_name
- if system_type and system_type.startswith('SmartDC'):
+ if system_type and system_type.startswith("SmartDC"):
return SMARTOS_ENV_KVM
return None
# Convert SMARTOS 'sdc:nics' data to network_config yaml
-def convert_smartos_network_data(network_data=None,
- dns_servers=None, dns_domain=None,
- routes=None):
+def convert_smartos_network_data(
+ network_data=None, dns_servers=None, dns_domain=None, routes=None
+):
"""Return a dictionary of network_config by parsing provided
SMARTOS sdc:nics configuration data
@@ -806,28 +861,28 @@ def convert_smartos_network_data(network_data=None,
"""
valid_keys = {
- 'physical': [
- 'mac_address',
- 'mtu',
- 'name',
- 'params',
- 'subnets',
- 'type',
+ "physical": [
+ "mac_address",
+ "mtu",
+ "name",
+ "params",
+ "subnets",
+ "type",
],
- 'subnet': [
- 'address',
- 'broadcast',
- 'dns_nameservers',
- 'dns_search',
- 'metric',
- 'pointopoint',
- 'routes',
- 'scope',
- 'type',
+ "subnet": [
+ "address",
+ "broadcast",
+ "dns_nameservers",
+ "dns_search",
+ "metric",
+ "pointopoint",
+ "routes",
+ "scope",
+ "type",
],
- 'route': [
- 'network',
- 'gateway',
+ "route": [
+ "network",
+ "gateway",
],
}
@@ -847,56 +902,64 @@ def convert_smartos_network_data(network_data=None,
routes = []
def is_valid_ipv4(addr):
- return '.' in addr
+ return "." in addr
def is_valid_ipv6(addr):
- return ':' in addr
+ return ":" in addr
pgws = {
- 'ipv4': {'match': is_valid_ipv4, 'gw': None},
- 'ipv6': {'match': is_valid_ipv6, 'gw': None},
+ "ipv4": {"match": is_valid_ipv4, "gw": None},
+ "ipv6": {"match": is_valid_ipv6, "gw": None},
}
config = []
for nic in network_data:
- cfg = dict((k, v) for k, v in nic.items()
- if k in valid_keys['physical'])
- cfg.update({
- 'type': 'physical',
- 'name': nic['interface']})
- if 'mac' in nic:
- cfg.update({'mac_address': nic['mac']})
+ cfg = dict(
+ (k, v) for k, v in nic.items() if k in valid_keys["physical"]
+ )
+ cfg.update({"type": "physical", "name": nic["interface"]})
+ if "mac" in nic:
+ cfg.update({"mac_address": nic["mac"]})
subnets = []
- for ip in nic.get('ips', []):
+ for ip in nic.get("ips", []):
if ip == "dhcp":
- subnet = {'type': 'dhcp4'}
+ subnet = {"type": "dhcp4"}
else:
routeents = []
- subnet = dict((k, v) for k, v in nic.items()
- if k in valid_keys['subnet'])
- subnet.update({
- 'type': 'static',
- 'address': ip,
- })
-
- proto = 'ipv4' if is_valid_ipv4(ip) else 'ipv6'
+ subnet = dict(
+ (k, v) for k, v in nic.items() if k in valid_keys["subnet"]
+ )
+ subnet.update(
+ {
+ "type": "static",
+ "address": ip,
+ }
+ )
+
+ proto = "ipv4" if is_valid_ipv4(ip) else "ipv6"
# Only use gateways for 'primary' nics
- if 'primary' in nic and nic.get('primary', False):
+ if "primary" in nic and nic.get("primary", False):
# the ips and gateways list may be N to M, here
# we map the ip index into the gateways list,
# and handle the case that we could have more ips
# than gateways. we only consume the first gateway
- if not pgws[proto]['gw']:
- gateways = [gw for gw in nic.get('gateways', [])
- if pgws[proto]['match'](gw)]
+ if not pgws[proto]["gw"]:
+ gateways = [
+ gw
+ for gw in nic.get("gateways", [])
+ if pgws[proto]["match"](gw)
+ ]
if len(gateways):
- pgws[proto]['gw'] = gateways[0]
- subnet.update({'gateway': pgws[proto]['gw']})
+ pgws[proto]["gw"] = gateways[0]
+ subnet.update({"gateway": pgws[proto]["gw"]})
for route in routes:
- rcfg = dict((k, v) for k, v in route.items()
- if k in valid_keys['route'])
+ rcfg = dict(
+ (k, v)
+ for k, v in route.items()
+ if k in valid_keys["route"]
+ )
# Linux uses the value of 'gateway' to determine
# automatically if the route is a forward/next-hop
# (non-local IP for gateway) or an interface/resolver
@@ -909,25 +972,29 @@ def convert_smartos_network_data(network_data=None,
# to see if it's in the prefix. We can then smartly
# add or not-add this route. But for now,
# when in doubt, use brute force! Routes for everyone!
- rcfg.update({'network': route['dst']})
+ rcfg.update({"network": route["dst"]})
routeents.append(rcfg)
- subnet.update({'routes': routeents})
+ subnet.update({"routes": routeents})
subnets.append(subnet)
- cfg.update({'subnets': subnets})
+ cfg.update({"subnets": subnets})
config.append(cfg)
if dns_servers:
config.append(
- {'type': 'nameserver', 'address': dns_servers,
- 'search': dns_domain})
+ {
+ "type": "nameserver",
+ "address": dns_servers,
+ "search": dns_domain,
+ }
+ )
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
# Used to match classes to dependencies
datasources = [
- (DataSourceSmartOS, (sources.DEP_FILESYSTEM, )),
+ (DataSourceSmartOS, (sources.DEP_FILESYSTEM,)),
]
@@ -938,13 +1005,17 @@ def get_datasource_list(depends):
if __name__ == "__main__":
import sys
+
jmc = jmc_client_factory()
if jmc is None:
print("Do not appear to be on smartos.")
sys.exit(1)
if len(sys.argv) == 1:
- keys = (list(SMARTOS_ATTRIB_JSON.keys()) +
- list(SMARTOS_ATTRIB_MAP.keys()) + ['network_config'])
+ keys = (
+ list(SMARTOS_ATTRIB_JSON.keys())
+ + list(SMARTOS_ATTRIB_MAP.keys())
+ + ["network_config"]
+ )
else:
keys = sys.argv[1:]
@@ -956,14 +1027,19 @@ if __name__ == "__main__":
keyname = SMARTOS_ATTRIB_JSON[key]
data[key] = client.get_json(keyname)
elif key == "network_config":
- for depkey in ('network-data', 'dns_servers', 'dns_domain',
- 'routes'):
+ for depkey in (
+ "network-data",
+ "dns_servers",
+ "dns_domain",
+ "routes",
+ ):
load_key(client, depkey, data)
data[key] = convert_smartos_network_data(
- network_data=data['network-data'],
- dns_servers=data['dns_servers'],
- dns_domain=data['dns_domain'],
- routes=data['routes'])
+ network_data=data["network-data"],
+ dns_servers=data["dns_servers"],
+ dns_domain=data["dns_domain"],
+ routes=data["routes"],
+ )
else:
if key in SMARTOS_ATTRIB_MAP:
keyname, strip = SMARTOS_ATTRIB_MAP[key]
@@ -977,7 +1053,6 @@ if __name__ == "__main__":
for key in keys:
load_key(client=jmc, key=key, data=data)
- print(json.dumps(data, indent=1, sort_keys=True,
- separators=(',', ': ')))
+ print(json.dumps(data, indent=1, sort_keys=True, separators=(",", ": ")))
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/DataSourceUpCloud.py b/cloudinit/sources/DataSourceUpCloud.py
new file mode 100644
index 00000000..f4b78da5
--- /dev/null
+++ b/cloudinit/sources/DataSourceUpCloud.py
@@ -0,0 +1,162 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# UpCloud server metadata API:
+# https://developers.upcloud.com/1.3/8-servers/#metadata-service
+
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import sources, util
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+from cloudinit.sources.helpers import upcloud as uc_helper
+
+LOG = logging.getLogger(__name__)
+
+BUILTIN_DS_CONFIG = {"metadata_url": "http://169.254.169.254/metadata/v1.json"}
+
+# Wait for up to a minute, retrying the meta-data server
+# every 2 seconds.
+MD_RETRIES = 30
+MD_TIMEOUT = 2
+MD_WAIT_RETRY = 2
+
+
+class DataSourceUpCloud(sources.DataSource):
+
+ dsname = "UpCloud"
+
+ # We'll perform DHCP setup only in init-local, see DataSourceUpCloudLocal
+ perform_dhcp_setup = False
+
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.distro = distro
+ self.metadata = dict()
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "UpCloud"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+ self.metadata_address = self.ds_cfg["metadata_url"]
+ self.retries = self.ds_cfg.get("retries", MD_RETRIES)
+ self.timeout = self.ds_cfg.get("timeout", MD_TIMEOUT)
+ self.wait_retry = self.ds_cfg.get("wait_retry", MD_WAIT_RETRY)
+ self._network_config = None
+
+ def _get_sysinfo(self):
+ return uc_helper.read_sysinfo()
+
+ def _read_metadata(self):
+ return uc_helper.read_metadata(
+ self.metadata_address,
+ timeout=self.timeout,
+ sec_between=self.wait_retry,
+ retries=self.retries,
+ )
+
+ def _get_data(self):
+ (is_upcloud, server_uuid) = self._get_sysinfo()
+
+ # only proceed if we know we are on UpCloud
+ if not is_upcloud:
+ return False
+
+ LOG.info("Running on UpCloud. server_uuid=%s", server_uuid)
+
+ if self.perform_dhcp_setup: # Set up networking in the init-local stage.
+ try:
+ LOG.debug("Finding a fallback NIC")
+ nic = cloudnet.find_fallback_nic()
+ LOG.debug("Discovering metadata via DHCP interface %s", nic)
+ with EphemeralDHCPv4(nic):
+ md = util.log_time(
+ logfunc=LOG.debug,
+ msg="Reading from metadata service",
+ func=self._read_metadata,
+ )
+ except (NoDHCPLeaseError, sources.InvalidMetaDataException) as e:
+ util.logexc(LOG, str(e))
+ return False
+ else:
+ try:
+ LOG.debug(
+ "Discovering metadata without DHCP-configured networking"
+ )
+ md = util.log_time(
+ logfunc=LOG.debug,
+ msg="Reading from metadata service",
+ func=self._read_metadata,
+ )
+ except sources.InvalidMetaDataException as e:
+ util.logexc(LOG, str(e))
+ LOG.info(
+ "No DHCP-enabled interfaces available, "
+ "unable to fetch metadata for %s",
+ server_uuid,
+ )
+ return False
+
+ self.metadata_full = md
+ self.metadata["instance-id"] = md.get("instance_id", server_uuid)
+ self.metadata["local-hostname"] = md.get("hostname")
+ self.metadata["network"] = md.get("network")
+ self.metadata["public-keys"] = md.get("public_keys")
+ self.metadata["availability_zone"] = md.get("region", "default")
+ self.vendordata_raw = md.get("vendor_data", None)
+ self.userdata_raw = md.get("user_data", None)
+
+ return True
+
+ def check_instance_id(self, sys_cfg):
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+ @property
+ def network_config(self):
+ """
+ Configure the networking. This needs to be done each boot,
+ since the IP and interface information might have changed
+ due to reconfiguration.
+ """
+
+ if self._network_config:
+ return self._network_config
+
+ raw_network_config = self.metadata.get("network")
+ if not raw_network_config:
+            raise Exception("Unable to get network meta-data from server")
+
+ self._network_config = uc_helper.convert_network_config(
+ raw_network_config,
+ )
+
+ return self._network_config
+
+
+class DataSourceUpCloudLocal(DataSourceUpCloud):
+ """
+    Run in init-local using DHCP discovery prior to the metadata crawl.
+
+    In init-local, no network is available. This subclass sets up minimal
+    networking with dhclient on a viable nic so that it can talk to the
+    metadata service. If the metadata service provides network
+    configuration, it is rendered for the instance based on that metadata.
+ """
+
+ perform_dhcp_setup = True # Get metadata network config if present
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceUpCloudLocal, (sources.DEP_FILESYSTEM,)),
+ (DataSourceUpCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+# vi: ts=4 expandtab
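
A quick sketch of the ds_cfg merge above, assuming util.mergemanydict gives
earlier dicts priority (so values under "datasource: UpCloud:" in system
config override BUILTIN_DS_CONFIG); merge_first_wins is a simplified
stand-in written for illustration, not the cloud-init implementation. With
the defaults, 30 retries two seconds apart give the one-minute budget noted
in the comment above.

    BUILTIN_DS_CONFIG = {
        "metadata_url": "http://169.254.169.254/metadata/v1.json"
    }

    def merge_first_wins(dicts):
        # Later dicts only fill in keys the earlier ones did not set.
        merged = {}
        for d in dicts:
            for key, val in d.items():
                merged.setdefault(key, val)
        return merged

    user_cfg = {"metadata_url": "http://example.invalid/v1.json", "retries": 5}
    cfg = merge_first_wins([user_cfg, BUILTIN_DS_CONFIG])
    assert cfg["metadata_url"] == "http://example.invalid/v1.json"
    assert cfg["retries"] == 5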
diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py
new file mode 100644
index 00000000..6ef7c9d5
--- /dev/null
+++ b/cloudinit/sources/DataSourceVMware.py
@@ -0,0 +1,869 @@
+# Cloud-Init DataSource for VMware
+#
+# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved.
+#
+# Authors: Anish Swaminathan <anishs@vmware.com>
+# Andrew Kutz <akutz@vmware.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""Cloud-Init DataSource for VMware
+
+This module provides a cloud-init datasource for VMware systems and supports
+multiple transport types, including:
+
+ * EnvVars
+ * GuestInfo
+
+Netifaces (https://github.com/al45tair/netifaces)
+
+ Please note this module relies on the netifaces project to introspect the
+    runtime network configuration of the host on which this datasource is
+ running. This is in contrast to the rest of cloud-init which uses the
+ cloudinit/netinfo module.
+
+ The reasons for using netifaces include:
+
+ * Netifaces is built in C and is more portable across multiple systems
+ and more deterministic than shell exec'ing local network commands and
+ parsing their output.
+
+ * Netifaces provides a stable way to determine the view of the host's
+ network after DHCP has brought the network online. Unlike most other
+ datasources, this datasource still provides support for JINJA queries
+ based on networking information even when the network is based on a
+ DHCP lease. While this does not tie this datasource directly to
+ netifaces, it does mean the ability to consistently obtain the
+ correct information is paramount.
+
+ * It is currently possible to execute this datasource on macOS
+ (which many developers use today) to print the output of the
+ get_host_info function. This function calls netifaces to obtain
+ the same runtime network configuration that the datasource would
+ persist to the local system's instance data.
+
+ However, the netinfo module fails on macOS. The result is either a
+ hung operation that requires a SIGINT to return control to the user,
+ or, if brew is used to install iproute2mac, the ip commands are used
+ but produce output the netinfo module is unable to parse.
+
+ While macOS is not a target of cloud-init, this feature is quite
+ useful when working on this datasource.
+
+ For more information about this behavior, please see the following
+ PR comment, https://bit.ly/3fG7OVh.
+
+    The authors of this datasource are not opposed to moving away from
+    netifaces, and may eventually do just that. This proviso was added to
+    the top of this module to remind future maintainers why netifaces was
+    chosen in the first place, whether that is to smooth the transition
+    away from netifaces or to embrace it further up the cloud-init stack.
+"""
+
+import collections
+import copy
+import ipaddress
+import json
+import os
+import socket
+import time
+
+import netifaces
+
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import sources, util
+from cloudinit.subp import ProcessExecutionError, subp, which
+
+PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid"
+
+LOG = logging.getLogger(__name__)
+NOVAL = "No value found"
+
+DATA_ACCESS_METHOD_ENVVAR = "envvar"
+DATA_ACCESS_METHOD_GUESTINFO = "guestinfo"
+
+VMWARE_RPCTOOL = which("vmware-rpctool")
+REDACT = "redact"
+CLEANUP_GUESTINFO = "cleanup-guestinfo"
+VMX_GUESTINFO = "VMX_GUESTINFO"
+GUESTINFO_EMPTY_YAML_VAL = "---"
+
+LOCAL_IPV4 = "local-ipv4"
+LOCAL_IPV6 = "local-ipv6"
+WAIT_ON_NETWORK = "wait-on-network"
+WAIT_ON_NETWORK_IPV4 = "ipv4"
+WAIT_ON_NETWORK_IPV6 = "ipv6"
+
+
+class DataSourceVMware(sources.DataSource):
+ """
+ Setting the hostname:
+ The hostname is set by way of the metadata key "local-hostname".
+
+ Setting the instance ID:
+ The instance ID may be set by way of the metadata key "instance-id".
+ However, if this value is absent then the instance ID is read
+ from the file /sys/class/dmi/id/product_uuid.
+
+ Configuring the network:
+ The network is configured by setting the metadata key "network"
+ with a value consistent with Network Config Versions 1 or 2,
+ depending on the Linux distro's version of cloud-init:
+
+ Network Config Version 1 - http://bit.ly/cloudinit-net-conf-v1
+ Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2
+
+ For example, CentOS 7's official cloud-init package is version
+ 0.7.9 and does not support Network Config Version 2. However,
+ this datasource still supports supplying Network Config Version 2
+ data as long as the Linux distro's cloud-init package is new
+ enough to parse the data.
+
+ The metadata key "network.encoding" may be used to indicate the
+ format of the metadata key "network". Valid encodings are base64
+ and gzip+base64.
+ """
+
+ dsname = "VMware"
+
+ def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc)
+
+ self.data_access_method = None
+ self.vmware_rpctool = VMWARE_RPCTOOL
+
+ def _get_data(self):
+ """
+ _get_data loads the metadata, userdata, and vendordata from one of
+ the following locations in the given order:
+
+ * envvars
+ * guestinfo
+
+        Please note that when updating this function with support for new
+        data transports, the order should match the order in the
+        dscheck_VMware function from the file ds-identify.
+ """
+
+ # Initialize the locally scoped metadata, userdata, and vendordata
+ # variables. They are assigned below depending on the detected data
+ # access method.
+ md, ud, vd = None, None, None
+
+ # First check to see if there is data via env vars.
+ if os.environ.get(VMX_GUESTINFO, ""):
+ md = guestinfo_envvar("metadata")
+ ud = guestinfo_envvar("userdata")
+ vd = guestinfo_envvar("vendordata")
+
+ if md or ud or vd:
+ self.data_access_method = DATA_ACCESS_METHOD_ENVVAR
+
+ # At this point, all additional data transports are valid only on
+ # a VMware platform.
+ if not self.data_access_method:
+ system_type = dmi.read_dmi_data("system-product-name")
+ if system_type is None:
+ LOG.debug("No system-product-name found")
+ return False
+ if "vmware" not in system_type.lower():
+ LOG.debug("Not a VMware platform")
+ return False
+
+ # If no data was detected, check the guestinfo transport next.
+ if not self.data_access_method:
+ if self.vmware_rpctool:
+ md = guestinfo("metadata", self.vmware_rpctool)
+ ud = guestinfo("userdata", self.vmware_rpctool)
+ vd = guestinfo("vendordata", self.vmware_rpctool)
+
+ if md or ud or vd:
+ self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO
+
+ if not self.data_access_method:
+ LOG.error("failed to find a valid data access method")
+ return False
+
+ LOG.info("using data access method %s", self._get_subplatform())
+
+ # Get the metadata.
+ self.metadata = process_metadata(load_json_or_yaml(md))
+
+ # Get the user data.
+ self.userdata_raw = ud
+
+ # Get the vendor data.
+ self.vendordata_raw = vd
+
+ # Redact any sensitive information.
+ self.redact_keys()
+
+ # get_data returns true if there is any available metadata,
+ # userdata, or vendordata.
+ if self.metadata or self.userdata_raw or self.vendordata_raw:
+ return True
+ else:
+ return False
+
+ def setup(self, is_new_instance):
+ """setup(is_new_instance)
+
+ This is called before user-data and vendor-data have been processed.
+
+        Unless the datasource has set mode to 'local', networking per
+        'fallback' or per 'network_config' will have been written and
+        brought up in the OS at this point.
+ """
+
+ host_info = wait_on_network(self.metadata)
+ LOG.info("got host-info: %s", host_info)
+
+ # Reflect any possible local IPv4 or IPv6 addresses in the guest
+ # info.
+ advertise_local_ip_addrs(host_info)
+
+ # Ensure the metadata gets updated with information about the
+ # host, including the network interfaces, default IP addresses,
+ # etc.
+ self.metadata = util.mergemanydict([self.metadata, host_info])
+
+ # Persist the instance data for versions of cloud-init that support
+ # doing so. This occurs here rather than in the get_data call in
+ # order to ensure that the network interfaces are up and can be
+ # persisted with the metadata.
+ self.persist_instance_data()
+
+ def _get_subplatform(self):
+ get_key_name_fn = None
+ if self.data_access_method == DATA_ACCESS_METHOD_ENVVAR:
+ get_key_name_fn = get_guestinfo_envvar_key_name
+ elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
+ get_key_name_fn = get_guestinfo_key_name
+ else:
+ return sources.METADATA_UNKNOWN
+
+ return "%s (%s)" % (
+ self.data_access_method,
+ get_key_name_fn("metadata"),
+ )
+
+ @property
+ def network_config(self):
+ if "network" in self.metadata:
+ LOG.debug("using metadata network config")
+ else:
+ LOG.debug("using fallback network config")
+ self.metadata["network"] = {
+ "config": self.distro.generate_fallback_config(),
+ }
+ return self.metadata["network"]["config"]
+
+ def get_instance_id(self):
+ # Pull the instance ID out of the metadata if present. Otherwise
+ # read the file /sys/class/dmi/id/product_uuid for the instance ID.
+ if self.metadata and "instance-id" in self.metadata:
+ return self.metadata["instance-id"]
+ with open(PRODUCT_UUID_FILE_PATH, "r") as id_file:
+ self.metadata["instance-id"] = str(id_file.read()).rstrip().lower()
+ return self.metadata["instance-id"]
+
+ def get_public_ssh_keys(self):
+ for key_name in (
+ "public-keys-data",
+ "public_keys_data",
+ "public-keys",
+ "public_keys",
+ ):
+ if key_name in self.metadata:
+ return sources.normalize_pubkey_data(self.metadata[key_name])
+ return []
+
+ def redact_keys(self):
+ # Determine if there are any keys to redact.
+ keys_to_redact = None
+ if REDACT in self.metadata:
+ keys_to_redact = self.metadata[REDACT]
+ elif CLEANUP_GUESTINFO in self.metadata:
+ # This is for backwards compatibility.
+ keys_to_redact = self.metadata[CLEANUP_GUESTINFO]
+
+ if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO:
+ guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool)
+
+
+def decode(key, enc_type, data):
+ """
+    decode returns the decoded string value of data.
+    key is a string used to identify the data being decoded in log messages.
+ """
+ LOG.debug("Getting encoded data for key=%s, enc=%s", key, enc_type)
+
+ raw_data = None
+ if enc_type in ["gzip+base64", "gz+b64"]:
+ LOG.debug("Decoding %s format %s", enc_type, key)
+ raw_data = util.decomp_gzip(util.b64d(data))
+ elif enc_type in ["base64", "b64"]:
+ LOG.debug("Decoding %s format %s", enc_type, key)
+ raw_data = util.b64d(data)
+ else:
+ LOG.debug("Plain-text data %s", key)
+ raw_data = data
+
+ return util.decode_binary(raw_data)
+
+
+def get_none_if_empty_val(val):
+ """
+ get_none_if_empty_val returns None if the provided value, once stripped
+ of its trailing whitespace, is empty or equal to GUESTINFO_EMPTY_YAML_VAL.
+
+ The return value is always a string, regardless of whether the input is
+ a bytes class or a string.
+ """
+
+ # If the provided value is a bytes class, convert it to a string to
+ # simplify the rest of this function's logic.
+ val = util.decode_binary(val)
+ val = val.rstrip()
+ if len(val) == 0 or val == GUESTINFO_EMPTY_YAML_VAL:
+ return None
+ return val
+
+
+def advertise_local_ip_addrs(host_info):
+ """
+ advertise_local_ip_addrs gets the local IP address information from
+ the provided host_info map and sets the addresses in the guestinfo
+ namespace
+ """
+ if not host_info:
+ return
+
+ # Reflect any possible local IPv4 or IPv6 addresses in the guest
+ # info.
+ local_ipv4 = host_info.get(LOCAL_IPV4)
+ if local_ipv4:
+ guestinfo_set_value(LOCAL_IPV4, local_ipv4)
+ LOG.info("advertised local ipv4 address %s in guestinfo", local_ipv4)
+
+ local_ipv6 = host_info.get(LOCAL_IPV6)
+ if local_ipv6:
+ guestinfo_set_value(LOCAL_IPV6, local_ipv6)
+ LOG.info("advertised local ipv6 address %s in guestinfo", local_ipv6)
+
+
+def handle_returned_guestinfo_val(key, val):
+ """
+ handle_returned_guestinfo_val returns the provided value if it is
+ not empty or set to GUESTINFO_EMPTY_YAML_VAL, otherwise None is
+ returned
+ """
+ val = get_none_if_empty_val(val)
+ if val:
+ return val
+ LOG.debug("No value found for key %s", key)
+ return None
+
+
+def get_guestinfo_key_name(key):
+ return "guestinfo." + key
+
+
+def get_guestinfo_envvar_key_name(key):
+ return ("vmx." + get_guestinfo_key_name(key)).upper().replace(".", "_", -1)
+
+
+def guestinfo_envvar(key):
+ val = guestinfo_envvar_get_value(key)
+ if not val:
+ return None
+ enc_type = guestinfo_envvar_get_value(key + ".encoding")
+ return decode(get_guestinfo_envvar_key_name(key), enc_type, val)
+
+
+def guestinfo_envvar_get_value(key):
+ env_key = get_guestinfo_envvar_key_name(key)
+ return handle_returned_guestinfo_val(key, os.environ.get(env_key, ""))
+
+
+def guestinfo(key, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ guestinfo returns the guestinfo value for the provided key, decoding
+ the value when required
+ """
+ val = guestinfo_get_value(key, vmware_rpctool)
+ if not val:
+ return None
+ enc_type = guestinfo_get_value(key + ".encoding", vmware_rpctool)
+ return decode(get_guestinfo_key_name(key), enc_type, val)
+
+
+def guestinfo_get_value(key, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ Returns a guestinfo value for the specified key.
+ """
+ LOG.debug("Getting guestinfo value for key %s", key)
+
+ try:
+ (stdout, stderr) = subp(
+ [
+ vmware_rpctool,
+ "info-get " + get_guestinfo_key_name(key),
+ ]
+ )
+ if stderr == NOVAL:
+ LOG.debug("No value found for key %s", key)
+ elif not stdout:
+ LOG.error("Failed to get guestinfo value for key %s", key)
+ return handle_returned_guestinfo_val(key, stdout)
+ except ProcessExecutionError as error:
+ if error.stderr == NOVAL:
+ LOG.debug("No value found for key %s", key)
+ else:
+ util.logexc(
+ LOG,
+ "Failed to get guestinfo value for key %s: %s",
+ key,
+ error,
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Unexpected error while trying to get "
+ + "guestinfo value for key %s",
+ key,
+ )
+
+ return None
+
+
+def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+ Sets a guestinfo value for the specified key. Set value to an empty string
+ to clear an existing guestinfo key.
+ """
+
+ # If value is an empty string then set it to a single space as it is not
+ # possible to set a guestinfo key to an empty string. Setting a guestinfo
+ # key to a single space is as close as it gets to clearing an existing
+ # guestinfo key.
+ if value == "":
+ value = " "
+
+ LOG.debug("Setting guestinfo key=%s to value=%s", key, value)
+
+ try:
+ subp(
+ [
+ vmware_rpctool,
+ "info-set %s %s" % (get_guestinfo_key_name(key), value),
+ ]
+ )
+ return True
+ except ProcessExecutionError as error:
+ util.logexc(
+ LOG,
+ "Failed to set guestinfo key=%s to value=%s: %s",
+ key,
+ value,
+ error,
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Unexpected error while trying to set "
+ + "guestinfo key=%s to value=%s",
+ key,
+ value,
+ )
+
+ return None
+
+
+def guestinfo_redact_keys(keys, vmware_rpctool=VMWARE_RPCTOOL):
+ """
+    guestinfo_redact_keys redacts the guestinfo values of all of the keys
+    in the given list. Each key will have its value set to "---". Since the
+    value is valid YAML, cloud-init can still read it if it tries.
+ """
+ if not keys:
+ return
+    if not isinstance(keys, (list, tuple)):
+ keys = [keys]
+ for key in keys:
+ key_name = get_guestinfo_key_name(key)
+ LOG.info("clearing %s", key_name)
+ if not guestinfo_set_value(
+ key, GUESTINFO_EMPTY_YAML_VAL, vmware_rpctool
+ ):
+ LOG.error("failed to clear %s", key_name)
+ LOG.info("clearing %s.encoding", key_name)
+ if not guestinfo_set_value(key + ".encoding", "", vmware_rpctool):
+ LOG.error("failed to clear %s.encoding", key_name)
+
+
+def load_json_or_yaml(data):
+ """
+    load_json_or_yaml first attempts to unmarshal the provided data as
+    JSON; if that fails, it attempts to unmarshal the data as YAML. If
+    data is None, a new dictionary is returned.
+ """
+ if not data:
+ return {}
+ try:
+ return util.load_json(data)
+ except (json.JSONDecodeError, TypeError):
+ return util.load_yaml(data)
+
+
+def process_metadata(data):
+ """
+ process_metadata processes metadata and loads the optional network
+ configuration.
+ """
+ network = None
+ if "network" in data:
+ network = data["network"]
+ del data["network"]
+
+ network_enc = None
+ if "network.encoding" in data:
+ network_enc = data["network.encoding"]
+ del data["network.encoding"]
+
+ if network:
+ if isinstance(network, collections.abc.Mapping):
+ LOG.debug("network data copied to 'config' key")
+ network = {"config": copy.deepcopy(network)}
+ else:
+ LOG.debug("network data to be decoded %s", network)
+ dec_net = decode("metadata.network", network_enc, network)
+ network = {
+ "config": load_json_or_yaml(dec_net),
+ }
+
+ LOG.debug("network data %s", network)
+ data["network"] = network
+
+ return data
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceVMware, (sources.DEP_FILESYSTEM,)), # Run at init-local
+ (DataSourceVMware, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+def get_datasource_list(depends):
+ """
+ Return a list of data sources that match this set of dependencies
+ """
+ return sources.list_from_depends(depends, datasources)
+
+
+def get_default_ip_addrs():
+ """
+ Returns the default IPv4 and IPv6 addresses based on the device(s) used for
+    the default route. Please note that None may be returned for either
+    address family if that family has no default route, or if the device
+    used by the default route has multiple addresses for that family.
+ """
+ # TODO(promote and use netifaces in cloudinit.net* modules)
+ gateways = netifaces.gateways()
+ if "default" not in gateways:
+ return None, None
+
+ default_gw = gateways["default"]
+ if (
+ netifaces.AF_INET not in default_gw
+ and netifaces.AF_INET6 not in default_gw
+ ):
+ return None, None
+
+ ipv4 = None
+ ipv6 = None
+
+ gw4 = default_gw.get(netifaces.AF_INET)
+ if gw4:
+ _, dev4 = gw4
+ addr4_fams = netifaces.ifaddresses(dev4)
+ if addr4_fams:
+ af_inet4 = addr4_fams.get(netifaces.AF_INET)
+ if af_inet4:
+ if len(af_inet4) > 1:
+ LOG.warning(
+ "device %s has more than one ipv4 address: %s",
+ dev4,
+ af_inet4,
+ )
+ elif "addr" in af_inet4[0]:
+ ipv4 = af_inet4[0]["addr"]
+
+ # Try to get the default IPv6 address by first seeing if there is a default
+ # IPv6 route.
+ gw6 = default_gw.get(netifaces.AF_INET6)
+ if gw6:
+ _, dev6 = gw6
+ addr6_fams = netifaces.ifaddresses(dev6)
+ if addr6_fams:
+ af_inet6 = addr6_fams.get(netifaces.AF_INET6)
+ if af_inet6:
+ if len(af_inet6) > 1:
+ LOG.warning(
+ "device %s has more than one ipv6 address: %s",
+ dev6,
+ af_inet6,
+ )
+ elif "addr" in af_inet6[0]:
+ ipv6 = af_inet6[0]["addr"]
+
+ # If there is a default IPv4 address but not IPv6, then see if there is a
+ # single IPv6 address associated with the same device associated with the
+ # default IPv4 address.
+ if ipv4 and not ipv6:
+ af_inet6 = addr4_fams.get(netifaces.AF_INET6)
+ if af_inet6:
+ if len(af_inet6) > 1:
+ LOG.warning(
+ "device %s has more than one ipv6 address: %s",
+ dev4,
+ af_inet6,
+ )
+ elif "addr" in af_inet6[0]:
+ ipv6 = af_inet6[0]["addr"]
+
+ # If there is a default IPv6 address but not IPv4, then see if there is a
+ # single IPv4 address associated with the same device associated with the
+ # default IPv6 address.
+ if not ipv4 and ipv6:
+ af_inet4 = addr6_fams.get(netifaces.AF_INET)
+ if af_inet4:
+ if len(af_inet4) > 1:
+ LOG.warning(
+ "device %s has more than one ipv4 address: %s",
+ dev6,
+ af_inet4,
+ )
+ elif "addr" in af_inet4[0]:
+ ipv4 = af_inet4[0]["addr"]
+
+ return ipv4, ipv6
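+
+# Illustrative sketch (not part of the original patch): the netifaces
+# shapes relied on above. gateways()["default"] maps an address family to
+# a (gateway, device) tuple, and ifaddresses(dev) maps a family to a list
+# of {"addr": ...} dicts.
+def _example_default_ipv4_addrs():
+    gws = netifaces.gateways().get("default", {})
+    gw4 = gws.get(netifaces.AF_INET)  # e.g. ("10.0.0.1", "eth0") or None
+    if not gw4:
+        return []
+    _gateway, dev = gw4
+    addrs = netifaces.ifaddresses(dev).get(netifaces.AF_INET, [])
+    return [a.get("addr") for a in addrs]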
+
+
+# patched socket.getfqdn() - see https://bugs.python.org/issue5004
+
+
+def getfqdn(name=""):
+ """Get fully qualified domain name from name.
+ An empty argument is interpreted as meaning the local host.
+ """
+ # TODO(may want to promote this function to util.getfqdn)
+ # TODO(may want to extend util.get_hostname to accept fqdn=True param)
+ name = name.strip()
+ if not name or name == "0.0.0.0":
+ name = util.get_hostname()
+ try:
+ addrs = socket.getaddrinfo(
+ name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME
+ )
+ except socket.error:
+ pass
+ else:
+ for addr in addrs:
+ if addr[3]:
+ name = addr[3]
+ break
+ return name
+
+
+def is_valid_ip_addr(val):
+ """
+ Returns false if the address is loopback, link local or unspecified;
+ otherwise true is returned.
+ """
+ # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc)
+    # TODO(migrate to use cloudinit.net.is_ip_addr)
+
+ addr = None
+ try:
+ addr = ipaddress.ip_address(val)
+ except ipaddress.AddressValueError:
+ addr = ipaddress.ip_address(str(val))
+ except Exception:
+ return None
+
+ if addr.is_link_local or addr.is_loopback or addr.is_unspecified:
+ return False
+ return True
+
+
+def get_host_info():
+ """
+ Returns host information such as the host name and network interfaces.
+ """
+    # TODO(look to promote netifaces use up in cloud-init netinfo funcs)
+ host_info = {
+ "network": {
+ "interfaces": {
+ "by-mac": collections.OrderedDict(),
+ "by-ipv4": collections.OrderedDict(),
+ "by-ipv6": collections.OrderedDict(),
+ },
+ },
+ }
+ hostname = getfqdn(util.get_hostname())
+ if hostname:
+ host_info["hostname"] = hostname
+ host_info["local-hostname"] = hostname
+ host_info["local_hostname"] = hostname
+
+ default_ipv4, default_ipv6 = get_default_ip_addrs()
+ if default_ipv4:
+ host_info[LOCAL_IPV4] = default_ipv4
+ if default_ipv6:
+ host_info[LOCAL_IPV6] = default_ipv6
+
+ by_mac = host_info["network"]["interfaces"]["by-mac"]
+ by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"]
+ by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"]
+
+ ifaces = netifaces.interfaces()
+ for dev_name in ifaces:
+ addr_fams = netifaces.ifaddresses(dev_name)
+ af_link = addr_fams.get(netifaces.AF_LINK)
+ af_inet4 = addr_fams.get(netifaces.AF_INET)
+ af_inet6 = addr_fams.get(netifaces.AF_INET6)
+
+ mac = None
+ if af_link and "addr" in af_link[0]:
+ mac = af_link[0]["addr"]
+
+ # Do not bother recording localhost
+ if mac == "00:00:00:00:00:00":
+ continue
+
+ if mac and (af_inet4 or af_inet6):
+ key = mac
+ val = {}
+ if af_inet4:
+ af_inet4_vals = []
+ for ip_info in af_inet4:
+ if not is_valid_ip_addr(ip_info["addr"]):
+ continue
+ af_inet4_vals.append(ip_info)
+ val["ipv4"] = af_inet4_vals
+ if af_inet6:
+ af_inet6_vals = []
+ for ip_info in af_inet6:
+ if not is_valid_ip_addr(ip_info["addr"]):
+ continue
+ af_inet6_vals.append(ip_info)
+ val["ipv6"] = af_inet6_vals
+ by_mac[key] = val
+
+ if af_inet4:
+ for ip_info in af_inet4:
+ key = ip_info["addr"]
+ if not is_valid_ip_addr(key):
+ continue
+ val = copy.deepcopy(ip_info)
+ del val["addr"]
+ if mac:
+ val["mac"] = mac
+ by_ipv4[key] = val
+
+ if af_inet6:
+ for ip_info in af_inet6:
+ key = ip_info["addr"]
+ if not is_valid_ip_addr(key):
+ continue
+ val = copy.deepcopy(ip_info)
+ del val["addr"]
+ if mac:
+ val["mac"] = mac
+ by_ipv6[key] = val
+
+ return host_info
+
+
+def wait_on_network(metadata):
+ # Determine whether we need to wait on the network coming online.
+ wait_on_ipv4 = False
+ wait_on_ipv6 = False
+ if WAIT_ON_NETWORK in metadata:
+ wait_on_network = metadata[WAIT_ON_NETWORK]
+ if WAIT_ON_NETWORK_IPV4 in wait_on_network:
+ wait_on_ipv4_val = wait_on_network[WAIT_ON_NETWORK_IPV4]
+ if isinstance(wait_on_ipv4_val, bool):
+ wait_on_ipv4 = wait_on_ipv4_val
+ else:
+ wait_on_ipv4 = util.translate_bool(wait_on_ipv4_val)
+ if WAIT_ON_NETWORK_IPV6 in wait_on_network:
+ wait_on_ipv6_val = wait_on_network[WAIT_ON_NETWORK_IPV6]
+ if isinstance(wait_on_ipv6_val, bool):
+ wait_on_ipv6 = wait_on_ipv6_val
+ else:
+ wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val)
+
+ # Get information about the host.
+ host_info = None
+ while host_info is None:
+ # This loop + sleep results in two logs every second while waiting
+ # for either ipv4 or ipv6 up. Do we really need to log each iteration
+ # or can we log once and log on successful exit?
+ host_info = get_host_info()
+
+ network = host_info.get("network") or {}
+ interfaces = network.get("interfaces") or {}
+ by_ipv4 = interfaces.get("by-ipv4") or {}
+ by_ipv6 = interfaces.get("by-ipv6") or {}
+
+        # Default both flags to True so the debug log below never hits an
+        # unbound local when only one address family is being waited on.
+        ipv4_ready = ipv6_ready = True
+        if wait_on_ipv4:
+            ipv4_ready = len(by_ipv4) > 0
+            if not ipv4_ready:
+                host_info = None
+
+        if wait_on_ipv6:
+            ipv6_ready = len(by_ipv6) > 0
+            if not ipv6_ready:
+                host_info = None
+
+ if host_info is None:
+ LOG.debug(
+ "waiting on network: wait4=%s, ready4=%s, wait6=%s, ready6=%s",
+ wait_on_ipv4,
+ ipv4_ready,
+ wait_on_ipv6,
+ ipv6_ready,
+ )
+ time.sleep(1)
+
+ LOG.debug("waiting on network complete")
+ return host_info
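+
+# Illustrative sketch (not part of the original patch): a minimal
+# stand-in for the util.translate_bool coercion used above, which accepts
+# common string spellings alongside real booleans.
+def _example_translate_bool(val):
+    if not val:
+        return False
+    return str(val).strip().lower() in ("true", "1", "on", "yes")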
+
+
+def main():
+ """
+ Executed when this file is used as a program.
+ """
+ try:
+ logging.setupBasicLogging()
+ except Exception:
+ pass
+ metadata = {
+ "wait-on-network": {"ipv4": True, "ipv6": "false"},
+ "network": {"config": {"dhcp": True}},
+ }
+ host_info = wait_on_network(metadata)
+ metadata = util.mergemanydict([metadata, host_info])
+ print(util.json_dumps(metadata))
+
+
+if __name__ == "__main__":
+ main()
+
+# vi: ts=4 expandtab
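
To make the guestinfo "encoding" handling above concrete, here is a small
self-contained sketch of the two supported encodings using only the
standard library; the datasource itself routes through util.b64d and
util.decomp_gzip rather than these calls.

    import base64
    import gzip

    def encode_guestinfo(value, enc):
        if enc in ("gzip+base64", "gz+b64"):
            return base64.b64encode(gzip.compress(value.encode())).decode()
        if enc in ("base64", "b64"):
            return base64.b64encode(value.encode()).decode()
        return value  # plain text passes through unchanged

    blob = encode_guestinfo("instance-id: iid-example\n", "gzip+base64")
    raw = gzip.decompress(base64.b64decode(blob)).decode()
    assert raw == "instance-id: iid-example\n"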
diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py
new file mode 100644
index 00000000..8c2e82c2
--- /dev/null
+++ b/cloudinit/sources/DataSourceVultr.py
@@ -0,0 +1,157 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+# Vultr Metadata API:
+# https://www.vultr.com/metadata/
+
+import cloudinit.sources.helpers.vultr as vultr
+from cloudinit import log
+from cloudinit import sources, util, version
+
+LOG = log.getLogger(__name__)
+BUILTIN_DS_CONFIG = {
+ "url": "http://169.254.169.254",
+ "retries": 30,
+ "timeout": 10,
+ "wait": 5,
+ "user-agent": "Cloud-Init/%s - OS: %s Variant: %s"
+ % (
+ version.version_string(),
+ util.system_info()["system"],
+ util.system_info()["variant"],
+ ),
+}
+
+
+class DataSourceVultr(sources.DataSource):
+
+ dsname = "Vultr"
+
+ def __init__(self, sys_cfg, distro, paths):
+ super(DataSourceVultr, self).__init__(sys_cfg, distro, paths)
+ self.ds_cfg = util.mergemanydict(
+ [
+ util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}),
+ BUILTIN_DS_CONFIG,
+ ]
+ )
+
+ # Initiate data and check if Vultr
+ def _get_data(self):
+ LOG.debug("Detecting if machine is a Vultr instance")
+ if not vultr.is_vultr():
+ LOG.debug("Machine is not a Vultr instance")
+ return False
+
+ LOG.debug("Machine is a Vultr instance")
+
+ # Fetch metadata
+ self.metadata = self.get_metadata()
+ self.metadata["instance-id"] = self.metadata["instance-v2-id"]
+ self.metadata["local-hostname"] = self.metadata["hostname"]
+ region = self.metadata["region"]["regioncode"]
+ if "countrycode" in self.metadata["region"]:
+ region = self.metadata["region"]["countrycode"]
+ self.metadata["region"] = region.lower()
+ self.userdata_raw = self.metadata["user-data"]
+
+ # Generate config and process data
+ self.get_datasource_data(self.metadata)
+
+ # Dump some data so diagnosing failures is manageable
+ LOG.debug("Vultr Vendor Config:")
+ LOG.debug(util.json_dumps(self.metadata["vendor-data"]))
+ LOG.debug("SUBID: %s", self.metadata["instance-id"])
+ LOG.debug("Hostname: %s", self.metadata["local-hostname"])
+ if self.userdata_raw is not None:
+ LOG.debug("User-Data:")
+ LOG.debug(self.userdata_raw)
+
+ return True
+
+ # Process metadata
+ def get_datasource_data(self, md):
+ # Generate network config
+ if "cloud_interfaces" in md:
+ # In the future we will just drop pre-configured
+ # network configs into the array. They need names though.
+ self.netcfg = vultr.add_interface_names(md["cloud_interfaces"])
+ else:
+ self.netcfg = vultr.generate_network_config(md["interfaces"])
+
+ # Grab vendordata
+ self.vendordata_raw = md["vendor-data"]
+
+ # Default hostname is "guest" for whitelabel
+ if self.metadata["local-hostname"] == "":
+ self.metadata["local-hostname"] = "guest"
+
+ self.userdata_raw = md["user-data"]
+ if self.userdata_raw == "":
+ self.userdata_raw = None
+
+    # Fetch metadata from the Vultr metadata service
+ def get_metadata(self):
+ return vultr.get_metadata(
+ self.ds_cfg["url"],
+ self.ds_cfg["timeout"],
+ self.ds_cfg["retries"],
+ self.ds_cfg["wait"],
+ self.ds_cfg["user-agent"],
+ )
+
+ # Compare subid as instance id
+ def check_instance_id(self, sys_cfg):
+ if not vultr.is_vultr():
+ return False
+
+        # Bare metal has no way to implement this check locally
+ if vultr.is_baremetal():
+ return False
+
+ subid = vultr.get_sysinfo()["subid"]
+ return sources.instance_id_matches_system_uuid(subid)
+
+ # Currently unsupported
+ @property
+ def launch_index(self):
+ return None
+
+ @property
+ def network_config(self):
+ return self.netcfg
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceVultr, (sources.DEP_FILESYSTEM,)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
+
+
+if __name__ == "__main__":
+ import sys
+
+ if not vultr.is_vultr():
+ print("Machine is not a Vultr instance")
+ sys.exit(1)
+
+ md = vultr.get_metadata(
+ BUILTIN_DS_CONFIG["url"],
+ BUILTIN_DS_CONFIG["timeout"],
+ BUILTIN_DS_CONFIG["retries"],
+ BUILTIN_DS_CONFIG["wait"],
+ BUILTIN_DS_CONFIG["user-agent"],
+ )
+ config = md["vendor-data"]
+ sysinfo = vultr.get_sysinfo()
+
+ print(util.json_dumps(sysinfo))
+ print(util.json_dumps(config))
+
+# vi: ts=4 expandtab
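
A worked example of the region selection in _get_data above; pick_region is
a hypothetical helper written only to illustrate that countrycode, when
present, takes precedence over regioncode.

    def pick_region(region_md):
        code = region_md.get("countrycode", region_md.get("regioncode", ""))
        return code.lower()

    assert pick_region({"regioncode": "EWR"}) == "ewr"
    assert pick_region({"regioncode": "EWR", "countrycode": "US"}) == "us"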
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 9dccc687..88028cfa 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -13,17 +13,18 @@ import copy
import json
import os
from collections import namedtuple
+from typing import Dict, List # noqa: F401
-from cloudinit import dmi
-from cloudinit import importer
+from cloudinit import dmi, importer
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import type_utils
+from cloudinit import net, type_utils
from cloudinit import user_data as ud
from cloudinit import util
from cloudinit.atomic_helper import write_json
-from cloudinit.event import EventType
+from cloudinit.distros import Distro
+from cloudinit.event import EventScope, EventType
from cloudinit.filters import launch_index
+from cloudinit.persistence import CloudInitPickleMixin
from cloudinit.reporting import events
DSMODE_DISABLED = "disabled"
@@ -35,42 +36,48 @@ VALID_DSMODES = [DSMODE_DISABLED, DSMODE_LOCAL, DSMODE_NETWORK]
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
-DS_PREFIX = 'DataSource'
+DS_PREFIX = "DataSource"
EXPERIMENTAL_TEXT = (
"EXPERIMENTAL: The structure and format of content scoped under the 'ds'"
- " key may change in subsequent releases of cloud-init.")
+ " key may change in subsequent releases of cloud-init."
+)
# File in which publicly available instance meta-data is written
# security-sensitive key values are redacted from this world-readable file
-INSTANCE_JSON_FILE = 'instance-data.json'
+INSTANCE_JSON_FILE = "instance-data.json"
# security-sensitive key values are present in this root-readable file
-INSTANCE_JSON_SENSITIVE_FILE = 'instance-data-sensitive.json'
-REDACT_SENSITIVE_VALUE = 'redacted for non-root user'
+INSTANCE_JSON_SENSITIVE_FILE = "instance-data-sensitive.json"
+REDACT_SENSITIVE_VALUE = "redacted for non-root user"
# Key which can provide a cloud's official product name to cloud-init
-METADATA_CLOUD_NAME_KEY = 'cloud-name'
+METADATA_CLOUD_NAME_KEY = "cloud-name"
UNSET = "_unset"
-METADATA_UNKNOWN = 'unknown'
+METADATA_UNKNOWN = "unknown"
LOG = logging.getLogger(__name__)
# CLOUD_ID_REGION_PREFIX_MAP format is:
# <region-match-prefix>: (<new-cloud-id>: <test_allowed_cloud_callable>)
CLOUD_ID_REGION_PREFIX_MAP = {
- 'cn-': ('aws-china', lambda c: c == 'aws'), # only change aws regions
- 'us-gov-': ('aws-gov', lambda c: c == 'aws'), # only change aws regions
- 'china': ('azure-china', lambda c: c == 'azure'), # only change azure
+ "cn-": ("aws-china", lambda c: c == "aws"), # only change aws regions
+ "us-gov-": ("aws-gov", lambda c: c == "aws"), # only change aws regions
+ "china": ("azure-china", lambda c: c == "azure"), # only change azure
}
# NetworkConfigSource represents the canonical list of network config sources
# that cloud-init knows about. (Python 2.7 lacks PEP 435, so use a singleton
# namedtuple as an enum; see https://stackoverflow.com/a/6971002)
-_NETCFG_SOURCE_NAMES = ('cmdline', 'ds', 'system_cfg', 'fallback', 'initramfs')
-NetworkConfigSource = namedtuple('NetworkConfigSource',
- _NETCFG_SOURCE_NAMES)(*_NETCFG_SOURCE_NAMES)
+_NETCFG_SOURCE_NAMES = ("cmdline", "ds", "system_cfg", "fallback", "initramfs")
+NetworkConfigSource = namedtuple("NetworkConfigSource", _NETCFG_SOURCE_NAMES)(
+ *_NETCFG_SOURCE_NAMES
+)
+
+
+class DatasourceUnpickleUserDataError(Exception):
+ """Raised when userdata is unable to be unpickled due to python upgrades"""
class DataSourceNotFoundException(Exception):
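
The CLOUD_ID_REGION_PREFIX_MAP above drives canonical cloud-id derivation;
the sketch below is a simplified view of that lookup (the real
canonical_cloud_id used later in this file also handles unknown cloud names
and regions).

    PREFIX_MAP = {
        "cn-": ("aws-china", lambda c: c == "aws"),
        "us-gov-": ("aws-gov", lambda c: c == "aws"),
        "china": ("azure-china", lambda c: c == "azure"),
    }

    def simple_canonical_cloud_id(cloud_name, region):
        for prefix, (new_id, applies) in PREFIX_MAP.items():
            if region.startswith(prefix) and applies(cloud_name):
                return new_id
        return cloud_name

    assert simple_canonical_cloud_id("aws", "cn-north-1") == "aws-china"
    assert simple_canonical_cloud_id("gce", "cn-north-1") == "gce"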
@@ -81,7 +88,7 @@ class InvalidMetaDataException(Exception):
"""Raised when metadata is broken, unavailable or disabled."""
-def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
+def process_instance_metadata(metadata, key_path="", sensitive_keys=()):
"""Process all instance metadata cleaning it up for persisting as json.
Strip ci-b64 prefix and catalog any 'base64_encoded_keys' as a list
@@ -93,22 +100,23 @@ def process_instance_metadata(metadata, key_path='', sensitive_keys=()):
sens_keys = []
for key, val in metadata.items():
if key_path:
- sub_key_path = key_path + '/' + key
+ sub_key_path = key_path + "/" + key
else:
sub_key_path = key
if key in sensitive_keys or sub_key_path in sensitive_keys:
sens_keys.append(sub_key_path)
- if isinstance(val, str) and val.startswith('ci-b64:'):
+ if isinstance(val, str) and val.startswith("ci-b64:"):
base64_encoded_keys.append(sub_key_path)
- md_copy[key] = val.replace('ci-b64:', '')
+ md_copy[key] = val.replace("ci-b64:", "")
if isinstance(val, dict):
return_val = process_instance_metadata(
- val, sub_key_path, sensitive_keys)
- base64_encoded_keys.extend(return_val.pop('base64_encoded_keys'))
- sens_keys.extend(return_val.pop('sensitive_keys'))
+ val, sub_key_path, sensitive_keys
+ )
+ base64_encoded_keys.extend(return_val.pop("base64_encoded_keys"))
+ sens_keys.extend(return_val.pop("sensitive_keys"))
md_copy[key] = return_val
- md_copy['base64_encoded_keys'] = sorted(base64_encoded_keys)
- md_copy['sensitive_keys'] = sorted(sens_keys)
+ md_copy["base64_encoded_keys"] = sorted(base64_encoded_keys)
+ md_copy["sensitive_keys"] = sorted(sens_keys)
return md_copy
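
For reference, a flat (non-nested) sketch of the ci-b64 handling above;
the real function also walks nested dicts and records "a/b" key paths.

    def strip_ci_b64(metadata):
        b64_keys, md = [], {}
        for key, val in metadata.items():
            if isinstance(val, str) and val.startswith("ci-b64:"):
                b64_keys.append(key)
                val = val.replace("ci-b64:", "")
            md[key] = val
        md["base64_encoded_keys"] = sorted(b64_keys)
        return md

    out = strip_ci_b64({"user-data": "ci-b64:aGVsbG8="})
    assert out["user-data"] == "aGVsbG8="
    assert out["base64_encoded_keys"] == ["user-data"]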
@@ -117,11 +125,11 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
Replace any keys values listed in 'sensitive_keys' with redact_value.
"""
- if not metadata.get('sensitive_keys', []):
+ if not metadata.get("sensitive_keys", []):
return metadata
md_copy = copy.deepcopy(metadata)
- for key_path in metadata.get('sensitive_keys'):
- path_parts = key_path.split('/')
+ for key_path in metadata.get("sensitive_keys"):
+ path_parts = key_path.split("/")
obj = md_copy
for path in path_parts:
if isinstance(obj[path], dict) and path != path_parts[-1]:
@@ -131,17 +139,24 @@ def redact_sensitive_keys(metadata, redact_value=REDACT_SENSITIVE_VALUE):
URLParams = namedtuple(
- 'URLParms', ['max_wait_seconds', 'timeout_seconds', 'num_retries'])
+ "URLParms",
+ [
+ "max_wait_seconds",
+ "timeout_seconds",
+ "num_retries",
+ "sec_between_retries",
+ ],
+)
-class DataSource(metaclass=abc.ABCMeta):
+class DataSource(CloudInitPickleMixin, metaclass=abc.ABCMeta):
dsmode = DSMODE_NETWORK
- default_locale = 'en_US.UTF-8'
+ default_locale = "en_US.UTF-8"
# Datasource name needs to be set by subclasses to determine which
# cloud-config datasource key is loaded
- dsname = '_undef'
+ dsname = "_undef"
# Cached cloud_name as determined by _get_cloud_name
_cloud_name = None
@@ -162,40 +177,71 @@ class DataSource(metaclass=abc.ABCMeta):
# configuration will be used without considering any that follow.) This
# should always be a subset of the members of NetworkConfigSource with no
# duplicate entries.
- network_config_sources = (NetworkConfigSource.cmdline,
- NetworkConfigSource.initramfs,
- NetworkConfigSource.system_cfg,
- NetworkConfigSource.ds)
+ network_config_sources = (
+ NetworkConfigSource.cmdline,
+ NetworkConfigSource.initramfs,
+ NetworkConfigSource.system_cfg,
+ NetworkConfigSource.ds,
+ )
# read_url_params
- url_max_wait = -1 # max_wait < 0 means do not wait
- url_timeout = 10 # timeout for each metadata url read attempt
- url_retries = 5 # number of times to retry url upon 404
+ url_max_wait = -1 # max_wait < 0 means do not wait
+ url_timeout = 10 # timeout for each metadata url read attempt
+ url_retries = 5 # number of times to retry url upon 404
+    url_sec_between_retries = 1  # seconds to wait between retries
# The datasource defines a set of supported EventTypes during which
# the datasource can react to changes in metadata and regenerate
- # network configuration on metadata changes.
- # A datasource which supports writing network config on each system boot
- # would call update_events['network'].add(EventType.BOOT).
+ # network configuration on metadata changes. These are defined in
+ # `supported_network_events`.
+ # The datasource also defines a set of default EventTypes that the
+ # datasource can react to. These are the event types that will be used
+ # if not overridden by the user.
+ # A datasource requiring to write network config on each system boot
+ # would call default_update_events['network'].add(EventType.BOOT).
# Default: generate network config on new instance id (first boot).
- update_events = {'network': set([EventType.BOOT_NEW_INSTANCE])}
+ supported_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ EventType.BOOT,
+ EventType.BOOT_LEGACY,
+ EventType.HOTPLUG,
+ }
+ }
+ default_update_events = {
+ EventScope.NETWORK: {
+ EventType.BOOT_NEW_INSTANCE,
+ }
+ }
# N-tuple listing default values for any metadata-related class
# attributes cached on an instance by process_data runs. These attribute
# values are reset via clear_cached_attrs during any update_metadata call.
cached_attr_defaults = (
- ('ec2_metadata', UNSET), ('network_json', UNSET),
- ('metadata', {}), ('userdata', None), ('userdata_raw', None),
- ('vendordata', None), ('vendordata_raw', None))
+ ("ec2_metadata", UNSET),
+ ("network_json", UNSET),
+ ("metadata", {}),
+ ("userdata", None),
+ ("userdata_raw", None),
+ ("vendordata", None),
+ ("vendordata_raw", None),
+ ("vendordata2", None),
+ ("vendordata2_raw", None),
+ )
_dirty_cache = False
# N-tuple of keypaths or keynames to redact from instance-data.json for
# non-root users
- sensitive_metadata_keys = ('merged_cfg', 'security-credentials',)
+ sensitive_metadata_keys = (
+ "merged_cfg",
+ "security-credentials",
+ )
+
+ _ci_pkl_version = 1
- def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+ def __init__(self, sys_cfg, distro: Distro, paths, ud_proc=None):
self.sys_cfg = sys_cfg
self.distro = distro
self.paths = paths
@@ -203,10 +249,13 @@ class DataSource(metaclass=abc.ABCMeta):
self.metadata = {}
self.userdata_raw = None
self.vendordata = None
+ self.vendordata2 = None
self.vendordata_raw = None
+ self.vendordata2_raw = None
self.ds_cfg = util.get_cfg_by_path(
- self.sys_cfg, ("datasource", self.dsname), {})
+ self.sys_cfg, ("datasource", self.dsname), {}
+ )
if not self.ds_cfg:
self.ds_cfg = {}
@@ -215,6 +264,28 @@ class DataSource(metaclass=abc.ABCMeta):
else:
self.ud_proc = ud_proc
+ def _unpickle(self, ci_pkl_version: int) -> None:
+ """Perform deserialization fixes for Paths."""
+ if not hasattr(self, "vendordata2"):
+ self.vendordata2 = None
+ if not hasattr(self, "vendordata2_raw"):
+ self.vendordata2_raw = None
+ if hasattr(self, "userdata") and self.userdata is not None:
+ # If userdata stores MIME data, on < python3.6 it will be
+ # missing the 'policy' attribute that exists on >=python3.6.
+ # Calling str() on the userdata will attempt to access this
+ # policy attribute. This will raise an exception, causing
+ # the pickle load to fail, so cloud-init will discard the cache
+ try:
+ str(self.userdata)
+ except AttributeError as e:
+ LOG.debug(
+ "Unable to unpickle datasource: %s."
+ " Ignoring current cache.",
+ e,
+ )
+ raise DatasourceUnpickleUserDataError() from e
+
def __str__(self):
return type_utils.obj_name(self)
@@ -228,28 +299,33 @@ class DataSource(metaclass=abc.ABCMeta):
# metadata to discover that content
sysinfo = instance_data["sys_info"]
return {
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': availability_zone,
- 'availability_zone': availability_zone,
- 'cloud-name': self.cloud_name,
- 'cloud_name': self.cloud_name,
- 'distro': sysinfo["dist"][0],
- 'distro_version': sysinfo["dist"][1],
- 'distro_release': sysinfo["dist"][2],
- 'platform': self.platform_type,
- 'public_ssh_keys': self.get_public_ssh_keys(),
- 'python_version': sysinfo["python"],
- 'instance-id': instance_id,
- 'instance_id': instance_id,
- 'kernel_release': sysinfo["uname"][2],
- 'local-hostname': local_hostname,
- 'local_hostname': local_hostname,
- 'machine': sysinfo["uname"][4],
- 'region': self.region,
- 'subplatform': self.subplatform,
- 'system_platform': sysinfo["platform"],
- 'variant': sysinfo["variant"]}}
+ "v1": {
+ "_beta_keys": ["subplatform"],
+ "availability-zone": availability_zone,
+ "availability_zone": availability_zone,
+ "cloud_id": canonical_cloud_id(
+ self.cloud_name, self.region, self.platform_type
+ ),
+ "cloud-name": self.cloud_name,
+ "cloud_name": self.cloud_name,
+ "distro": sysinfo["dist"][0],
+ "distro_version": sysinfo["dist"][1],
+ "distro_release": sysinfo["dist"][2],
+ "platform": self.platform_type,
+ "public_ssh_keys": self.get_public_ssh_keys(),
+ "python_version": sysinfo["python"],
+ "instance-id": instance_id,
+ "instance_id": instance_id,
+ "kernel_release": sysinfo["uname"][2],
+ "local-hostname": local_hostname,
+ "local_hostname": local_hostname,
+ "machine": sysinfo["uname"][4],
+ "region": self.region,
+ "subplatform": self.subplatform,
+ "system_platform": sysinfo["platform"],
+ "variant": sysinfo["variant"],
+ }
+ }
def clear_cached_attrs(self, attr_defaults=()):
"""Reset any cached metadata attributes to datasource defaults.
@@ -290,48 +366,61 @@ class DataSource(metaclass=abc.ABCMeta):
@return True on successful write, False otherwise.
"""
- if hasattr(self, '_crawled_metadata'):
+ if hasattr(self, "_crawled_metadata"):
# Any datasource with _crawled_metadata will best represent
# most recent, 'raw' metadata
crawled_metadata = copy.deepcopy(
- getattr(self, '_crawled_metadata'))
- crawled_metadata.pop('user-data', None)
- crawled_metadata.pop('vendor-data', None)
- instance_data = {'ds': crawled_metadata}
+ getattr(self, "_crawled_metadata")
+ )
+ crawled_metadata.pop("user-data", None)
+ crawled_metadata.pop("vendor-data", None)
+ instance_data = {"ds": crawled_metadata}
else:
- instance_data = {'ds': {'meta_data': self.metadata}}
- if hasattr(self, 'network_json'):
- network_json = getattr(self, 'network_json')
+ instance_data = {"ds": {"meta_data": self.metadata}}
+ if hasattr(self, "network_json"):
+ network_json = getattr(self, "network_json")
if network_json != UNSET:
- instance_data['ds']['network_json'] = network_json
- if hasattr(self, 'ec2_metadata'):
- ec2_metadata = getattr(self, 'ec2_metadata')
+ instance_data["ds"]["network_json"] = network_json
+ if hasattr(self, "ec2_metadata"):
+ ec2_metadata = getattr(self, "ec2_metadata")
if ec2_metadata != UNSET:
- instance_data['ds']['ec2_metadata'] = ec2_metadata
- instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT
+ instance_data["ds"]["ec2_metadata"] = ec2_metadata
+ instance_data["ds"]["_doc"] = EXPERIMENTAL_TEXT
# Add merged cloud.cfg and sys info for jinja templates and cli query
- instance_data['merged_cfg'] = copy.deepcopy(self.sys_cfg)
- instance_data['merged_cfg']['_doc'] = (
- 'Merged cloud-init system config from /etc/cloud/cloud.cfg and'
- ' /etc/cloud/cloud.cfg.d/')
- instance_data['sys_info'] = util.system_info()
- instance_data.update(
- self._get_standardized_metadata(instance_data))
+ instance_data["merged_cfg"] = copy.deepcopy(self.sys_cfg)
+ instance_data["merged_cfg"]["_doc"] = (
+ "Merged cloud-init system config from /etc/cloud/cloud.cfg and"
+ " /etc/cloud/cloud.cfg.d/"
+ )
+ instance_data["sys_info"] = util.system_info()
+ instance_data.update(self._get_standardized_metadata(instance_data))
try:
# Process content base64encoding unserializable values
content = util.json_dumps(instance_data)
# Strip base64: prefix and set base64_encoded_keys list.
processed_data = process_instance_metadata(
json.loads(content),
- sensitive_keys=self.sensitive_metadata_keys)
+ sensitive_keys=self.sensitive_metadata_keys,
+ )
except TypeError as e:
- LOG.warning('Error persisting instance-data.json: %s', str(e))
+ LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
except UnicodeDecodeError as e:
- LOG.warning('Error persisting instance-data.json: %s', str(e))
+ LOG.warning("Error persisting instance-data.json: %s", str(e))
return False
- json_sensitive_file = os.path.join(self.paths.run_dir,
- INSTANCE_JSON_SENSITIVE_FILE)
+ json_sensitive_file = os.path.join(
+ self.paths.run_dir, INSTANCE_JSON_SENSITIVE_FILE
+ )
+ cloud_id = instance_data["v1"].get("cloud_id", "none")
+ cloud_id_file = os.path.join(self.paths.run_dir, "cloud-id")
+ util.write_file(f"{cloud_id_file}-{cloud_id}", f"{cloud_id}\n")
+ if os.path.exists(cloud_id_file):
+ prev_cloud_id_file = os.path.realpath(cloud_id_file)
+ else:
+ prev_cloud_id_file = cloud_id_file
+ util.sym_link(f"{cloud_id_file}-{cloud_id}", cloud_id_file, force=True)
+ if prev_cloud_id_file != cloud_id_file:
+ util.del_file(prev_cloud_id_file)
write_json(json_sensitive_file, processed_data, mode=0o600)
json_file = os.path.join(self.paths.run_dir, INSTANCE_JSON_FILE)
# World readable
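
A minimal sketch of the cloud-id symlink rotation added above, using only
the standard library and assuming POSIX symlink semantics; the final
"prev not in (link, target)" guard is an extra safety written for this
sketch so an unchanged cloud-id never deletes its own link target.

    import os

    def rotate_cloud_id(run_dir, cloud_id):
        link = os.path.join(run_dir, "cloud-id")
        target = "%s-%s" % (link, cloud_id)
        with open(target, "w") as f:
            f.write(cloud_id + "\n")
        prev = os.path.realpath(link) if os.path.exists(link) else link
        tmp = link + ".tmp"
        os.symlink(target, tmp)
        os.replace(tmp, link)  # atomically (re)point the symlink
        if prev not in (link, target):
            os.unlink(prev)  # drop the file for the previous cloud-id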
@@ -341,8 +430,9 @@ class DataSource(metaclass=abc.ABCMeta):
def _get_data(self):
"""Walk metadata sources, process crawled data and save attributes."""
raise NotImplementedError(
- 'Subclasses of DataSource must implement _get_data which'
- ' sets self.metadata, vendordata_raw and userdata_raw.')
+ "Subclasses of DataSource must implement _get_data which"
+ " sets self.metadata, vendordata_raw and userdata_raw."
+ )
def get_url_params(self):
"""Return the Datasource's prefered url_read parameters.
@@ -357,28 +447,52 @@ class DataSource(metaclass=abc.ABCMeta):
max_wait = int(self.ds_cfg.get("max_wait", self.url_max_wait))
except ValueError:
util.logexc(
- LOG, "Config max_wait '%s' is not an int, using default '%s'",
- self.ds_cfg.get("max_wait"), max_wait)
+ LOG,
+ "Config max_wait '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("max_wait"),
+ max_wait,
+ )
timeout = self.url_timeout
try:
- timeout = max(
- 0, int(self.ds_cfg.get("timeout", self.url_timeout)))
+ timeout = max(0, int(self.ds_cfg.get("timeout", self.url_timeout)))
except ValueError:
timeout = self.url_timeout
util.logexc(
- LOG, "Config timeout '%s' is not an int, using default '%s'",
- self.ds_cfg.get('timeout'), timeout)
+ LOG,
+ "Config timeout '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("timeout"),
+ timeout,
+ )
retries = self.url_retries
try:
retries = int(self.ds_cfg.get("retries", self.url_retries))
except Exception:
util.logexc(
- LOG, "Config retries '%s' is not an int, using default '%s'",
- self.ds_cfg.get('retries'), retries)
+ LOG,
+ "Config retries '%s' is not an int, using default '%s'",
+ self.ds_cfg.get("retries"),
+ retries,
+ )
- return URLParams(max_wait, timeout, retries)
+ sec_between_retries = self.url_sec_between_retries
+ try:
+ sec_between_retries = int(
+ self.ds_cfg.get(
+ "sec_between_retries", self.url_sec_between_retries
+ )
+ )
+ except Exception:
+ util.logexc(
+ LOG,
+ "Config sec_between_retries '%s' is not an int,"
+ " using default '%s'",
+ self.ds_cfg.get("sec_between_retries"),
+ sec_between_retries,
+ )
+
+ return URLParams(max_wait, timeout, retries, sec_between_retries)
def get_userdata(self, apply_filter=False):
if self.userdata is None:
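
The repeated parse-int-or-fall-back pattern in get_url_params above boils
down to a small helper; int_or_default is written here only for
illustration and is not how cloud-init structures the code.

    def int_or_default(raw, default):
        # Fall back to the class default when the value is not an int.
        try:
            return int(raw)
        except (TypeError, ValueError):
            return default

    assert int_or_default("7", 5) == 7
    assert int_or_default("x", 5) == 5
    assert int_or_default(None, 5) == 5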
@@ -392,6 +506,11 @@ class DataSource(metaclass=abc.ABCMeta):
self.vendordata = self.ud_proc.process(self.get_vendordata_raw())
return self.vendordata
+ def get_vendordata2(self):
+ if self.vendordata2 is None:
+ self.vendordata2 = self.ud_proc.process(self.get_vendordata2_raw())
+ return self.vendordata2
+
@property
def fallback_interface(self):
"""Determine the network interface used during local network config."""
@@ -399,13 +518,13 @@ class DataSource(metaclass=abc.ABCMeta):
self._fallback_interface = net.find_fallback_nic()
if self._fallback_interface is None:
LOG.warning(
- "Did not find a fallback interface on %s.",
- self.cloud_name)
+ "Did not find a fallback interface on %s.", self.cloud_name
+ )
return self._fallback_interface
@property
def platform_type(self):
- if not hasattr(self, '_platform_type'):
+ if not hasattr(self, "_platform_type"):
# Handle upgrade path where pickled datasource has no _platform.
self._platform_type = self.dsname.lower()
if not self._platform_type:
@@ -424,7 +543,7 @@ class DataSource(metaclass=abc.ABCMeta):
nocloud: seed-dir (/seed/dir/path)
lxd: nocloud (/seed/dir/path)
"""
- if not hasattr(self, '_subplatform'):
+ if not hasattr(self, "_subplatform"):
# Handle upgrade path where pickled datasource has no _platform.
self._subplatform = self._get_subplatform()
if not self._subplatform:
@@ -433,8 +552,8 @@ class DataSource(metaclass=abc.ABCMeta):
def _get_subplatform(self):
"""Subclasses should implement to return a "slug (detail)" string."""
- if hasattr(self, 'metadata_address'):
- return 'metadata (%s)' % getattr(self, 'metadata_address')
+ if hasattr(self, "metadata_address"):
+ return "metadata (%s)" % getattr(self, "metadata_address")
return METADATA_UNKNOWN
@property
@@ -453,8 +572,10 @@ class DataSource(metaclass=abc.ABCMeta):
else:
self._cloud_name = self._get_cloud_name().lower()
LOG.debug(
- 'Ignoring metadata provided key %s: non-string type %s',
- METADATA_CLOUD_NAME_KEY, type(cloud_name))
+ "Ignoring metadata provided key %s: non-string type %s",
+ METADATA_CLOUD_NAME_KEY,
+ type(cloud_name),
+ )
else:
self._cloud_name = self._get_cloud_name().lower()
return self._cloud_name
@@ -471,8 +592,8 @@ class DataSource(metaclass=abc.ABCMeta):
def launch_index(self):
if not self.metadata:
return None
- if 'launch-index' in self.metadata:
- return self.metadata['launch-index']
+ if "launch-index" in self.metadata:
+ return self.metadata["launch-index"]
return None
def _filter_xdata(self, processed_ud):
@@ -494,6 +615,9 @@ class DataSource(metaclass=abc.ABCMeta):
def get_vendordata_raw(self):
return self.vendordata_raw
+ def get_vendordata2_raw(self):
+ return self.vendordata2_raw
+
# the data sources' config_obj is a cloud-config formatted
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
@@ -501,7 +625,7 @@ class DataSource(metaclass=abc.ABCMeta):
return {}
def get_public_ssh_keys(self):
- return normalize_pubkey_data(self.metadata.get('public-keys'))
+ return normalize_pubkey_data(self.metadata.get("public-keys"))
def publish_host_keys(self, hostkeys):
"""Publish the public SSH host keys (found in /etc/ssh/*.pub).
@@ -523,7 +647,7 @@ class DataSource(metaclass=abc.ABCMeta):
if not short_name.startswith(nfrom):
continue
for nto in tlist:
- cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
+ cand = "/dev/%s%s" % (nto, short_name[len(nfrom) :])
if os.path.exists(cand):
return cand
return None
@@ -548,20 +672,21 @@ class DataSource(metaclass=abc.ABCMeta):
@property
def availability_zone(self):
top_level_az = self.metadata.get(
- 'availability-zone', self.metadata.get('availability_zone'))
+ "availability-zone", self.metadata.get("availability_zone")
+ )
if top_level_az:
return top_level_az
- return self.metadata.get('placement', {}).get('availability-zone')
+ return self.metadata.get("placement", {}).get("availability-zone")
@property
def region(self):
- return self.metadata.get('region')
+ return self.metadata.get("region")
def get_instance_id(self):
- if not self.metadata or 'instance-id' not in self.metadata:
+ if not self.metadata or "instance-id" not in self.metadata:
# Return a magic "not really an instance id" string
return "iid-datasource"
- return str(self.metadata['instance-id'])
+ return str(self.metadata["instance-id"])
def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False):
"""Get hostname or fqdn from the datasource. Look it up if desired.
@@ -579,7 +704,7 @@ class DataSource(metaclass=abc.ABCMeta):
defhost = "localhost"
domain = defdomain
- if not self.metadata or not self.metadata.get('local-hostname'):
+ if not self.metadata or not self.metadata.get("local-hostname"):
if metadata_only:
return None
# this is somewhat questionable really.
@@ -600,14 +725,14 @@ class DataSource(metaclass=abc.ABCMeta):
else:
# if there is an ipv4 address in 'local-hostname', then
# make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
- lhost = self.metadata['local-hostname']
+ lhost = self.metadata["local-hostname"]
if net.is_ipv4_address(lhost):
toks = []
if resolve_ip:
toks = util.gethostbyaddr(lhost)
if toks:
- toks = str(toks).split('.')
+ toks = str(toks).split(".")
else:
toks = ["ip-%s" % lhost.replace(".", "-")]
else:
@@ -615,7 +740,7 @@ class DataSource(metaclass=abc.ABCMeta):
if len(toks) > 1:
hostname = toks[0]
- domain = '.'.join(toks[1:])
+ domain = ".".join(toks[1:])
else:
hostname = toks[0]
@@ -627,10 +752,25 @@ class DataSource(metaclass=abc.ABCMeta):
def get_package_mirror_info(self):
return self.distro.get_package_mirror_info(data_source=self)
- def update_metadata(self, source_event_types):
+ def get_supported_events(self, source_event_types: List[EventType]):
+ supported_events = {} # type: Dict[EventScope, set]
+ for event in source_event_types:
+ for (
+ update_scope,
+ update_events,
+ ) in self.supported_update_events.items():
+ if event in update_events:
+ if not supported_events.get(update_scope):
+ supported_events[update_scope] = set()
+ supported_events[update_scope].add(event)
+ return supported_events
+
+ def update_metadata_if_supported(
+ self, source_event_types: List[EventType]
+ ) -> bool:
"""Refresh cached metadata if the datasource supports this event.
- The datasource has a list of update_events which
+ The datasource has a list of supported_update_events which
trigger refreshing all cached metadata as well as refreshing the
network configuration.
@@ -640,28 +780,27 @@ class DataSource(metaclass=abc.ABCMeta):
@return True if the datasource did successfully update cached metadata
due to source_event_type.
"""
- supported_events = {}
- for event in source_event_types:
- for update_scope, update_events in self.update_events.items():
- if event in update_events:
- if not supported_events.get(update_scope):
- supported_events[update_scope] = set()
- supported_events[update_scope].add(event)
+ supported_events = self.get_supported_events(source_event_types)
for scope, matched_events in supported_events.items():
LOG.debug(
"Update datasource metadata and %s config due to events: %s",
- scope, ', '.join(matched_events))
+ scope.value,
+ ", ".join([event.value for event in matched_events]),
+ )
# Each datasource has a cached config property which needs clearing
# Once cleared that config property will be regenerated from
# current metadata.
- self.clear_cached_attrs((('_%s_config' % scope, UNSET),))
+ self.clear_cached_attrs((("_%s_config" % scope, UNSET),))
if supported_events:
self.clear_cached_attrs()
result = self.get_data()
if result:
return True
- LOG.debug("Datasource %s not updated for events: %s", self,
- ', '.join(source_event_types))
+ LOG.debug(
+ "Datasource %s not updated for events: %s",
+ self,
+ ", ".join([event.value for event in source_event_types]),
+ )
return False
def check_instance_id(self, sys_cfg):
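
A note on the refactor above: get_supported_events() filters the caller-supplied events down to those the datasource declares in supported_update_events, grouped by scope, and update_metadata_if_supported() only refreshes when that filter is non-empty. A minimal sketch of the filtering logic, using stand-in enums (the real EventScope/EventType are defined elsewhere in cloud-init):

    from enum import Enum

    class EventScope(Enum):
        NETWORK = "network"

    class EventType(Enum):
        BOOT = "boot"
        BOOT_NEW_INSTANCE = "boot-new-instance"

    # What a datasource declares: supported events, keyed by scope.
    supported_update_events = {
        EventScope.NETWORK: {EventType.BOOT_NEW_INSTANCE}
    }

    def get_supported_events(source_event_types):
        supported = {}
        for event in source_event_types:
            for scope, events in supported_update_events.items():
                if event in events:
                    supported.setdefault(scope, set()).add(event)
        return supported

    # BOOT is filtered out; only BOOT_NEW_INSTANCE survives, under NETWORK.
    print(get_supported_events([EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
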
@@ -683,8 +822,9 @@ class DataSource(metaclass=abc.ABCMeta):
if candidate in valid:
return candidate
else:
- LOG.warning("invalid dsmode '%s', using default=%s",
- candidate, default)
+ LOG.warning(
+ "invalid dsmode '%s', using default=%s", candidate, default
+ )
return default
return default
@@ -763,19 +903,23 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
name="search-%s" % name.replace("DataSource", ""),
description="searching for %s data from %s" % (mode, name),
message="no %s data found from %s" % (mode, name),
- parent=reporter)
+ parent=reporter,
+ )
try:
with myrep:
LOG.debug("Seeing if we can get any data from %s", cls)
s = cls(sys_cfg, distro, paths)
- if s.update_metadata([EventType.BOOT_NEW_INSTANCE]):
+ if s.update_metadata_if_supported(
+ [EventType.BOOT_NEW_INSTANCE]
+ ):
myrep.message = "found %s data from %s" % (mode, name)
return (s, type_utils.obj_name(cls))
except Exception:
util.logexc(LOG, "Getting data from %s failed", cls)
- msg = ("Did not find any data source,"
- " searched classes: (%s)") % (", ".join(ds_names))
+ msg = "Did not find any data source, searched classes: (%s)" % ", ".join(
+ ds_names
+ )
raise DataSourceNotFoundException(msg)
@@ -785,15 +929,25 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
# Return an ordered list of classes that match (if any)
def list_sources(cfg_list, depends, pkg_list):
src_list = []
- LOG.debug(("Looking for data source in: %s,"
- " via packages %s that matches dependencies %s"),
- cfg_list, pkg_list, depends)
+ LOG.debug(
+ "Looking for data source in: %s,"
+ " via packages %s that matches dependencies %s",
+ cfg_list,
+ pkg_list,
+ depends,
+ )
for ds_name in cfg_list:
if not ds_name.startswith(DS_PREFIX):
- ds_name = '%s%s' % (DS_PREFIX, ds_name)
- m_locs, _looked_locs = importer.find_module(ds_name,
- pkg_list,
- ['get_datasource_list'])
+ ds_name = "%s%s" % (DS_PREFIX, ds_name)
+ m_locs, _looked_locs = importer.find_module(
+ ds_name, pkg_list, ["get_datasource_list"]
+ )
+ if not m_locs:
+ LOG.error(
+ "Could not import %s. Does the DataSource exist and "
+ "is it importable?",
+ ds_name,
+ )
for m_loc in m_locs:
mod = importer.import_module(m_loc)
lister = getattr(mod, "get_datasource_list")
@@ -804,7 +958,7 @@ def list_sources(cfg_list, depends, pkg_list):
return src_list
-def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
+def instance_id_matches_system_uuid(instance_id, field="system-uuid"):
# quickly check (local only) whether self.instance_id is still valid;
# we check the kernel command line or files.
if not instance_id:
@@ -854,8 +1008,7 @@ def convert_vendordata(data, recurse=True):
return copy.deepcopy(data)
if isinstance(data, dict):
if recurse is True:
- return convert_vendordata(data.get('cloud-init'),
- recurse=False)
+ return convert_vendordata(data.get("cloud-init"), recurse=False)
raise ValueError("vendordata['cloud-init'] cannot be dict")
raise ValueError("Unknown data type for vendordata: %s" % type(data))
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index d3055d08..d07dc3c0 100755
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -6,27 +6,28 @@ import os
import re
import socket
import struct
-import time
import textwrap
+import time
import zlib
-from errno import ENOENT
-
-from cloudinit.settings import CFG_BUILTIN
-from cloudinit.net import dhcp
-from cloudinit import stages
-from cloudinit import temp_utils
from contextlib import contextmanager
+from datetime import datetime
+from errno import ENOENT
+from typing import List, Optional
from xml.etree import ElementTree
from xml.sax.saxutils import escape
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
-from cloudinit import version
-from cloudinit import distros
+from cloudinit import (
+ distros,
+ stages,
+ subp,
+ temp_utils,
+ url_helper,
+ util,
+ version,
+)
+from cloudinit.net import dhcp
from cloudinit.reporting import events
-from cloudinit.net.dhcp import EphemeralDHCPv4
-from datetime import datetime
+from cloudinit.settings import CFG_BUILTIN
LOG = logging.getLogger(__name__)
@@ -34,10 +35,10 @@ LOG = logging.getLogger(__name__)
# value is applied if the endpoint can't be found within a lease file
DEFAULT_WIRESERVER_ENDPOINT = "a8:3f:81:10"
-BOOT_EVENT_TYPE = 'boot-telemetry'
-SYSTEMINFO_EVENT_TYPE = 'system-info'
-DIAGNOSTIC_EVENT_TYPE = 'diagnostic'
-COMPRESSED_EVENT_TYPE = 'compressed'
+BOOT_EVENT_TYPE = "boot-telemetry"
+SYSTEMINFO_EVENT_TYPE = "system-info"
+DIAGNOSTIC_EVENT_TYPE = "diagnostic"
+COMPRESSED_EVENT_TYPE = "compressed"
# Maximum number of bytes of the cloud-init.log file that can be dumped to KVP
# at once. This number is based on the analysis done on a large sample of
# cloud-init.log files where the P95 of the file sizes was 537KB and the time
@@ -45,25 +46,29 @@ COMPRESSED_EVENT_TYPE = 'compressed'
MAX_LOG_TO_KVP_LENGTH = 512000
# File to store the last byte of cloud-init.log that was pushed to KVP. This
# file will be deleted with every VM reboot.
-LOG_PUSHED_TO_KVP_INDEX_FILE = '/run/cloud-init/log_pushed_to_kvp_index'
+LOG_PUSHED_TO_KVP_INDEX_FILE = "/run/cloud-init/log_pushed_to_kvp_index"
azure_ds_reporter = events.ReportEventStack(
name="azure-ds",
description="initialize reporter for azure ds",
- reporting_enabled=True)
+ reporting_enabled=True,
+)
DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE = (
- 'The VM encountered an error during deployment. '
- 'Please visit https://aka.ms/linuxprovisioningerror '
- 'for more information on remediation.')
+ "The VM encountered an error during deployment. "
+ "Please visit https://aka.ms/linuxprovisioningerror "
+ "for more information on remediation."
+)
def azure_ds_telemetry_reporter(func):
def impl(*args, **kwargs):
with events.ReportEventStack(
- name=func.__name__,
- description=func.__name__,
- parent=azure_ds_reporter):
+ name=func.__name__,
+ description=func.__name__,
+ parent=azure_ds_reporter,
+ ):
return func(*args, **kwargs)
+
return impl
@@ -79,16 +84,16 @@ def is_byte_swapped(previous_id, current_id):
def swap_bytestring(s, width=2):
dd = [byte for byte in textwrap.wrap(s, 2)]
dd.reverse()
- return ''.join(dd)
+ return "".join(dd)
- parts = current_id.split('-')
- swapped_id = '-'.join(
+ parts = current_id.split("-")
+ swapped_id = "-".join(
[
swap_bytestring(parts[0]),
swap_bytestring(parts[1]),
swap_bytestring(parts[2]),
parts[3],
- parts[4]
+ parts[4],
]
)
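
Context for is_byte_swapped(): SMBIOS stores the first three UUID fields in a byte order that can differ between readers, so the same VM may report two textual UUIDs. The swap normalizes one form into the other before comparing. Worked example, assuming the helper behaves as in the hunk:

    import textwrap

    def swap_bytestring(s, width=2):
        dd = [byte for byte in textwrap.wrap(s, 2)]
        dd.reverse()
        return "".join(dd)

    # Only the first three dash-separated fields are swapped.
    parts = "12345678-1234-5678-9abc-def012345678".split("-")
    swapped = "-".join(
        [swap_bytestring(parts[0]), swap_bytestring(parts[1]),
         swap_bytestring(parts[2]), parts[3], parts[4]]
    )
    print(swapped)  # 78563412-3412-7856-9abc-def012345678
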
@@ -98,31 +103,29 @@ def is_byte_swapped(previous_id, current_id):
@azure_ds_telemetry_reporter
def get_boot_telemetry():
"""Report timestamps related to kernel initialization and systemd
- activation of cloud-init"""
+ activation of cloud-init"""
if not distros.uses_systemd():
- raise RuntimeError(
- "distro not using systemd, skipping boot telemetry")
+ raise RuntimeError("distro not using systemd, skipping boot telemetry")
LOG.debug("Collecting boot telemetry")
try:
kernel_start = float(time.time()) - float(util.uptime())
except ValueError as e:
- raise RuntimeError(
- "Failed to determine kernel start timestamp"
- ) from e
+ raise RuntimeError("Failed to determine kernel start timestamp") from e
try:
- out, _ = subp.subp(['/bin/systemctl',
- 'show', '-p',
- 'UserspaceTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ ["/bin/systemctl", "show", "-p", "UserspaceTimestampMonotonic"],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "UserspaceTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse UserspaceTimestampMonotonic from systemd"
+ )
user_start = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -135,16 +138,23 @@ def get_boot_telemetry():
) from e
try:
- out, _ = subp.subp(['/bin/systemctl', 'show',
- 'cloud-init-local', '-p',
- 'InactiveExitTimestampMonotonic'],
- capture=True)
+ out, _ = subp.subp(
+ [
+ "/bin/systemctl",
+ "show",
+ "cloud-init-local",
+ "-p",
+ "InactiveExitTimestampMonotonic",
+ ],
+ capture=True,
+ )
tsm = None
- if out and '=' in out:
+ if out and "=" in out:
tsm = out.split("=")[1]
if not tsm:
- raise RuntimeError("Failed to parse "
- "InactiveExitTimestampMonotonic from systemd")
+ raise RuntimeError(
+ "Failed to parse InactiveExitTimestampMonotonic from systemd"
+ )
cloudinit_activation = kernel_start + (float(tsm) / 1000000)
except subp.ProcessExecutionError as e:
@@ -158,12 +168,16 @@ def get_boot_telemetry():
) from e
evt = events.ReportingEvent(
- BOOT_EVENT_TYPE, 'boot-telemetry',
- "kernel_start=%s user_start=%s cloudinit_activation=%s" %
- (datetime.utcfromtimestamp(kernel_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(user_start).isoformat() + 'Z',
- datetime.utcfromtimestamp(cloudinit_activation).isoformat() + 'Z'),
- events.DEFAULT_EVENT_ORIGIN)
+ BOOT_EVENT_TYPE,
+ "boot-telemetry",
+ "kernel_start=%s user_start=%s cloudinit_activation=%s"
+ % (
+ datetime.utcfromtimestamp(kernel_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(user_start).isoformat() + "Z",
+ datetime.utcfromtimestamp(cloudinit_activation).isoformat() + "Z",
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
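
The timestamp arithmetic in get_boot_telemetry(): kernel start is wall-clock "now" minus uptime, and each systemd *TimestampMonotonic property (microseconds since kernel start) is added back onto that base. A sketch of the computation, with a hard-coded monotonic value standing in for the systemctl output:

    import time
    from datetime import datetime

    uptime = 42.0                        # seconds; util.uptime() in the real code
    kernel_start = time.time() - uptime  # epoch seconds at kernel start

    tsm = 2500000.0                      # e.g. UserspaceTimestampMonotonic, in us
    user_start = kernel_start + tsm / 1000000

    print(datetime.utcfromtimestamp(user_start).isoformat() + "Z")
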
@@ -175,13 +189,22 @@ def get_system_info():
"""Collect and report system information"""
info = util.system_info()
evt = events.ReportingEvent(
- SYSTEMINFO_EVENT_TYPE, 'system information',
+ SYSTEMINFO_EVENT_TYPE,
+ "system information",
"cloudinit_version=%s, kernel_version=%s, variant=%s, "
"distro_name=%s, distro_version=%s, flavor=%s, "
- "python_version=%s" %
- (version.version_string(), info['release'], info['variant'],
- info['dist'][0], info['dist'][1], info['dist'][2],
- info['python']), events.DEFAULT_EVENT_ORIGIN)
+ "python_version=%s"
+ % (
+ version.version_string(),
+ info["release"],
+ info["variant"],
+ info["dist"][0],
+ info["dist"][1],
+ info["dist"][2],
+ info["python"],
+ ),
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt)
# return the event for unit testing purpose
@@ -189,13 +212,17 @@ def get_system_info():
def report_diagnostic_event(
- msg: str, *, logger_func=None) -> events.ReportingEvent:
+ msg: str, *, logger_func=None
+) -> events.ReportingEvent:
"""Report a diagnostic event"""
if callable(logger_func):
logger_func(msg)
evt = events.ReportingEvent(
- DIAGNOSTIC_EVENT_TYPE, 'diagnostic message',
- msg, events.DEFAULT_EVENT_ORIGIN)
+ DIAGNOSTIC_EVENT_TYPE,
+ "diagnostic message",
+ msg,
+ events.DEFAULT_EVENT_ORIGIN,
+ )
events.report_event(evt, excluded_handler_types={"log"})
# return the event for unit testing purpose
@@ -205,21 +232,26 @@ def report_diagnostic_event(
def report_compressed_event(event_name, event_content):
"""Report a compressed event"""
compressed_data = base64.encodebytes(zlib.compress(event_content))
- event_data = {"encoding": "gz+b64",
- "data": compressed_data.decode('ascii')}
+ event_data = {
+ "encoding": "gz+b64",
+ "data": compressed_data.decode("ascii"),
+ }
evt = events.ReportingEvent(
- COMPRESSED_EVENT_TYPE, event_name,
+ COMPRESSED_EVENT_TYPE,
+ event_name,
json.dumps(event_data),
- events.DEFAULT_EVENT_ORIGIN)
- events.report_event(evt,
- excluded_handler_types={"log", "print", "webhook"})
+ events.DEFAULT_EVENT_ORIGIN,
+ )
+ events.report_event(
+ evt, excluded_handler_types={"log", "print", "webhook"}
+ )
# return the event for unit testing purpose
return evt
@azure_ds_telemetry_reporter
-def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
+def push_log_to_kvp(file_name=CFG_BUILTIN["def_log_file"]):
"""Push a portion of cloud-init.log file or the whole file to KVP
based on the file size.
The first time this function is called after VM boot, It will push the last
@@ -237,23 +269,26 @@ def push_log_to_kvp(file_name=CFG_BUILTIN['def_log_file']):
report_diagnostic_event(
"Dumping last {0} bytes of cloud-init.log file to KVP starting"
" from index: {1}".format(f.tell() - seek_index, seek_index),
- logger_func=LOG.debug)
+ logger_func=LOG.debug,
+ )
f.seek(seek_index, os.SEEK_SET)
report_compressed_event("cloud-init.log", f.read())
util.write_file(LOG_PUSHED_TO_KVP_INDEX_FILE, str(f.tell()))
except Exception as ex:
report_diagnostic_event(
"Exception when dumping log file: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
LOG.debug("Dumping dmesg log to KVP")
try:
- out, _ = subp.subp(['dmesg'], decode=False, capture=True)
+ out, _ = subp.subp(["dmesg"], decode=False, capture=True)
report_compressed_event("dmesg", out)
except Exception as ex:
report_diagnostic_event(
"Exception when dumping dmesg log: %s" % repr(ex),
- logger_func=LOG.warning)
+ logger_func=LOG.warning,
+ )
@azure_ds_telemetry_reporter
@@ -263,16 +298,20 @@ def get_last_log_byte_pushed_to_kvp_index():
return int(f.read())
except IOError as e:
if e.errno != ENOENT:
- report_diagnostic_event("Reading LOG_PUSHED_TO_KVP_INDEX_FILE"
- " failed: %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Reading LOG_PUSHED_TO_KVP_INDEX_FILE failed: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except ValueError as e:
- report_diagnostic_event("Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE"
- ": %s." % repr(e),
- logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Invalid value in LOG_PUSHED_TO_KVP_INDEX_FILE: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
except Exception as e:
- report_diagnostic_event("Failed to get the last log byte pushed to KVP"
- ": %s." % repr(e), logger_func=LOG.warning)
+ report_diagnostic_event(
+ "Failed to get the last log byte pushed to KVP: %s." % repr(e),
+ logger_func=LOG.warning,
+ )
return 0
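
The two functions above implement an incremental tail: a byte offset persisted in LOG_PUSHED_TO_KVP_INDEX_FILE records how much of the log was already pushed, so each call ships only new bytes, capped at MAX_LOG_TO_KVP_LENGTH. A generic sketch of the offset-file pattern (paths are invented for the demo):

    import os

    LOG_FILE = "/tmp/example.log"        # stand-in for cloud-init.log
    INDEX_FILE = "/tmp/example.log.idx"  # stand-in for the KVP index file
    MAX_PUSH = 512000

    def read_index():
        try:
            with open(INDEX_FILE) as f:
                return int(f.read())
        except (IOError, ValueError):
            return 0

    open(LOG_FILE, "ab").close()         # ensure the file exists for the demo
    with open(LOG_FILE, "rb") as f:
        f.seek(0, os.SEEK_END)
        # never push more than MAX_PUSH bytes; skip older content if needed
        seek_index = max(f.tell() - MAX_PUSH, read_index())
        f.seek(seek_index, os.SEEK_SET)
        new_bytes = f.read()             # would be compressed and sent to KVP
        with open(INDEX_FILE, "w") as idx:
            idx.write(str(f.tell()))
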
@@ -295,58 +334,97 @@ def _get_dhcp_endpoint_option_name():
@azure_ds_telemetry_reporter
-def http_with_retries(url, **kwargs) -> str:
+def http_with_retries(url, **kwargs) -> url_helper.UrlResponse:
"""Wrapper around url_helper.readurl() with custom telemetry logging
that url_helper.readurl() does not provide.
"""
- exc = None
-
max_readurl_attempts = 240
default_readurl_timeout = 5
+ sleep_duration_between_retries = 5
periodic_logging_attempts = 12
- if 'timeout' not in kwargs:
- kwargs['timeout'] = default_readurl_timeout
+ if "timeout" not in kwargs:
+ kwargs["timeout"] = default_readurl_timeout
# remove kwargs that cause url_helper.readurl to retry,
# since we are already implementing our own retry logic.
- if kwargs.pop('retries', None):
+ if kwargs.pop("retries", None):
LOG.warning(
- 'Ignoring retries kwarg passed in for '
- 'communication with Azure endpoint.')
- if kwargs.pop('infinite', None):
+ "Ignoring retries kwarg passed in for "
+ "communication with Azure endpoint."
+ )
+ if kwargs.pop("infinite", None):
LOG.warning(
- 'Ignoring infinite kwarg passed in for communication '
- 'with Azure endpoint.')
+ "Ignoring infinite kwarg passed in for communication "
+ "with Azure endpoint."
+ )
for attempt in range(1, max_readurl_attempts + 1):
try:
ret = url_helper.readurl(url, **kwargs)
report_diagnostic_event(
- 'Successful HTTP request with Azure endpoint %s after '
- '%d attempts' % (url, attempt),
- logger_func=LOG.debug)
+ "Successful HTTP request with Azure endpoint %s after "
+ "%d attempts" % (url, attempt),
+ logger_func=LOG.debug,
+ )
return ret
except Exception as e:
- exc = e
if attempt % periodic_logging_attempts == 0:
report_diagnostic_event(
- 'Failed HTTP request with Azure endpoint %s during '
- 'attempt %d with exception: %s' %
- (url, attempt, e),
- logger_func=LOG.debug)
-
- raise exc
+ "Failed HTTP request with Azure endpoint %s during "
+ "attempt %d with exception: %s" % (url, attempt, e),
+ logger_func=LOG.debug,
+ )
+ if attempt == max_readurl_attempts:
+ raise
+
+ time.sleep(sleep_duration_between_retries)
+
+ raise RuntimeError("Failed to return in http_with_retries")
+
+
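
The rewritten loop above is a bounded retry: up to max_readurl_attempts requests, a fixed sleep between failures, periodic rather than per-attempt logging, and the last exception re-raised on the final attempt. A condensed sketch of the same pattern around a generic callable:

    import time

    def with_retries(fn, attempts=240, sleep_s=5, log_every=12):
        for attempt in range(1, attempts + 1):
            try:
                return fn()
            except Exception as e:
                if attempt % log_every == 0:
                    print("attempt %d failed: %s" % (attempt, e))
                if attempt == attempts:
                    raise            # surface the last failure to the caller
                time.sleep(sleep_s)
        raise RuntimeError("unreachable")  # mirrors the defensive raise above

    print(with_retries(lambda: "ok", attempts=3, sleep_s=0))
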
+def build_minimal_ovf(
+ username: str, hostname: str, disableSshPwd: str
+) -> bytes:
+ OVF_ENV_TEMPLATE = textwrap.dedent(
+ """\
+ <ns0:Environment xmlns:ns0="http://schemas.dmtf.org/ovf/environment/1"
+ xmlns:ns1="http://schemas.microsoft.com/windowsazure"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
+ <ns1:ProvisioningSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:LinuxProvisioningConfigurationSet>
+ <ns1:ConfigurationSetType>LinuxProvisioningConfiguration
+ </ns1:ConfigurationSetType>
+ <ns1:UserName>{username}</ns1:UserName>
+ <ns1:DisableSshPasswordAuthentication>{disableSshPwd}
+ </ns1:DisableSshPasswordAuthentication>
+ <ns1:HostName>{hostname}</ns1:HostName>
+ </ns1:LinuxProvisioningConfigurationSet>
+ </ns1:ProvisioningSection>
+ <ns1:PlatformSettingsSection>
+ <ns1:Version>1.0</ns1:Version>
+ <ns1:PlatformSettings>
+ <ns1:ProvisionGuestAgent>true</ns1:ProvisionGuestAgent>
+ </ns1:PlatformSettings>
+ </ns1:PlatformSettingsSection>
+ </ns0:Environment>
+ """
+ )
+ ret = OVF_ENV_TEMPLATE.format(
+ username=username, hostname=hostname, disableSshPwd=disableSshPwd
+ )
+ return ret.encode("utf-8")
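
build_minimal_ovf() interpolates three strings into a fixed ovf-env.xml skeleton and UTF-8-encodes the result. Usage sketch (argument values invented):

    ovf = build_minimal_ovf(
        username="azureuser",
        hostname="example-vm",
        disableSshPwd="true",  # passed through verbatim into the XML
    )
    print(ovf.decode("utf-8"))
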
class AzureEndpointHttpClient:
headers = {
- 'x-ms-agent-name': 'WALinuxAgent',
- 'x-ms-version': '2012-11-30',
+ "x-ms-agent-name": "WALinuxAgent",
+ "x-ms-version": "2012-11-30",
}
def __init__(self, certificate):
@@ -355,20 +433,21 @@ class AzureEndpointHttpClient:
"x-ms-guest-agent-public-x509-cert": certificate,
}
- def get(self, url, secure=False):
+ def get(self, url, secure=False) -> url_helper.UrlResponse:
headers = self.headers
if secure:
headers = self.headers.copy()
headers.update(self.extra_secure_headers)
return http_with_retries(url, headers=headers)
- def post(self, url, data=None, extra_headers=None):
+ def post(
+ self, url, data=None, extra_headers=None
+ ) -> url_helper.UrlResponse:
headers = self.headers
if extra_headers is not None:
headers = self.headers.copy()
headers.update(extra_headers)
- return http_with_retries(
- url, data=data, headers=headers)
+ return http_with_retries(url, data=data, headers=headers)
class InvalidGoalStateXMLException(Exception):
@@ -376,12 +455,12 @@ class InvalidGoalStateXMLException(Exception):
class GoalState:
-
def __init__(
- self,
- unparsed_xml: str,
- azure_endpoint_client: AzureEndpointHttpClient,
- need_certificate: bool = True) -> None:
+ self,
+ unparsed_xml: str,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ need_certificate: bool = True,
+ ) -> None:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_xml: string representing a GoalState XML.
@@ -395,36 +474,41 @@ class GoalState:
self.root = ElementTree.fromstring(unparsed_xml)
except ElementTree.ParseError as e:
report_diagnostic_event(
- 'Failed to parse GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Failed to parse GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- self.container_id = self._text_from_xpath('./Container/ContainerId')
+ self.container_id = self._text_from_xpath("./Container/ContainerId")
self.instance_id = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance/InstanceId')
- self.incarnation = self._text_from_xpath('./Incarnation')
+ "./Container/RoleInstanceList/RoleInstance/InstanceId"
+ )
+ self.incarnation = self._text_from_xpath("./Incarnation")
for attr in ("container_id", "instance_id", "incarnation"):
if getattr(self, attr) is None:
- msg = 'Missing %s in GoalState XML' % attr
+ msg = "Missing %s in GoalState XML" % attr
report_diagnostic_event(msg, logger_func=LOG.warning)
raise InvalidGoalStateXMLException(msg)
self.certificates_xml = None
url = self._text_from_xpath(
- './Container/RoleInstanceList/RoleInstance'
- '/Configuration/Certificates')
+ "./Container/RoleInstanceList/RoleInstance"
+ "/Configuration/Certificates"
+ )
if url is not None and need_certificate:
with events.ReportEventStack(
- name="get-certificates-xml",
- description="get certificates xml",
- parent=azure_ds_reporter):
- self.certificates_xml = \
- self.azure_endpoint_client.get(
- url, secure=True).contents
+ name="get-certificates-xml",
+ description="get certificates xml",
+ parent=azure_ds_reporter,
+ ):
+ self.certificates_xml = self.azure_endpoint_client.get(
+ url, secure=True
+ ).contents
if self.certificates_xml is None:
raise InvalidGoalStateXMLException(
- 'Azure endpoint returned empty certificates xml.')
+ "Azure endpoint returned empty certificates xml."
+ )
def _text_from_xpath(self, xpath):
element = self.root.find(xpath)
@@ -436,8 +520,8 @@ class GoalState:
class OpenSSLManager:
certificate_names = {
- 'private_key': 'TransportPrivate.pem',
- 'certificate': 'TransportCert.pem',
+ "private_key": "TransportPrivate.pem",
+ "certificate": "TransportCert.pem",
}
def __init__(self):
@@ -458,35 +542,47 @@ class OpenSSLManager:
@azure_ds_telemetry_reporter
def generate_certificate(self):
- LOG.debug('Generating certificate for communication with fabric...')
+ LOG.debug("Generating certificate for communication with fabric...")
if self.certificate is not None:
- LOG.debug('Certificate already generated.')
+ LOG.debug("Certificate already generated.")
return
with cd(self.tmpdir):
- subp.subp([
- 'openssl', 'req', '-x509', '-nodes', '-subj',
- '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
- '-keyout', self.certificate_names['private_key'],
- '-out', self.certificate_names['certificate'],
- ])
- certificate = ''
- for line in open(self.certificate_names['certificate']):
+ subp.subp(
+ [
+ "openssl",
+ "req",
+ "-x509",
+ "-nodes",
+ "-subj",
+ "/CN=LinuxTransport",
+ "-days",
+ "32768",
+ "-newkey",
+ "rsa:2048",
+ "-keyout",
+ self.certificate_names["private_key"],
+ "-out",
+ self.certificate_names["certificate"],
+ ]
+ )
+ certificate = ""
+ for line in open(self.certificate_names["certificate"]):
if "CERTIFICATE" not in line:
certificate += line.rstrip()
self.certificate = certificate
- LOG.debug('New certificate generated.')
+ LOG.debug("New certificate generated.")
@staticmethod
@azure_ds_telemetry_reporter
def _run_x509_action(action, cert):
- cmd = ['openssl', 'x509', '-noout', action]
+ cmd = ["openssl", "x509", "-noout", action]
result, _ = subp.subp(cmd, data=cert)
return result
@azure_ds_telemetry_reporter
def _get_ssh_key_from_cert(self, certificate):
- pub_key = self._run_x509_action('-pubkey', certificate)
- keygen_cmd = ['ssh-keygen', '-i', '-m', 'PKCS8', '-f', '/dev/stdin']
+ pub_key = self._run_x509_action("-pubkey", certificate)
+ keygen_cmd = ["ssh-keygen", "-i", "-m", "PKCS8", "-f", "/dev/stdin"]
ssh_key, _ = subp.subp(keygen_cmd, data=pub_key)
return ssh_key
@@ -499,48 +595,50 @@ class OpenSSLManager:
Azure control plane passes that fingerprint as so:
'073E19D14D1C799224C6A0FD8DDAB6A8BF27D473'
"""
- raw_fp = self._run_x509_action('-fingerprint', certificate)
- eq = raw_fp.find('=')
- octets = raw_fp[eq+1:-1].split(':')
- return ''.join(octets)
+ raw_fp = self._run_x509_action("-fingerprint", certificate)
+ eq = raw_fp.find("=")
+ octets = raw_fp[eq + 1 : -1].split(":")
+ return "".join(octets)
@azure_ds_telemetry_reporter
def _decrypt_certs_from_xml(self, certificates_xml):
"""Decrypt the certificates XML document using the our private key;
- return the list of certs and private keys contained in the doc.
+ return the list of certs and private keys contained in the doc.
"""
- tag = ElementTree.fromstring(certificates_xml).find('.//Data')
+ tag = ElementTree.fromstring(certificates_xml).find(".//Data")
certificates_content = tag.text
lines = [
- b'MIME-Version: 1.0',
+ b"MIME-Version: 1.0",
b'Content-Disposition: attachment; filename="Certificates.p7m"',
b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
- b'Content-Transfer-Encoding: base64',
- b'',
- certificates_content.encode('utf-8'),
+ b"Content-Transfer-Encoding: base64",
+ b"",
+ certificates_content.encode("utf-8"),
]
with cd(self.tmpdir):
out, _ = subp.subp(
- 'openssl cms -decrypt -in /dev/stdin -inkey'
- ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
- ' -password pass:'.format(**self.certificate_names),
- shell=True, data=b'\n'.join(lines))
+ "openssl cms -decrypt -in /dev/stdin -inkey"
+ " {private_key} -recip {certificate} | openssl pkcs12 -nodes"
+ " -password pass:".format(**self.certificate_names),
+ shell=True,
+ data=b"\n".join(lines),
+ )
return out
@azure_ds_telemetry_reporter
def parse_certificates(self, certificates_xml):
"""Given the Certificates XML document, return a dictionary of
- fingerprints and associated SSH keys derived from the certs."""
+ fingerprints and associated SSH keys derived from the certs."""
out = self._decrypt_certs_from_xml(certificates_xml)
current = []
keys = {}
for line in out.splitlines():
current.append(line)
- if re.match(r'[-]+END .*?KEY[-]+$', line):
+ if re.match(r"[-]+END .*?KEY[-]+$", line):
# ignore private_keys
current = []
- elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
- certificate = '\n'.join(current)
+ elif re.match(r"[-]+END .*?CERTIFICATE[-]+$", line):
+ certificate = "\n".join(current)
ssh_key = self._get_ssh_key_from_cert(certificate)
fingerprint = self._get_fingerprint_from_cert(certificate)
keys[fingerprint] = ssh_key
@@ -550,7 +648,8 @@ class OpenSSLManager:
class GoalStateHealthReporter:
- HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_REPORT_XML_TEMPLATE = textwrap.dedent(
+ """\
<?xml version="1.0" encoding="utf-8"?>
<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
@@ -568,25 +667,30 @@ class GoalStateHealthReporter:
</RoleInstanceList>
</Container>
</Health>
- ''')
+ """
+ )
- HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent('''\
+ HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE = textwrap.dedent(
+ """\
<Details>
<SubStatus>{health_substatus}</SubStatus>
<Description>{health_description}</Description>
</Details>
- ''')
+ """
+ )
- PROVISIONING_SUCCESS_STATUS = 'Ready'
- PROVISIONING_NOT_READY_STATUS = 'NotReady'
- PROVISIONING_FAILURE_SUBSTATUS = 'ProvisioningFailed'
+ PROVISIONING_SUCCESS_STATUS = "Ready"
+ PROVISIONING_NOT_READY_STATUS = "NotReady"
+ PROVISIONING_FAILURE_SUBSTATUS = "ProvisioningFailed"
HEALTH_REPORT_DESCRIPTION_TRIM_LEN = 512
def __init__(
- self, goal_state: GoalState,
- azure_endpoint_client: AzureEndpointHttpClient,
- endpoint: str) -> None:
+ self,
+ goal_state: GoalState,
+ azure_endpoint_client: AzureEndpointHttpClient,
+ endpoint: str,
+ ) -> None:
"""Creates instance that will report provisioning status to an endpoint
@param goal_state: An instance of class GoalState that contains
@@ -608,17 +712,19 @@ class GoalStateHealthReporter:
incarnation=self._goal_state.incarnation,
container_id=self._goal_state.container_id,
instance_id=self._goal_state.instance_id,
- status=self.PROVISIONING_SUCCESS_STATUS)
- LOG.debug('Reporting ready to Azure fabric.')
+ status=self.PROVISIONING_SUCCESS_STATUS,
+ )
+ LOG.debug("Reporting ready to Azure fabric.")
try:
self._post_health_report(document=document)
except Exception as e:
report_diagnostic_event(
"exception while reporting ready: %s" % e,
- logger_func=LOG.error)
+ logger_func=LOG.error,
+ )
raise
- LOG.info('Reported ready to Azure fabric.')
+ LOG.info("Reported ready to Azure fabric.")
@azure_ds_telemetry_reporter
def send_failure_signal(self, description: str) -> None:
@@ -628,7 +734,8 @@ class GoalStateHealthReporter:
instance_id=self._goal_state.instance_id,
status=self.PROVISIONING_NOT_READY_STATUS,
substatus=self.PROVISIONING_FAILURE_SUBSTATUS,
- description=description)
+ description=description,
+ )
try:
self._post_health_report(document=document)
except Exception as e:
@@ -636,24 +743,33 @@ class GoalStateHealthReporter:
report_diagnostic_event(msg, logger_func=LOG.error)
raise
- LOG.warning('Reported failure to Azure fabric.')
+ LOG.warning("Reported failure to Azure fabric.")
def build_report(
- self, incarnation: str, container_id: str, instance_id: str,
- status: str, substatus=None, description=None) -> str:
- health_detail = ''
+ self,
+ incarnation: str,
+ container_id: str,
+ instance_id: str,
+ status: str,
+ substatus=None,
+ description=None,
+ ) -> str:
+ health_detail = ""
if substatus is not None:
health_detail = self.HEALTH_DETAIL_SUBSECTION_XML_TEMPLATE.format(
health_substatus=escape(substatus),
health_description=escape(
- description[:self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]))
+ description[: self.HEALTH_REPORT_DESCRIPTION_TRIM_LEN]
+ ),
+ )
health_report = self.HEALTH_REPORT_XML_TEMPLATE.format(
incarnation=escape(str(incarnation)),
container_id=escape(container_id),
instance_id=escape(instance_id),
health_status=escape(status),
- health_detail_subsection=health_detail)
+ health_detail_subsection=health_detail,
+ )
return health_report
@@ -681,20 +797,22 @@ class GoalStateHealthReporter:
# reporting handler that writes to the special KVP files.
time.sleep(0)
- LOG.debug('Sending health report to Azure fabric.')
+ LOG.debug("Sending health report to Azure fabric.")
url = "http://{}/machine?comp=health".format(self._endpoint)
self._azure_endpoint_client.post(
url,
data=document,
- extra_headers={'Content-Type': 'text/xml; charset=utf-8'})
- LOG.debug('Successfully sent health report to Azure fabric')
+ extra_headers={"Content-Type": "text/xml; charset=utf-8"},
+ )
+ LOG.debug("Successfully sent health report to Azure fabric")
class WALinuxAgentShim:
-
def __init__(self, fallback_lease_file=None, dhcp_options=None):
- LOG.debug('WALinuxAgentShim instantiated, fallback_lease_file=%s',
- fallback_lease_file)
+ LOG.debug(
+ "WALinuxAgentShim instantiated, fallback_lease_file=%s",
+ fallback_lease_file,
+ )
self.dhcpoptions = dhcp_options
self._endpoint = None
self.openssl_manager = None
@@ -713,30 +831,33 @@ class WALinuxAgentShim:
@property
def endpoint(self):
if self._endpoint is None:
- self._endpoint = self.find_endpoint(self.lease_file,
- self.dhcpoptions)
+ self._endpoint = self.find_endpoint(
+ self.lease_file, self.dhcpoptions
+ )
return self._endpoint
@staticmethod
def get_ip_from_lease_value(fallback_lease_value):
- unescaped_value = fallback_lease_value.replace('\\', '')
+ unescaped_value = fallback_lease_value.replace("\\", "")
if len(unescaped_value) > 4:
- hex_string = ''
- for hex_pair in unescaped_value.split(':'):
+ hex_string = ""
+ for hex_pair in unescaped_value.split(":"):
if len(hex_pair) == 1:
- hex_pair = '0' + hex_pair
+ hex_pair = "0" + hex_pair
hex_string += hex_pair
packed_bytes = struct.pack(
- '>L', int(hex_string.replace(':', ''), 16))
+ ">L", int(hex_string.replace(":", ""), 16)
+ )
else:
- packed_bytes = unescaped_value.encode('utf-8')
+ packed_bytes = unescaped_value.encode("utf-8")
return socket.inet_ntoa(packed_bytes)
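
A worked example of the decoding above: the default wireserver value "a8:3f:81:10" is four hex octets, which pack to the well-known Azure endpoint 168.63.129.16.

    import socket
    import struct

    value = "a8:3f:81:10"                       # DEFAULT_WIRESERVER_ENDPOINT
    hex_string = "".join(
        pair if len(pair) == 2 else "0" + pair  # left-pad single hex digits
        for pair in value.split(":")
    )
    packed = struct.pack(">L", int(hex_string, 16))
    print(socket.inet_ntoa(packed))             # 168.63.129.16
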
@staticmethod
@azure_ds_telemetry_reporter
def _networkd_get_value_from_leases(leases_d=None):
return dhcp.networkd_get_option_from_leases(
- 'OPTION_245', leases_d=leases_d)
+ "OPTION_245", leases_d=leases_d
+ )
@staticmethod
@azure_ds_telemetry_reporter
@@ -754,7 +875,7 @@ class WALinuxAgentShim:
if option_name in line:
# Example line from Ubuntu
# option unknown-245 a8:3f:81:10;
- leases.append(line.strip(' ').split(' ', 2)[-1].strip(';\n"'))
+ leases.append(line.strip(" ").split(" ", 2)[-1].strip(';\n"'))
# Return the "most recent" one in the list
if len(leases) < 1:
return None
@@ -769,15 +890,16 @@ class WALinuxAgentShim:
if not os.path.exists(hooks_dir):
LOG.debug("%s not found.", hooks_dir)
return None
- hook_files = [os.path.join(hooks_dir, x)
- for x in os.listdir(hooks_dir)]
+ hook_files = [
+ os.path.join(hooks_dir, x) for x in os.listdir(hooks_dir)
+ ]
for hook_file in hook_files:
try:
- name = os.path.basename(hook_file).replace('.json', '')
+ name = os.path.basename(hook_file).replace(".json", "")
dhcp_options[name] = json.loads(util.load_file((hook_file)))
except ValueError as e:
raise ValueError(
- '{_file} is not valid JSON data'.format(_file=hook_file)
+ "{_file} is not valid JSON data".format(_file=hook_file)
) from e
return dhcp_options
@@ -789,7 +911,7 @@ class WALinuxAgentShim:
# the MS endpoint server is given to us as DHCP option 245
_value = None
for interface in dhcp_options:
- _value = dhcp_options[interface].get('unknown_245', None)
+ _value = dhcp_options[interface].get("unknown_245", None)
if _value is not None:
LOG.debug("Endpoint server found in dhclient options")
break
@@ -819,51 +941,73 @@ class WALinuxAgentShim:
LOG.debug("Using Azure Endpoint from dhcp options")
if value is None:
report_diagnostic_event(
- 'No Azure endpoint from dhcp options. '
- 'Finding Azure endpoint from networkd...',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhcp options. "
+ "Finding Azure endpoint from networkd...",
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._networkd_get_value_from_leases()
if value is None:
# Option-245 stored in /run/cloud-init/dhclient.hooks/<ifc>.json
# a dhclient exit hook that calls cloud-init-dhclient-hook
report_diagnostic_event(
- 'No Azure endpoint from networkd. '
- 'Finding Azure endpoint from hook json...',
- logger_func=LOG.debug)
+ "No Azure endpoint from networkd. "
+ "Finding Azure endpoint from hook json...",
+ logger_func=LOG.debug,
+ )
dhcp_options = WALinuxAgentShim._load_dhclient_json()
value = WALinuxAgentShim._get_value_from_dhcpoptions(dhcp_options)
if value is None:
# Fallback and check the leases file if unsuccessful
report_diagnostic_event(
- 'No Azure endpoint from dhclient logs. '
- 'Unable to find endpoint in dhclient logs. '
- 'Falling back to check lease files',
- logger_func=LOG.debug)
+ "No Azure endpoint from dhclient logs. "
+ "Unable to find endpoint in dhclient logs. "
+ "Falling back to check lease files",
+ logger_func=LOG.debug,
+ )
if fallback_lease_file is None:
report_diagnostic_event(
- 'No fallback lease file was specified.',
- logger_func=LOG.warning)
+ "No fallback lease file was specified.",
+ logger_func=LOG.warning,
+ )
value = None
else:
report_diagnostic_event(
- 'Looking for endpoint in lease file %s'
- % fallback_lease_file, logger_func=LOG.debug)
+ "Looking for endpoint in lease file %s"
+ % fallback_lease_file,
+ logger_func=LOG.debug,
+ )
value = WALinuxAgentShim._get_value_from_leases_file(
- fallback_lease_file)
+ fallback_lease_file
+ )
if value is None:
value = DEFAULT_WIRESERVER_ENDPOINT
report_diagnostic_event(
- 'No lease found; using default endpoint: %s' % value,
- logger_func=LOG.warning)
+ "No lease found; using default endpoint: %s" % value,
+ logger_func=LOG.warning,
+ )
endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
report_diagnostic_event(
- 'Azure endpoint found at %s' % endpoint_ip_address,
- logger_func=LOG.debug)
+ "Azure endpoint found at %s" % endpoint_ip_address,
+ logger_func=LOG.debug,
+ )
return endpoint_ip_address
@azure_ds_telemetry_reporter
- def register_with_azure_and_fetch_data(self, pubkey_info=None) -> dict:
+ def eject_iso(self, iso_dev) -> None:
+ try:
+ LOG.debug("Ejecting the provisioning iso")
+ subp.subp(["eject", iso_dev])
+ except Exception as e:
+ report_diagnostic_event(
+ "Failed ejecting the provisioning iso: %s" % e,
+ logger_func=LOG.debug,
+ )
+
+ @azure_ds_telemetry_reporter
+ def register_with_azure_and_fetch_data(
+ self, pubkey_info=None, iso_dev=None
+ ) -> Optional[List[str]]:
"""Gets the VM's GoalState from Azure, uses the GoalState information
to report ready/send the ready signal/provisioning complete signal to
Azure, and then uses pubkey_info to filter and obtain the user's
@@ -880,7 +1024,8 @@ class WALinuxAgentShim:
http_client_certificate = self.openssl_manager.certificate
if self.azure_endpoint_client is None:
self.azure_endpoint_client = AzureEndpointHttpClient(
- http_client_certificate)
+ http_client_certificate
+ )
goal_state = self._fetch_goal_state_from_azure(
need_certificate=http_client_certificate is not None
)
@@ -888,9 +1033,14 @@ class WALinuxAgentShim:
if pubkey_info is not None:
ssh_keys = self._get_user_pubkeys(goal_state, pubkey_info)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
+
+ if iso_dev is not None:
+ self.eject_iso(iso_dev)
+
health_reporter.send_ready_signal()
- return {'public-keys': ssh_keys}
+ return ssh_keys
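
Two behavioral changes land here: when an iso_dev is passed, the provisioning ISO is ejected before the ready signal is sent, and the method now returns the SSH key list directly instead of a {'public-keys': ...} dict. Illustrative caller sketch using a stub (names invented):

    class FakeShim:
        # Stand-in for WALinuxAgentShim, for illustration only.
        def register_with_azure_and_fetch_data(
            self, pubkey_info=None, iso_dev=None
        ):
            if iso_dev is not None:
                print("would eject %s before reporting ready" % iso_dev)
            return ["ssh-rsa AAAA... example"]  # keys returned directly now

    keys = FakeShim().register_with_azure_and_fetch_data(
        pubkey_info=[{"value": "ssh-rsa AAAA... example"}],
        iso_dev="/dev/sr0",  # hypothetical device path
    )
    print(keys)
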
@azure_ds_telemetry_reporter
def register_with_azure_and_report_failure(self, description: str) -> None:
@@ -903,13 +1053,14 @@ class WALinuxAgentShim:
self.azure_endpoint_client = AzureEndpointHttpClient(None)
goal_state = self._fetch_goal_state_from_azure(need_certificate=False)
health_reporter = GoalStateHealthReporter(
- goal_state, self.azure_endpoint_client, self.endpoint)
+ goal_state, self.azure_endpoint_client, self.endpoint
+ )
health_reporter.send_failure_signal(description=description)
@azure_ds_telemetry_reporter
def _fetch_goal_state_from_azure(
- self,
- need_certificate: bool) -> GoalState:
+ self, need_certificate: bool
+ ) -> GoalState:
"""Fetches the GoalState XML from the Azure endpoint, parses the XML,
and returns a GoalState object.
@@ -918,8 +1069,7 @@ class WALinuxAgentShim:
"""
unparsed_goal_state_xml = self._get_raw_goal_state_xml_from_azure()
return self._parse_raw_goal_state_xml(
- unparsed_goal_state_xml,
- need_certificate
+ unparsed_goal_state_xml, need_certificate
)
@azure_ds_telemetry_reporter
@@ -930,27 +1080,29 @@ class WALinuxAgentShim:
@return: GoalState XML string
"""
- LOG.info('Registering with Azure...')
- url = 'http://{}/machine/?comp=goalstate'.format(self.endpoint)
+ LOG.info("Registering with Azure...")
+ url = "http://{}/machine/?comp=goalstate".format(self.endpoint)
try:
with events.ReportEventStack(
- name="goalstate-retrieval",
- description="retrieve goalstate",
- parent=azure_ds_reporter):
+ name="goalstate-retrieval",
+ description="retrieve goalstate",
+ parent=azure_ds_reporter,
+ ):
response = self.azure_endpoint_client.get(url)
except Exception as e:
report_diagnostic_event(
- 'failed to register with Azure and fetch GoalState XML: %s'
- % e, logger_func=LOG.warning)
+ "failed to register with Azure and fetch GoalState XML: %s"
+ % e,
+ logger_func=LOG.warning,
+ )
raise
- LOG.debug('Successfully fetched GoalState XML.')
+ LOG.debug("Successfully fetched GoalState XML.")
return response.contents
@azure_ds_telemetry_reporter
def _parse_raw_goal_state_xml(
- self,
- unparsed_goal_state_xml: str,
- need_certificate: bool) -> GoalState:
+ self, unparsed_goal_state_xml: str, need_certificate: bool
+ ) -> GoalState:
"""Parses a GoalState XML string and returns a GoalState object.
@param unparsed_goal_state_xml: GoalState XML string
@@ -961,23 +1113,28 @@ class WALinuxAgentShim:
goal_state = GoalState(
unparsed_goal_state_xml,
self.azure_endpoint_client,
- need_certificate
+ need_certificate,
)
except Exception as e:
report_diagnostic_event(
- 'Error processing GoalState XML: %s' % e,
- logger_func=LOG.warning)
+ "Error processing GoalState XML: %s" % e,
+ logger_func=LOG.warning,
+ )
raise
- msg = ', '.join([
- 'GoalState XML container id: %s' % goal_state.container_id,
- 'GoalState XML instance id: %s' % goal_state.instance_id,
- 'GoalState XML incarnation: %s' % goal_state.incarnation])
+ msg = ", ".join(
+ [
+ "GoalState XML container id: %s" % goal_state.container_id,
+ "GoalState XML instance id: %s" % goal_state.instance_id,
+ "GoalState XML incarnation: %s" % goal_state.incarnation,
+ ]
+ )
report_diagnostic_event(msg, logger_func=LOG.debug)
return goal_state
@azure_ds_telemetry_reporter
def _get_user_pubkeys(
- self, goal_state: GoalState, pubkey_info: list) -> list:
+ self, goal_state: GoalState, pubkey_info: list
+ ) -> list:
"""Gets and filters the VM admin user's authorized pubkeys.
The admin user in this case is the username specified as "admin"
@@ -1005,15 +1162,16 @@ class WALinuxAgentShim:
"""
ssh_keys = []
if goal_state.certificates_xml is not None and pubkey_info is not None:
- LOG.debug('Certificate XML found; parsing out public keys.')
+ LOG.debug("Certificate XML found; parsing out public keys.")
keys_by_fingerprint = self.openssl_manager.parse_certificates(
- goal_state.certificates_xml)
+ goal_state.certificates_xml
+ )
ssh_keys = self._filter_pubkeys(keys_by_fingerprint, pubkey_info)
return ssh_keys
@staticmethod
def _filter_pubkeys(keys_by_fingerprint: dict, pubkey_info: list) -> list:
- """ Filter and return only the user's actual pubkeys.
+ """Filter and return only the user's actual pubkeys.
@param keys_by_fingerprint: pubkey fingerprint -> pubkey value dict
that was obtained from GoalState Certificates XML. May contain
@@ -1026,70 +1184,65 @@ class WALinuxAgentShim:
"""
keys = []
for pubkey in pubkey_info:
- if 'value' in pubkey and pubkey['value']:
- keys.append(pubkey['value'])
- elif 'fingerprint' in pubkey and pubkey['fingerprint']:
- fingerprint = pubkey['fingerprint']
+ if "value" in pubkey and pubkey["value"]:
+ keys.append(pubkey["value"])
+ elif "fingerprint" in pubkey and pubkey["fingerprint"]:
+ fingerprint = pubkey["fingerprint"]
if fingerprint in keys_by_fingerprint:
keys.append(keys_by_fingerprint[fingerprint])
else:
- LOG.warning("ovf-env.xml specified PublicKey fingerprint "
- "%s not found in goalstate XML", fingerprint)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey fingerprint "
+ "%s not found in goalstate XML",
+ fingerprint,
+ )
else:
- LOG.warning("ovf-env.xml specified PublicKey with neither "
- "value nor fingerprint: %s", pubkey)
+ LOG.warning(
+ "ovf-env.xml specified PublicKey with neither "
+ "value nor fingerprint: %s",
+ pubkey,
+ )
return keys
@azure_ds_telemetry_reporter
-def get_metadata_from_fabric(fallback_lease_file=None, dhcp_opts=None,
- pubkey_info=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def get_metadata_from_fabric(
+ fallback_lease_file=None, dhcp_opts=None, pubkey_info=None, iso_dev=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
try:
- return shim.register_with_azure_and_fetch_data(pubkey_info=pubkey_info)
+ return shim.register_with_azure_and_fetch_data(
+ pubkey_info=pubkey_info, iso_dev=iso_dev
+ )
finally:
shim.clean_up()
@azure_ds_telemetry_reporter
-def report_failure_to_fabric(fallback_lease_file=None, dhcp_opts=None,
- description=None):
- shim = WALinuxAgentShim(fallback_lease_file=fallback_lease_file,
- dhcp_options=dhcp_opts)
+def report_failure_to_fabric(
+ fallback_lease_file=None, dhcp_opts=None, description=None
+):
+ shim = WALinuxAgentShim(
+ fallback_lease_file=fallback_lease_file, dhcp_options=dhcp_opts
+ )
if not description:
description = DEFAULT_REPORT_FAILURE_USER_VISIBLE_MESSAGE
try:
- shim.register_with_azure_and_report_failure(
- description=description)
+ shim.register_with_azure_and_report_failure(description=description)
finally:
shim.clean_up()
def dhcp_log_cb(out, err):
report_diagnostic_event(
- "dhclient output stream: %s" % out, logger_func=LOG.debug)
+ "dhclient output stream: %s" % out, logger_func=LOG.debug
+ )
report_diagnostic_event(
- "dhclient error stream: %s" % err, logger_func=LOG.debug)
-
-
-class EphemeralDHCPv4WithReporting:
- def __init__(self, reporter, nic=None):
- self.reporter = reporter
- self.ephemeralDHCPv4 = EphemeralDHCPv4(
- iface=nic, dhcp_log_func=dhcp_log_cb)
-
- def __enter__(self):
- with events.ReportEventStack(
- name="obtain-dhcp-lease",
- description="obtain dhcp lease",
- parent=self.reporter):
- return self.ephemeralDHCPv4.__enter__()
-
- def __exit__(self, excp_type, excp_value, excp_traceback):
- self.ephemeralDHCPv4.__exit__(
- excp_type, excp_value, excp_traceback)
+ "dhclient error stream: %s" % err, logger_func=LOG.debug
+ )
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py
index f9be4ecb..72515caf 100644
--- a/cloudinit/sources/helpers/digitalocean.py
+++ b/cloudinit/sources/helpers/digitalocean.py
@@ -8,20 +8,18 @@ import random
from cloudinit import dmi
from cloudinit import log as logging
from cloudinit import net as cloudnet
-from cloudinit import url_helper
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, url_helper, util
-NIC_MAP = {'public': 'eth0', 'private': 'eth1'}
+NIC_MAP = {"public": "eth0", "private": "eth1"}
LOG = logging.getLogger(__name__)
def assign_ipv4_link_local(distro, nic=None):
- """Bring up NIC using an address using link-local (ip4LL) IPs. On
- DigitalOcean, the link-local domain is per-droplet routed, so there
- is no risk of collisions. However, to be more safe, the ip4LL
- address is random.
+ """Bring up NIC using an address using link-local (ip4LL) IPs.
+ On DigitalOcean, the link-local domain is per-droplet routed, so there
+ is no risk of collisions. However, to be more safe, the ip4LL
+ address is random.
"""
if not nic:
@@ -29,18 +27,22 @@ def assign_ipv4_link_local(distro, nic=None):
LOG.debug("selected interface '%s' for reading metadata", nic)
if not nic:
- raise RuntimeError("unable to find interfaces to access the"
- "meta-data server. This droplet is broken.")
+ raise RuntimeError(
+ "unable to find interfaces to access the"
+ "meta-data server. This droplet is broken."
+ )
- addr = "169.254.{0}.{1}/16".format(random.randint(1, 168),
- random.randint(0, 255))
+ addr = "169.254.{0}.{1}/16".format(
+ random.randint(1, 168), random.randint(0, 255)
+ )
- ip_addr_cmd = ['ip', 'addr', 'add', addr, 'dev', nic]
- ip_link_cmd = ['ip', 'link', 'set', 'dev', nic, 'up']
+ ip_addr_cmd = ["ip", "addr", "add", addr, "dev", nic]
+ ip_link_cmd = ["ip", "link", "set", "dev", nic, "up"]
- if not subp.which('ip'):
- raise RuntimeError("No 'ip' command available to configure ip4LL "
- "address")
+ if not subp.which("ip"):
+ raise RuntimeError(
+ "No 'ip' command available to configure ip4LL address"
+ )
try:
subp.subp(ip_addr_cmd)
@@ -48,8 +50,13 @@ def assign_ipv4_link_local(distro, nic=None):
subp.subp(ip_link_cmd)
LOG.debug("brought device '%s' up", nic)
except Exception:
- util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed."
- " Droplet networking will be broken", addr, nic)
+ util.logexc(
+ LOG,
+ "ip4LL address assignment of '%s' to '%s' failed."
+ " Droplet networking will be broken",
+ addr,
+ nic,
+ )
raise
return nic
@@ -63,21 +70,23 @@ def get_link_local_nic(distro):
]
if not nics:
return None
- return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, 'ifindex'))
+ return min(nics, key=lambda d: cloudnet.read_sys_net_int(d, "ifindex"))
def del_ipv4_link_local(nic=None):
"""Remove the ip4LL address. While this is not necessary, the ip4LL
- address is extraneous and confusing to users.
+ address is extraneous and confusing to users.
"""
if not nic:
- LOG.debug("no link_local address interface defined, skipping link "
- "local address cleanup")
+ LOG.debug(
+ "no link_local address interface defined, skipping link "
+ "local address cleanup"
+ )
return
LOG.debug("cleaning up ipv4LL address")
- ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic]
+ ip_addr_cmd = ["ip", "addr", "flush", "dev", nic]
try:
subp.subp(ip_addr_cmd)
@@ -89,44 +98,47 @@ def del_ipv4_link_local(nic=None):
def convert_network_configuration(config, dns_servers):
"""Convert the DigitalOcean Network description into Cloud-init's netconfig
- format.
-
- Example JSON:
- {'public': [
- {'mac': '04:01:58:27:7f:01',
- 'ipv4': {'gateway': '45.55.32.1',
- 'netmask': '255.255.224.0',
- 'ip_address': '45.55.50.93'},
- 'anchor_ipv4': {
- 'gateway': '10.17.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.17.0.9'},
- 'type': 'public',
- 'ipv6': {'gateway': '....',
- 'ip_address': '....',
- 'cidr': 64}}
- ],
- 'private': [
- {'mac': '04:01:58:27:7f:02',
- 'ipv4': {'gateway': '10.132.0.1',
- 'netmask': '255.255.0.0',
- 'ip_address': '10.132.75.35'},
- 'type': 'private'}
- ]
- }
+ format.
+
+ Example JSON:
+ {'public': [
+ {'mac': '04:01:58:27:7f:01',
+ 'ipv4': {'gateway': '45.55.32.1',
+ 'netmask': '255.255.224.0',
+ 'ip_address': '45.55.50.93'},
+ 'anchor_ipv4': {
+ 'gateway': '10.17.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.17.0.9'},
+ 'type': 'public',
+ 'ipv6': {'gateway': '....',
+ 'ip_address': '....',
+ 'cidr': 64}}
+ ],
+ 'private': [
+ {'mac': '04:01:58:27:7f:02',
+ 'ipv4': {'gateway': '10.132.0.1',
+ 'netmask': '255.255.0.0',
+ 'ip_address': '10.132.75.35'},
+ 'type': 'private'}
+ ]
+ }
"""
def _get_subnet_part(pcfg):
- subpart = {'type': 'static',
- 'control': 'auto',
- 'address': pcfg.get('ip_address'),
- 'gateway': pcfg.get('gateway')}
-
- if ":" in pcfg.get('ip_address'):
- subpart['address'] = "{0}/{1}".format(pcfg.get('ip_address'),
- pcfg.get('cidr'))
+ subpart = {
+ "type": "static",
+ "control": "auto",
+ "address": pcfg.get("ip_address"),
+ "gateway": pcfg.get("gateway"),
+ }
+
+ if ":" in pcfg.get("ip_address"):
+ subpart["address"] = "{0}/{1}".format(
+ pcfg.get("ip_address"), pcfg.get("cidr")
+ )
else:
- subpart['netmask'] = pcfg.get('netmask')
+ subpart["netmask"] = pcfg.get("netmask")
return subpart
@@ -138,54 +150,66 @@ def convert_network_configuration(config, dns_servers):
nic = config[n][0]
LOG.debug("considering %s", nic)
- mac_address = nic.get('mac')
+ mac_address = nic.get("mac")
if mac_address not in macs_to_nics:
- raise RuntimeError("Did not find network interface on system "
- "with mac '%s'. Cannot apply configuration: %s"
- % (mac_address, nic))
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, nic)
+ )
sysfs_name = macs_to_nics.get(mac_address)
- nic_type = nic.get('type', 'unknown')
+ nic_type = nic.get("type", "unknown")
if_name = NIC_MAP.get(nic_type, sysfs_name)
if if_name != sysfs_name:
- LOG.debug("Found %s interface '%s' on '%s', assigned name of '%s'",
- nic_type, mac_address, sysfs_name, if_name)
+ LOG.debug(
+ "Found %s interface '%s' on '%s', assigned name of '%s'",
+ nic_type,
+ mac_address,
+ sysfs_name,
+ if_name,
+ )
else:
- msg = ("Found interface '%s' on '%s', which is not a public "
- "or private interface. Using default system naming.")
+ msg = (
+ "Found interface '%s' on '%s', which is not a public "
+ "or private interface. Using default system naming."
+ )
LOG.debug(msg, mac_address, sysfs_name)
- ncfg = {'type': 'physical',
- 'mac_address': mac_address,
- 'name': if_name}
+ ncfg = {
+ "type": "physical",
+ "mac_address": mac_address,
+ "name": if_name,
+ }
subnets = []
- for netdef in ('ipv4', 'ipv6', 'anchor_ipv4', 'anchor_ipv6'):
+ for netdef in ("ipv4", "ipv6", "anchor_ipv4", "anchor_ipv6"):
raw_subnet = nic.get(netdef, None)
if not raw_subnet:
continue
sub_part = _get_subnet_part(raw_subnet)
if nic_type != "public" or "anchor" in netdef:
- del sub_part['gateway']
+ del sub_part["gateway"]
subnets.append(sub_part)
- ncfg['subnets'] = subnets
+ ncfg["subnets"] = subnets
nic_configs.append(ncfg)
LOG.debug("nic '%s' configuration: %s", if_name, ncfg)
if dns_servers:
LOG.debug("added dns servers: %s", dns_servers)
- nic_configs.append({'type': 'nameserver', 'address': dns_servers})
+ nic_configs.append({"type": "nameserver", "address": dns_servers})
- return {'version': 1, 'config': nic_configs}
+ return {"version": 1, "config": nic_configs}
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return json.loads(response.contents.decode())
@@ -202,16 +226,21 @@ def read_sysinfo():
droplet_id = dmi.read_dmi_data("system-serial-number")
if droplet_id:
- LOG.debug("system identified via SMBIOS as DigitalOcean Droplet: %s",
- droplet_id)
+ LOG.debug(
+ "system identified via SMBIOS as DigitalOcean Droplet: %s",
+ droplet_id,
+ )
else:
- msg = ("system identified via SMBIOS as a DigitalOcean "
- "Droplet, but did not provide an ID. Please file a "
- "support ticket at: "
- "https://cloud.digitalocean.com/support/tickets/new")
+ msg = (
+ "system identified via SMBIOS as a DigitalOcean "
+ "Droplet, but did not provide an ID. Please file a "
+ "support ticket at: "
+ "https://cloud.digitalocean.com/support/tickets/new"
+ )
LOG.critical(msg)
raise RuntimeError(msg)
return (True, droplet_id)
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py
index 33dc4c53..592ae80b 100644
--- a/cloudinit/sources/helpers/hetzner.py
+++ b/cloudinit/sources/helpers/hetzner.py
@@ -3,24 +3,25 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import url_helper
-from cloudinit import util
-
import base64
import binascii
+from cloudinit import url_helper, util
+
def read_metadata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read metadata at %s" % url)
return util.load_yaml(response.contents.decode())
def read_userdata(url, timeout=2, sec_between=2, retries=30):
- response = url_helper.readurl(url, timeout=timeout,
- sec_between=sec_between, retries=retries)
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
if not response.ok():
raise RuntimeError("unable to read userdata at %s" % url)
return response.contents
diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py
index e13d6834..2953e858 100644
--- a/cloudinit/sources/helpers/netlink.py
+++ b/cloudinit/sources/helpers/netlink.py
@@ -2,14 +2,14 @@
#
# This file is part of cloud-init. See LICENSE file for license information.
-from cloudinit import log as logging
-from cloudinit import util
-from collections import namedtuple
-
import os
import select
import socket
import struct
+from collections import namedtuple
+
+from cloudinit import log as logging
+from cloudinit import util
LOG = logging.getLogger(__name__)
@@ -47,29 +47,30 @@ OPER_TESTING = 4
OPER_DORMANT = 5
OPER_UP = 6
-RTAAttr = namedtuple('RTAAttr', ['length', 'rta_type', 'data'])
-InterfaceOperstate = namedtuple('InterfaceOperstate', ['ifname', 'operstate'])
-NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq',
- 'pid'])
+RTAAttr = namedtuple("RTAAttr", ["length", "rta_type", "data"])
+InterfaceOperstate = namedtuple("InterfaceOperstate", ["ifname", "operstate"])
+NetlinkHeader = namedtuple(
+ "NetlinkHeader", ["length", "type", "flags", "seq", "pid"]
+)
class NetlinkCreateSocketError(RuntimeError):
- '''Raised if netlink socket fails during create or bind.'''
+ """Raised if netlink socket fails during create or bind."""
def create_bound_netlink_socket():
- '''Creates netlink socket and bind on netlink group to catch interface
+ """Creates netlink socket and bind on netlink group to catch interface
down/up events. The socket will bound only on RTMGRP_LINK (which only
includes RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK events). The socket is set to
non-blocking mode since we're only receiving messages.
:returns: netlink socket in non-blocking mode
:raises: NetlinkCreateSocketError
- '''
+ """
try:
- netlink_socket = socket.socket(socket.AF_NETLINK,
- socket.SOCK_RAW,
- socket.NETLINK_ROUTE)
+ netlink_socket = socket.socket(
+ socket.AF_NETLINK, socket.SOCK_RAW, socket.NETLINK_ROUTE
+ )
netlink_socket.bind((os.getpid(), RTMGRP_LINK))
netlink_socket.setblocking(0)
except socket.error as e:
@@ -80,7 +81,7 @@ def create_bound_netlink_socket():
def get_netlink_msg_header(data):
- '''Gets netlink message type and length
+ """Gets netlink message type and length
:param: data read from netlink socket
:returns: netlink message type
@@ -92,18 +93,20 @@ def get_netlink_msg_header(data):
__u32 nlmsg_seq; /* Sequence number */
__u32 nlmsg_pid; /* Sender port ID */
};
- '''
- assert (data is not None), ("data is none")
- assert (len(data) >= NLMSGHDR_SIZE), (
- "data is smaller than netlink message header")
- msg_len, msg_type, flags, seq, pid = struct.unpack(NLMSGHDR_FMT,
- data[:MSG_TYPE_OFFSET])
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) >= NLMSGHDR_SIZE
+ ), "data is smaller than netlink message header"
+ msg_len, msg_type, flags, seq, pid = struct.unpack(
+ NLMSGHDR_FMT, data[:MSG_TYPE_OFFSET]
+ )
LOG.debug("Got netlink msg of type %d", msg_type)
return NetlinkHeader(msg_len, msg_type, flags, seq, pid)
def read_netlink_socket(netlink_socket, timeout=None):
- '''Select and read from the netlink socket if ready.
+ """Select and read from the netlink socket if ready.
:param: netlink_socket: specify which socket object to read from
:param: timeout: specify a timeout value (integer) to wait while reading,
@@ -111,8 +114,8 @@ def read_netlink_socket(netlink_socket, timeout=None):
    :returns: data read from the socket (at most MAX_SIZE bytes);
    returns None if no data was read
:raises: AssertionError if netlink_socket is None
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
read_set, _, _ = select.select([netlink_socket], [], [], timeout)
    # In case of a timeout, read_set doesn't contain the netlink socket;
    # just return from this function.
@@ -126,32 +129,33 @@ def read_netlink_socket(netlink_socket, timeout=None):
def unpack_rta_attr(data, offset):
- '''Unpack a single rta attribute.
+ """Unpack a single rta attribute.
:param: data: string of data read from netlink socket
:param: offset: starting offset of RTA Attribute
:return: RTAAttr object with length, type and data. On error, return None.
:raises: AssertionError if data is None or offset is not integer.
- '''
- assert (data is not None), ("data is none")
- assert (type(offset) == int), ("offset is not integer")
- assert (offset >= RTATTR_START_OFFSET), (
- "rta offset is less than expected length")
+ """
+ assert data is not None, "data is none"
+ assert type(offset) == int, "offset is not integer"
+ assert (
+ offset >= RTATTR_START_OFFSET
+ ), "rta offset is less than expected length"
length = rta_type = 0
attr_data = None
try:
length = struct.unpack_from("H", data, offset=offset)[0]
- rta_type = struct.unpack_from("H", data, offset=offset+2)[0]
+ rta_type = struct.unpack_from("H", data, offset=offset + 2)[0]
except struct.error:
return None # Should mean our offset is >= remaining data
# Unpack just the attribute's data. Offset by 4 to skip length/type header
- attr_data = data[offset+RTA_DATA_START_OFFSET:offset+length]
+ attr_data = data[offset + RTA_DATA_START_OFFSET : offset + length]
return RTAAttr(length, rta_type, attr_data)
def read_rta_oper_state(data):
- '''Reads Interface name and operational state from RTA Data.
+ """Reads Interface name and operational state from RTA Data.
:param: data: string of data read from netlink socket
:returns: InterfaceOperstate object containing if_name and oper_state.
@@ -159,10 +163,11 @@ def read_rta_oper_state(data):
IFLA_IFNAME messages.
:raises: AssertionError if data is None or length of data is
smaller than RTATTR_START_OFFSET.
- '''
- assert (data is not None), ("data is none")
- assert (len(data) > RTATTR_START_OFFSET), (
- "length of data is smaller than RTATTR_START_OFFSET")
+ """
+ assert data is not None, "data is none"
+ assert (
+ len(data) > RTATTR_START_OFFSET
+ ), "length of data is smaller than RTATTR_START_OFFSET"
ifname = operstate = None
offset = RTATTR_START_OFFSET
while offset <= len(data):
@@ -170,15 +175,16 @@ def read_rta_oper_state(data):
if not attr or attr.length == 0:
break
# Each attribute is 4-byte aligned. Determine pad length.
- padlen = (PAD_ALIGNMENT -
- (attr.length % PAD_ALIGNMENT)) % PAD_ALIGNMENT
+ padlen = (
+ PAD_ALIGNMENT - (attr.length % PAD_ALIGNMENT)
+ ) % PAD_ALIGNMENT
offset += attr.length + padlen
if attr.rta_type == IFLA_OPERSTATE:
operstate = ord(attr.data)
elif attr.rta_type == IFLA_IFNAME:
- interface_name = util.decode_binary(attr.data, 'utf-8')
- ifname = interface_name.strip('\0')
+ interface_name = util.decode_binary(attr.data, "utf-8")
+ ifname = interface_name.strip("\0")
if not ifname or operstate is None:
return None
LOG.debug("rta attrs: ifname %s operstate %d", ifname, operstate)
@@ -186,12 +192,12 @@ def read_rta_oper_state(data):
def wait_for_nic_attach_event(netlink_socket, existing_nics):
- '''Block until a single nic is attached.
+ """Block until a single nic is attached.
:param: netlink_socket: netlink_socket to receive events
:param: existing_nics: List of existing nics so that we can skip them.
:raises: AssertionError if netlink_socket is none.
- '''
+ """
LOG.debug("Preparing to wait for nic attach.")
ifname = None
@@ -204,19 +210,21 @@ def wait_for_nic_attach_event(netlink_socket, existing_nics):
# We can return even if the operational state of the new nic is DOWN
# because we set it to UP before doing dhcp.
- read_netlink_messages(netlink_socket,
- None,
- [RTM_NEWLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket,
+ None,
+ [RTM_NEWLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
return ifname
def wait_for_nic_detach_event(netlink_socket):
- '''Block until a single nic is detached and its operational state is down.
+ """Block until a single nic is detached and its operational state is down.
:param: netlink_socket: netlink_socket to receive events.
- '''
+ """
LOG.debug("Preparing to wait for nic detach.")
ifname = None
@@ -225,16 +233,14 @@ def wait_for_nic_detach_event(netlink_socket):
ifname = iname
return False
- read_netlink_messages(netlink_socket,
- None,
- [RTM_DELLINK],
- [OPER_DOWN],
- should_continue_cb)
+ read_netlink_messages(
+ netlink_socket, None, [RTM_DELLINK], [OPER_DOWN], should_continue_cb
+ )
return ifname
def wait_for_media_disconnect_connect(netlink_socket, ifname):
- '''Block until media disconnect and connect has happened on an interface.
+ """Block until media disconnect and connect has happened on an interface.
    Listens on the netlink socket for netlink events and, when the carrier
    changes from 0 to 1, considers the event to have happened and
    returns from this function.
@@ -242,10 +248,10 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
:param: netlink_socket: netlink_socket to receive events
:param: ifname: Interface name to lookout for netlink events
:raises: AssertionError if netlink_socket is None or ifname is None.
- '''
- assert (netlink_socket is not None), ("netlink socket is none")
- assert (ifname is not None), ("interface name is none")
- assert (len(ifname) > 0), ("interface name cannot be empty")
+ """
+ assert netlink_socket is not None, "netlink socket is none"
+ assert ifname is not None, "interface name is none"
+ assert len(ifname) > 0, "interface name cannot be empty"
def should_continue_cb(iname, carrier, prevCarrier):
# check for carrier down, up sequence
@@ -256,19 +262,23 @@ def wait_for_media_disconnect_connect(netlink_socket, ifname):
return True
LOG.debug("Wait for media disconnect and reconnect to happen")
- read_netlink_messages(netlink_socket,
- ifname,
- [RTM_NEWLINK, RTM_DELLINK],
- [OPER_UP, OPER_DOWN],
- should_continue_cb)
-
-
-def read_netlink_messages(netlink_socket,
- ifname_filter,
- rtm_types,
- operstates,
- should_continue_callback):
- ''' Reads from the netlink socket until the condition specified by
+ read_netlink_messages(
+ netlink_socket,
+ ifname,
+ [RTM_NEWLINK, RTM_DELLINK],
+ [OPER_UP, OPER_DOWN],
+ should_continue_cb,
+ )
+
+
+def read_netlink_messages(
+ netlink_socket,
+ ifname_filter,
+ rtm_types,
+ operstates,
+ should_continue_callback,
+):
+ """Reads from the netlink socket until the condition specified by
the continuation callback is met.
:param: netlink_socket: netlink_socket to receive events.
@@ -276,7 +286,7 @@ def read_netlink_messages(netlink_socket,
:param: rtm_types: Type of netlink events to listen for.
    :param: operstates: Operational states to listen for.
:param: should_continue_callback: Specifies when to stop listening.
- '''
+ """
if netlink_socket is None:
raise RuntimeError("Netlink socket is none")
data = bytes()
@@ -286,9 +296,9 @@ def read_netlink_messages(netlink_socket,
recv_data = read_netlink_socket(netlink_socket, SELECT_TIMEOUT)
if recv_data is None:
continue
- LOG.debug('read %d bytes from socket', len(recv_data))
+ LOG.debug("read %d bytes from socket", len(recv_data))
data += recv_data
- LOG.debug('Length of data after concat %d', len(data))
+ LOG.debug("Length of data after concat %d", len(data))
offset = 0
datalen = len(data)
while offset < datalen:
@@ -300,30 +310,37 @@ def read_netlink_messages(netlink_socket,
if len(nl_msg) < nlheader.length:
LOG.debug("Partial data. Smaller than netlink message")
break
- padlen = (nlheader.length+PAD_ALIGNMENT-1) & ~(PAD_ALIGNMENT-1)
+ padlen = (nlheader.length + PAD_ALIGNMENT - 1) & ~(
+ PAD_ALIGNMENT - 1
+ )
offset = offset + padlen
- LOG.debug('offset to next netlink message: %d', offset)
+ LOG.debug("offset to next netlink message: %d", offset)
# Continue if we are not interested in this message.
if nlheader.type not in rtm_types:
continue
interface_state = read_rta_oper_state(nl_msg)
if interface_state is None:
- LOG.debug('Failed to read rta attributes: %s', interface_state)
+ LOG.debug("Failed to read rta attributes: %s", interface_state)
continue
- if (ifname_filter is not None and
- interface_state.ifname != ifname_filter):
+ if (
+ ifname_filter is not None
+ and interface_state.ifname != ifname_filter
+ ):
LOG.debug(
"Ignored netlink event on interface %s. Waiting for %s.",
- interface_state.ifname, ifname_filter)
+ interface_state.ifname,
+ ifname_filter,
+ )
continue
if interface_state.operstate not in operstates:
continue
prevCarrier = carrier
carrier = interface_state.operstate
- if not should_continue_callback(interface_state.ifname,
- carrier,
- prevCarrier):
+ if not should_continue_callback(
+ interface_state.ifname, carrier, prevCarrier
+ ):
return
data = data[offset:]
+
# vi: ts=4 expandtab
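
The two alignment expressions reformatted above are the usual netlink 4-byte
rounding written two ways: the message loop rounds a header length up to the next
multiple of PAD_ALIGNMENT, while the attribute loop computes how many pad bytes
follow an attribute. A standalone check, assuming PAD_ALIGNMENT is 4 as netlink
message alignment requires:

    PAD_ALIGNMENT = 4

    def align_up(length):
        # round length up to the next multiple of PAD_ALIGNMENT
        return (length + PAD_ALIGNMENT - 1) & ~(PAD_ALIGNMENT - 1)

    def pad_needed(length):
        # pad bytes appended after an attribute of this length
        return (PAD_ALIGNMENT - (length % PAD_ALIGNMENT)) % PAD_ALIGNMENT

    assert align_up(17) == 20 and pad_needed(17) == 3
    assert align_up(20) == 20 and pad_needed(20) == 0
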
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 3e6365f1..a42543e4 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -14,11 +14,7 @@ import os
from cloudinit import ec2_utils
from cloudinit import log as logging
-from cloudinit import net
-from cloudinit import sources
-from cloudinit import subp
-from cloudinit import url_helper
-from cloudinit import util
+from cloudinit import net, sources, subp, url_helper, util
from cloudinit.sources import BrokenMetadata
# See https://docs.openstack.org/user-guide/cli-config-drive.html
@@ -27,30 +23,30 @@ LOG = logging.getLogger(__name__)
FILES_V1 = {
# Path <-> (metadata key name, translator function, default value)
- 'etc/network/interfaces': ('network_config', lambda x: x, ''),
- 'meta.js': ('meta_js', util.load_json, {}),
- "root/.ssh/authorized_keys": ('authorized_keys', lambda x: x, ''),
+ "etc/network/interfaces": ("network_config", lambda x: x, ""),
+ "meta.js": ("meta_js", util.load_json, {}),
+ "root/.ssh/authorized_keys": ("authorized_keys", lambda x: x, ""),
}
KEY_COPIES = (
# Cloud-init metadata names <-> (metadata key, is required)
- ('local-hostname', 'hostname', False),
- ('instance-id', 'uuid', True),
+ ("local-hostname", "hostname", False),
+ ("instance-id", "uuid", True),
)
# Versions and names taken from nova source nova/api/metadata/base.py
-OS_LATEST = 'latest'
-OS_FOLSOM = '2012-08-10'
-OS_GRIZZLY = '2013-04-04'
-OS_HAVANA = '2013-10-17'
-OS_LIBERTY = '2015-10-15'
+OS_LATEST = "latest"
+OS_FOLSOM = "2012-08-10"
+OS_GRIZZLY = "2013-04-04"
+OS_HAVANA = "2013-10-17"
+OS_LIBERTY = "2015-10-15"
# NEWTON_ONE adds 'devices' to md (sriov-pf-passthrough-neutron-port-vlan)
-OS_NEWTON_ONE = '2016-06-30'
+OS_NEWTON_ONE = "2016-06-30"
# NEWTON_TWO adds vendor_data2.json (vendordata-reboot)
-OS_NEWTON_TWO = '2016-10-06'
+OS_NEWTON_TWO = "2016-10-06"
# OS_OCATA adds 'vif' field to devices (sriov-pf-passthrough-neutron-port-vlan)
-OS_OCATA = '2017-02-22'
+OS_OCATA = "2017-02-22"
# OS_ROCKY adds a vf_trusted field to devices (sriov-trusted-vfs)
-OS_ROCKY = '2018-08-27'
+OS_ROCKY = "2018-08-27"
# keep this in chronological order. new supported versions go at the end.
@@ -67,18 +63,18 @@ OS_VERSIONS = (
KNOWN_PHYSICAL_TYPES = (
None,
- 'bgpovs', # not present in OpenStack upstream but used on OVH cloud.
- 'bridge',
- 'cascading', # not present in OpenStack upstream, used on OpenTelekomCloud
- 'dvs',
- 'ethernet',
- 'hw_veb',
- 'hyperv',
- 'ovs',
- 'phy',
- 'tap',
- 'vhostuser',
- 'vif',
+ "bgpovs", # not present in OpenStack upstream but used on OVH cloud.
+ "bridge",
+ "cascading", # not present in OpenStack upstream, used on OpenTelekomCloud
+ "dvs",
+ "ethernet",
+ "hw_veb",
+ "hyperv",
+ "ovs",
+ "phy",
+ "tap",
+ "vhostuser",
+ "vif",
)
@@ -90,7 +86,7 @@ class SourceMixin(object):
def _ec2_name_to_device(self, name):
if not self.ec2_metadata:
return None
- bdm = self.ec2_metadata.get('block-device-mapping', {})
+ bdm = self.ec2_metadata.get("block-device-mapping", {})
for (ent_name, device) in bdm.items():
if name == ent_name:
return device
@@ -105,9 +101,9 @@ class SourceMixin(object):
def _os_name_to_device(self, name):
device = None
try:
- criteria = 'LABEL=%s' % (name)
- if name == 'swap':
- criteria = 'TYPE=%s' % (name)
+ criteria = "LABEL=%s" % (name)
+ if name == "swap":
+ criteria = "TYPE=%s" % (name)
dev_entries = util.find_devs_with(criteria)
if dev_entries:
device = dev_entries[0]
@@ -135,10 +131,10 @@ class SourceMixin(object):
return None
# Try the ec2 mapping first
names = [name]
- if name == 'root':
- names.insert(0, 'ami')
- if name == 'ami':
- names.append('root')
+ if name == "root":
+ names.insert(0, "ami")
+ if name == "ami":
+ names.append("root")
device = None
LOG.debug("Using ec2 style lookup to find device %s", names)
for n in names:
@@ -163,7 +159,6 @@ class SourceMixin(object):
class BaseReader(metaclass=abc.ABCMeta):
-
def __init__(self, base_path):
self.base_path = base_path
@@ -187,8 +182,11 @@ class BaseReader(metaclass=abc.ABCMeta):
try:
versions_available = self._fetch_available_versions()
except Exception as e:
- LOG.debug("Unable to read openstack versions from %s due to: %s",
- self.base_path, e)
+ LOG.debug(
+ "Unable to read openstack versions from %s due to: %s",
+ self.base_path,
+ e,
+ )
versions_available = []
# openstack.OS_VERSIONS is stored in chronological order, so
@@ -202,12 +200,15 @@ class BaseReader(metaclass=abc.ABCMeta):
selected_version = potential_version
break
- LOG.debug("Selected version '%s' from %s", selected_version,
- versions_available)
+ LOG.debug(
+ "Selected version '%s' from %s",
+ selected_version,
+ versions_available,
+ )
return selected_version
def _read_content_path(self, item, decode=False):
- path = item.get('content_path', '').lstrip("/")
+ path = item.get("content_path", "").lstrip("/")
path_pieces = path.split("/")
valid_pieces = [p for p in path_pieces if len(p)]
if not valid_pieces:
@@ -225,38 +226,44 @@ class BaseReader(metaclass=abc.ABCMeta):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, list, str))
+ util.load_json, root_types=(dict, list, str)
+ )
def datafiles(version):
files = {}
- files['metadata'] = (
+ files["metadata"] = (
# File path to read
- self._path_join("openstack", version, 'meta_data.json'),
+ self._path_join("openstack", version, "meta_data.json"),
# Is it required?
True,
# Translator function (applied after loading)
util.load_json,
)
- files['userdata'] = (
- self._path_join("openstack", version, 'user_data'),
+ files["userdata"] = (
+ self._path_join("openstack", version, "user_data"),
False,
lambda x: x,
)
- files['vendordata'] = (
- self._path_join("openstack", version, 'vendor_data.json'),
+ files["vendordata"] = (
+ self._path_join("openstack", version, "vendor_data.json"),
+ False,
+ load_json_anytype,
+ )
+ files["vendordata2"] = (
+ self._path_join("openstack", version, "vendor_data2.json"),
False,
load_json_anytype,
)
- files['networkdata'] = (
- self._path_join("openstack", version, 'network_data.json'),
+ files["networkdata"] = (
+ self._path_join("openstack", version, "network_data.json"),
False,
load_json_anytype,
)
return files
results = {
- 'userdata': '',
- 'version': 2,
+ "userdata": "",
+ "version": 2,
}
data = datafiles(self._find_working_version())
for (name, (path, required, translator)) in data.items():
@@ -267,11 +274,13 @@ class BaseReader(metaclass=abc.ABCMeta):
data = self._path_read(path)
except IOError as e:
if not required:
- LOG.debug("Failed reading optional path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading optional path %s due to: %s", path, e
+ )
else:
- LOG.debug("Failed reading mandatory path %s due"
- " to: %s", path, e)
+ LOG.debug(
+ "Failed reading mandatory path %s due to: %s", path, e
+ )
else:
found = True
if required and not found:
@@ -286,11 +295,11 @@ class BaseReader(metaclass=abc.ABCMeta):
if found:
results[name] = data
- metadata = results['metadata']
- if 'random_seed' in metadata:
- random_seed = metadata['random_seed']
+ metadata = results["metadata"]
+ if "random_seed" in metadata:
+ random_seed = metadata["random_seed"]
try:
- metadata['random_seed'] = base64.b64decode(random_seed)
+ metadata["random_seed"] = base64.b64decode(random_seed)
except (ValueError, TypeError) as e:
raise BrokenMetadata(
"Badly formatted metadata random_seed entry: %s" % e
@@ -298,18 +307,18 @@ class BaseReader(metaclass=abc.ABCMeta):
# load any files that were provided
files = {}
- metadata_files = metadata.get('files', [])
+ metadata_files = metadata.get("files", [])
for item in metadata_files:
- if 'path' not in item:
+ if "path" not in item:
continue
- path = item['path']
+ path = item["path"]
try:
files[path] = self._read_content_path(item)
except Exception as e:
raise BrokenMetadata(
"Failed to read provided file %s: %s" % (path, e)
) from e
- results['files'] = files
+ results["files"] = files
# The 'network_config' item in metadata is a content pointer
# to the network config that should be applied. It is just a
@@ -318,7 +327,7 @@ class BaseReader(metaclass=abc.ABCMeta):
if net_item:
try:
content = self._read_content_path(net_item, decode=True)
- results['network_config'] = content
+ results["network_config"] = content
except IOError as e:
raise BrokenMetadata(
"Failed to read network configuration: %s" % (e)
@@ -329,12 +338,12 @@ class BaseReader(metaclass=abc.ABCMeta):
# if they specify 'dsmode' they're indicating the mode that they intend
# for this datasource to operate in.
try:
- results['dsmode'] = metadata['meta']['dsmode']
+ results["dsmode"] = metadata["meta"]["dsmode"]
except KeyError:
pass
# Read any ec2-metadata (if applicable)
- results['ec2-metadata'] = self._read_ec2_metadata()
+ results["ec2-metadata"] = self._read_ec2_metadata()
# Perform some misc. metadata key renames...
for (target_key, source_key, is_required) in KEY_COPIES:
@@ -359,15 +368,19 @@ class ConfigDriveReader(BaseReader):
def _fetch_available_versions(self):
if self._versions is None:
- path = self._path_join(self.base_path, 'openstack')
- found = [d for d in os.listdir(path)
- if os.path.isdir(os.path.join(path))]
+ path = self._path_join(self.base_path, "openstack")
+ found = [
+ d
+ for d in os.listdir(path)
+ if os.path.isdir(os.path.join(path))
+ ]
self._versions = sorted(found)
return self._versions
def _read_ec2_metadata(self):
- path = self._path_join(self.base_path,
- 'ec2', 'latest', 'meta-data.json')
+ path = self._path_join(
+ self.base_path, "ec2", "latest", "meta-data.json"
+ )
if not os.path.exists(path):
return {}
else:
@@ -414,14 +427,14 @@ class ConfigDriveReader(BaseReader):
else:
md[key] = copy.deepcopy(default)
- keydata = md['authorized_keys']
- meta_js = md['meta_js']
+ keydata = md["authorized_keys"]
+ meta_js = md["meta_js"]
# keydata in meta_js is preferred over "injected"
- keydata = meta_js.get('public-keys', keydata)
+ keydata = meta_js.get("public-keys", keydata)
if keydata:
lines = keydata.splitlines()
- md['public-keys'] = [
+ md["public-keys"] = [
line
for line in lines
if len(line) and not line.startswith("#")
@@ -429,25 +442,25 @@ class ConfigDriveReader(BaseReader):
# config-drive-v1 has no way for openstack to provide the instance-id
# so we copy that into metadata from the user input
- if 'instance-id' in meta_js:
- md['instance-id'] = meta_js['instance-id']
+ if "instance-id" in meta_js:
+ md["instance-id"] = meta_js["instance-id"]
results = {
- 'version': 1,
- 'metadata': md,
+ "version": 1,
+ "metadata": md,
}
# allow the user to specify 'dsmode' in a meta tag
- if 'dsmode' in meta_js:
- results['dsmode'] = meta_js['dsmode']
+ if "dsmode" in meta_js:
+ results["dsmode"] = meta_js["dsmode"]
# config-drive-v1 has no way of specifying user-data, so the user has
# to cheat and stuff it in a meta tag also.
- results['userdata'] = meta_js.get('user-data', '')
+ results["userdata"] = meta_js.get("user-data", "")
# this implementation does not support files other than
# network/interfaces and authorized_keys...
- results['files'] = {}
+ results["files"] = {}
return results
@@ -476,7 +489,6 @@ class MetadataReader(BaseReader):
return self._versions
def _path_read(self, path, decode=False):
-
def should_retry_cb(_request_args, cause):
try:
code = int(cause.code)
@@ -487,11 +499,13 @@ class MetadataReader(BaseReader):
pass
return True
- response = url_helper.readurl(path,
- retries=self.retries,
- ssl_details=self.ssl_details,
- timeout=self.timeout,
- exception_cb=should_retry_cb)
+ response = url_helper.readurl(
+ path,
+ retries=self.retries,
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ exception_cb=should_retry_cb,
+ )
if decode:
return response.contents.decode()
else:
@@ -501,9 +515,11 @@ class MetadataReader(BaseReader):
return url_helper.combine_url(base, *add_ons)
def _read_ec2_metadata(self):
- return ec2_utils.get_instance_metadata(ssl_details=self.ssl_details,
- timeout=self.timeout,
- retries=self.retries)
+ return ec2_utils.get_instance_metadata(
+ ssl_details=self.ssl_details,
+ timeout=self.timeout,
+ retries=self.retries,
+ )
# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
@@ -539,32 +555,32 @@ def convert_net_json(network_json=None, known_macs=None):
# dict of network_config key for filtering network_json
valid_keys = {
- 'physical': [
- 'name',
- 'type',
- 'mac_address',
- 'subnets',
- 'params',
- 'mtu',
+ "physical": [
+ "name",
+ "type",
+ "mac_address",
+ "subnets",
+ "params",
+ "mtu",
],
- 'subnet': [
- 'type',
- 'address',
- 'netmask',
- 'broadcast',
- 'metric',
- 'gateway',
- 'pointopoint',
- 'scope',
- 'dns_nameservers',
- 'dns_search',
- 'routes',
+ "subnet": [
+ "type",
+ "address",
+ "netmask",
+ "broadcast",
+ "metric",
+ "gateway",
+ "pointopoint",
+ "scope",
+ "dns_nameservers",
+ "dns_search",
+ "routes",
],
}
- links = network_json.get('links', [])
- networks = network_json.get('networks', [])
- services = network_json.get('services', [])
+ links = network_json.get("links", [])
+ networks = network_json.get("networks", [])
+ services = network_json.get("services", [])
link_updates = []
link_id_info = {}
@@ -573,65 +589,77 @@ def convert_net_json(network_json=None, known_macs=None):
config = []
for link in links:
subnets = []
- cfg = dict((k, v) for k, v in link.items()
- if k in valid_keys['physical'])
+ cfg = dict(
+ (k, v) for k, v in link.items() if k in valid_keys["physical"]
+ )
# 'name' is not in openstack spec yet, but we will support it if it is
# present. The 'id' in the spec is currently implemented as the host
# nic's name, meaning something like 'tap-adfasdffd'. We do not want
# to name guest devices with such ugly names.
- if 'name' in link:
- cfg['name'] = link['name']
+ if "name" in link:
+ cfg["name"] = link["name"]
link_mac_addr = None
- if link.get('ethernet_mac_address'):
- link_mac_addr = link.get('ethernet_mac_address').lower()
- link_id_info[link['id']] = link_mac_addr
-
- curinfo = {'name': cfg.get('name'), 'mac': link_mac_addr,
- 'id': link['id'], 'type': link['type']}
-
- for network in [n for n in networks
- if n['link'] == link['id']]:
- subnet = dict((k, v) for k, v in network.items()
- if k in valid_keys['subnet'])
-
- if network['type'] == 'ipv4_dhcp':
- subnet.update({'type': 'dhcp4'})
- elif network['type'] == 'ipv6_dhcp':
- subnet.update({'type': 'dhcp6'})
- elif network['type'] in ['ipv6_slaac', 'ipv6_dhcpv6-stateless',
- 'ipv6_dhcpv6-stateful']:
- subnet.update({'type': network['type']})
- elif network['type'] in ['ipv4', 'static']:
- subnet.update({
- 'type': 'static',
- 'address': network.get('ip_address'),
- })
- elif network['type'] in ['ipv6', 'static6']:
- cfg.update({'accept-ra': False})
- subnet.update({
- 'type': 'static6',
- 'address': network.get('ip_address'),
- })
+ if link.get("ethernet_mac_address"):
+ link_mac_addr = link.get("ethernet_mac_address").lower()
+ link_id_info[link["id"]] = link_mac_addr
+
+ curinfo = {
+ "name": cfg.get("name"),
+ "mac": link_mac_addr,
+ "id": link["id"],
+ "type": link["type"],
+ }
+
+ for network in [n for n in networks if n["link"] == link["id"]]:
+ subnet = dict(
+ (k, v) for k, v in network.items() if k in valid_keys["subnet"]
+ )
+
+ if network["type"] == "ipv4_dhcp":
+ subnet.update({"type": "dhcp4"})
+ elif network["type"] == "ipv6_dhcp":
+ subnet.update({"type": "dhcp6"})
+ elif network["type"] in [
+ "ipv6_slaac",
+ "ipv6_dhcpv6-stateless",
+ "ipv6_dhcpv6-stateful",
+ ]:
+ subnet.update({"type": network["type"]})
+ elif network["type"] in ["ipv4", "static"]:
+ subnet.update(
+ {
+ "type": "static",
+ "address": network.get("ip_address"),
+ }
+ )
+ elif network["type"] in ["ipv6", "static6"]:
+ cfg.update({"accept-ra": False})
+ subnet.update(
+ {
+ "type": "static6",
+ "address": network.get("ip_address"),
+ }
+ )
# Enable accept_ra for stateful and legacy ipv6_dhcp types
- if network['type'] in ['ipv6_dhcpv6-stateful', 'ipv6_dhcp']:
- cfg.update({'accept-ra': True})
+ if network["type"] in ["ipv6_dhcpv6-stateful", "ipv6_dhcp"]:
+ cfg.update({"accept-ra": True})
- if network['type'] == 'ipv4':
- subnet['ipv4'] = True
- if network['type'] == 'ipv6':
- subnet['ipv6'] = True
+ if network["type"] == "ipv4":
+ subnet["ipv4"] = True
+ if network["type"] == "ipv6":
+ subnet["ipv6"] = True
subnets.append(subnet)
- cfg.update({'subnets': subnets})
- if link['type'] in ['bond']:
+ cfg.update({"subnets": subnets})
+ if link["type"] in ["bond"]:
params = {}
if link_mac_addr:
- params['mac_address'] = link_mac_addr
+ params["mac_address"] = link_mac_addr
for k, v in link.items():
- if k == 'bond_links':
+ if k == "bond_links":
continue
- elif k.startswith('bond'):
+ elif k.startswith("bond"):
params.update({k: v})
# openstack does not provide a name for the bond.
@@ -644,35 +672,45 @@ def convert_net_json(network_json=None, known_macs=None):
# to the network config by their nic name.
# store that in bond_links_needed, and update these later.
link_updates.append(
- (cfg, 'bond_interfaces', '%s',
- copy.deepcopy(link['bond_links']))
+ (
+ cfg,
+ "bond_interfaces",
+ "%s",
+ copy.deepcopy(link["bond_links"]),
+ )
+ )
+ cfg.update({"params": params, "name": link_name})
+
+ curinfo["name"] = link_name
+ elif link["type"] in ["vlan"]:
+ name = "%s.%s" % (link["vlan_link"], link["vlan_id"])
+ cfg.update(
+ {
+ "name": name,
+ "vlan_id": link["vlan_id"],
+ "mac_address": link["vlan_mac_address"],
+ }
)
- cfg.update({'params': params, 'name': link_name})
-
- curinfo['name'] = link_name
- elif link['type'] in ['vlan']:
- name = "%s.%s" % (link['vlan_link'], link['vlan_id'])
- cfg.update({
- 'name': name,
- 'vlan_id': link['vlan_id'],
- 'mac_address': link['vlan_mac_address'],
- })
- link_updates.append((cfg, 'vlan_link', '%s', link['vlan_link']))
- link_updates.append((cfg, 'name', "%%s.%s" % link['vlan_id'],
- link['vlan_link']))
- curinfo.update({'mac': link['vlan_mac_address'],
- 'name': name})
+ link_updates.append((cfg, "vlan_link", "%s", link["vlan_link"]))
+ link_updates.append(
+ (cfg, "name", "%%s.%s" % link["vlan_id"], link["vlan_link"])
+ )
+ curinfo.update({"mac": link["vlan_mac_address"], "name": name})
else:
- if link['type'] not in KNOWN_PHYSICAL_TYPES:
- LOG.warning('Unknown network_data link type (%s); treating as'
- ' physical', link['type'])
- cfg.update({'type': 'physical', 'mac_address': link_mac_addr})
+ if link["type"] not in KNOWN_PHYSICAL_TYPES:
+ LOG.warning(
+ "Unknown network_data link type (%s); treating as"
+ " physical",
+ link["type"],
+ )
+ cfg.update({"type": "physical", "mac_address": link_mac_addr})
config.append(cfg)
- link_id_info[curinfo['id']] = curinfo
+ link_id_info[curinfo["id"]] = curinfo
- need_names = [d for d in config
- if d.get('type') == 'physical' and 'name' not in d]
+ need_names = [
+ d for d in config if d.get("type") == "physical" and "name" not in d
+ ]
if need_names or link_updates:
if known_macs is None:
@@ -680,26 +718,26 @@ def convert_net_json(network_json=None, known_macs=None):
# go through and fill out the link_id_info with names
for _link_id, info in link_id_info.items():
- if info.get('name'):
+ if info.get("name"):
continue
- if info.get('mac') in known_macs:
- info['name'] = known_macs[info['mac']]
+ if info.get("mac") in known_macs:
+ info["name"] = known_macs[info["mac"]]
for d in need_names:
- mac = d.get('mac_address')
+ mac = d.get("mac_address")
if not mac:
raise ValueError("No mac_address or name entry for %s" % d)
if mac not in known_macs:
raise ValueError("Unable to find a system nic for %s" % d)
- d['name'] = known_macs[mac]
+ d["name"] = known_macs[mac]
for cfg, key, fmt, targets in link_updates:
if isinstance(targets, (list, tuple)):
cfg[key] = [
- fmt % link_id_info[target]['name'] for target in targets
+ fmt % link_id_info[target]["name"] for target in targets
]
else:
- cfg[key] = fmt % link_id_info[targets]['name']
+ cfg[key] = fmt % link_id_info[targets]["name"]
# Infiniband interfaces may be referenced in network_data.json by a 6 byte
# Ethernet MAC-style address, and we use that address to look up the
@@ -708,15 +746,16 @@ def convert_net_json(network_json=None, known_macs=None):
ib_known_hwaddrs = net.get_ib_hwaddrs_by_interface()
if ib_known_hwaddrs:
for cfg in config:
- if cfg['name'] in ib_known_hwaddrs:
- cfg['mac_address'] = ib_known_hwaddrs[cfg['name']]
- cfg['type'] = 'infiniband'
+ if cfg["name"] in ib_known_hwaddrs:
+ cfg["mac_address"] = ib_known_hwaddrs[cfg["name"]]
+ cfg["type"] = "infiniband"
for service in services:
cfg = service
- cfg.update({'type': 'nameserver'})
+ cfg.update({"type": "nameserver"})
config.append(cfg)
- return {'version': 1, 'config': config}
+ return {"version": 1, "config": config}
+
# vi: ts=4 expandtab
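
For a concrete picture of what convert_net_json produces, the network_data.json
example from the OpenStack metadata docs (the same input exercised by the test file
removed later in this diff) converts as follows; the known_macs mapping is supplied
by the caller and is what names the nic:

    from cloudinit.sources.helpers import openstack

    net_json = {
        "links": [{"ethernet_mac_address": "fa:16:3e:9c:bf:3d",
                   "id": "tapcd9f6d46-4a", "mtu": None, "type": "bridge",
                   "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}],
        "networks": [{"id": "network0", "link": "tapcd9f6d46-4a",
                      "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
                      "type": "ipv4_dhcp"}],
        "services": [{"address": "8.8.8.8", "type": "dns"}],
    }
    config = openstack.convert_net_json(
        network_json=net_json, known_macs={"fa:16:3e:9c:bf:3d": "eth0"}
    )
    # -> {"version": 1, "config": [
    #       {"type": "physical", "name": "eth0", "mtu": None,
    #        "mac_address": "fa:16:3e:9c:bf:3d", "subnets": [{"type": "dhcp4"}]},
    #       {"address": "8.8.8.8", "type": "nameserver"}]}
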
diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py
deleted file mode 100644
index cafe3961..00000000
--- a/cloudinit/sources/helpers/tests/test_netlink.py
+++ /dev/null
@@ -1,480 +0,0 @@
-# Author: Tamilmani Manoharan <tamanoha@microsoft.com>
-#
-# This file is part of cloud-init. See LICENSE file for license information.
-
-from cloudinit.tests.helpers import CiTestCase, mock
-import socket
-import struct
-import codecs
-from cloudinit.sources.helpers.netlink import (
- NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket,
- read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect,
- wait_for_nic_attach_event, wait_for_nic_detach_event,
- OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT,
- OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_DELLINK,
- RTM_SETLINK, RTM_GETLINK, MAX_SIZE)
-
-
-def int_to_bytes(i):
- '''convert integer to binary: eg: 1 to \x01'''
- hex_value = '{0:x}'.format(i)
- hex_value = '0' * (len(hex_value) % 2) + hex_value
- return codecs.decode(hex_value, 'hex_codec')
-
-
-class TestCreateBoundNetlinkSocket(CiTestCase):
-
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
- def test_socket_error_on_create(self, m_socket):
- '''create_bound_netlink_socket catches socket creation exception'''
-
- """NetlinkCreateSocketError is raised when socket creation errors."""
- m_socket.side_effect = socket.error("Fake socket failure")
- with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr:
- create_bound_netlink_socket()
- self.assertEqual(
- 'Exception during netlink socket create: Fake socket failure',
- str(ctx_mgr.exception))
-
-
-class TestReadNetlinkSocket(CiTestCase):
-
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
- @mock.patch('cloudinit.sources.helpers.netlink.select.select')
- def test_read_netlink_socket(self, m_select, m_socket):
- '''read_netlink_socket able to receive data'''
- data = 'netlinktest'
- m_select.return_value = [m_socket], None, None
- m_socket.recv.return_value = data
- recv_data = read_netlink_socket(m_socket, 2)
- m_select.assert_called_with([m_socket], [], [], 2)
- m_socket.recv.assert_called_with(MAX_SIZE)
- self.assertIsNotNone(recv_data)
- self.assertEqual(recv_data, data)
-
- @mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
- @mock.patch('cloudinit.sources.helpers.netlink.select.select')
- def test_netlink_read_timeout(self, m_select, m_socket):
- '''read_netlink_socket should timeout if nothing to read'''
- m_select.return_value = [], None, None
- data = read_netlink_socket(m_socket, 1)
- m_select.assert_called_with([m_socket], [], [], 1)
- self.assertEqual(m_socket.recv.call_count, 0)
- self.assertIsNone(data)
-
- def test_read_invalid_socket(self):
- '''read_netlink_socket raises assert error if socket is invalid'''
- socket = None
- with self.assertRaises(AssertionError) as context:
- read_netlink_socket(socket, 1)
- self.assertTrue('netlink socket is none' in str(context.exception))
-
-
-class TestParseNetlinkMessage(CiTestCase):
-
- def test_read_rta_oper_state(self):
- '''read_rta_oper_state could parse netlink message and extract data'''
- ifname = "eth0"
- bytes = ifname.encode("utf-8")
- buf = bytearray(48)
- struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5,
- 16, int_to_bytes(OPER_DOWN))
- interface_state = read_rta_oper_state(buf)
- self.assertEqual(interface_state.ifname, ifname)
- self.assertEqual(interface_state.operstate, OPER_DOWN)
-
- def test_read_none_data(self):
- '''read_rta_oper_state raises assert error if data is none'''
- data = None
- with self.assertRaises(AssertionError) as context:
- read_rta_oper_state(data)
- self.assertEqual('data is none', str(context.exception))
-
- def test_read_invalid_rta_operstate_none(self):
- '''read_rta_oper_state returns none if operstate is none'''
- ifname = "eth0"
- buf = bytearray(40)
- bytes = ifname.encode("utf-8")
- struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes)
- interface_state = read_rta_oper_state(buf)
- self.assertIsNone(interface_state)
-
- def test_read_invalid_rta_ifname_none(self):
- '''read_rta_oper_state returns none if ifname is none'''
- buf = bytearray(40)
- struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(OPER_DOWN))
- interface_state = read_rta_oper_state(buf)
- self.assertIsNone(interface_state)
-
- def test_read_invalid_data_len(self):
- '''raise assert error if data size is smaller than required size'''
- buf = bytearray(32)
- with self.assertRaises(AssertionError) as context:
- read_rta_oper_state(buf)
- self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in
- str(context.exception))
-
- def test_unpack_rta_attr_none_data(self):
- '''unpack_rta_attr raises assert error if data is none'''
- data = None
- with self.assertRaises(AssertionError) as context:
- unpack_rta_attr(data, RTATTR_START_OFFSET)
- self.assertTrue('data is none' in str(context.exception))
-
- def test_unpack_rta_attr_invalid_offset(self):
- '''unpack_rta_attr raises assert error if offset is invalid'''
- data = bytearray(48)
- with self.assertRaises(AssertionError) as context:
- unpack_rta_attr(data, "offset")
- self.assertTrue('offset is not integer' in str(context.exception))
- with self.assertRaises(AssertionError) as context:
- unpack_rta_attr(data, 31)
- self.assertTrue('rta offset is less than expected length' in
- str(context.exception))
-
-
-@mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
-@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket')
-class TestNicAttachDetach(CiTestCase):
- with_logs = True
-
- def _media_switch_data(self, ifname, msg_type, operstate):
- '''construct netlink data with specified fields'''
- if ifname and operstate is not None:
- data = bytearray(48)
- bytes = ifname.encode("utf-8")
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(operstate))
- elif ifname:
- data = bytearray(40)
- bytes = ifname.encode("utf-8")
- struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
- elif operstate:
- data = bytearray(40)
- struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(operstate))
- struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
- return data
-
- def test_nic_attached_oper_down(self, m_read_netlink_socket, m_socket):
- '''Test for a new nic attached'''
- ifname = "eth0"
- data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
- m_read_netlink_socket.side_effect = [data_op_down]
- ifread = wait_for_nic_attach_event(m_socket, [])
- self.assertEqual(m_read_netlink_socket.call_count, 1)
- self.assertEqual(ifname, ifread)
-
- def test_nic_attached_oper_up(self, m_read_netlink_socket, m_socket):
- '''Test for a new nic attached'''
- ifname = "eth0"
- data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_op_up]
- ifread = wait_for_nic_attach_event(m_socket, [])
- self.assertEqual(m_read_netlink_socket.call_count, 1)
- self.assertEqual(ifname, ifread)
-
- def test_nic_attach_ignore_existing(self, m_read_netlink_socket, m_socket):
- '''Test that we read only the interfaces we are interested in.'''
- data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
- data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
- m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
- ifread = wait_for_nic_attach_event(m_socket, ["eth0"])
- self.assertEqual(m_read_netlink_socket.call_count, 2)
- self.assertEqual("eth1", ifread)
-
- def test_nic_attach_read_first(self, m_read_netlink_socket, m_socket):
- '''Test that we read only the interfaces we are interested in.'''
- data_eth0 = self._media_switch_data("eth0", RTM_NEWLINK, OPER_DOWN)
- data_eth1 = self._media_switch_data("eth1", RTM_NEWLINK, OPER_DOWN)
- m_read_netlink_socket.side_effect = [data_eth0, data_eth1]
- ifread = wait_for_nic_attach_event(m_socket, ["eth1"])
- self.assertEqual(m_read_netlink_socket.call_count, 1)
- self.assertEqual("eth0", ifread)
-
- def test_nic_detached(self, m_read_netlink_socket, m_socket):
- '''Test for an existing nic detached'''
- ifname = "eth0"
- data_op_down = self._media_switch_data(ifname, RTM_DELLINK, OPER_DOWN)
- m_read_netlink_socket.side_effect = [data_op_down]
- ifread = wait_for_nic_detach_event(m_socket)
- self.assertEqual(m_read_netlink_socket.call_count, 1)
- self.assertEqual(ifname, ifread)
-
-
-@mock.patch('cloudinit.sources.helpers.netlink.socket.socket')
-@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket')
-class TestWaitForMediaDisconnectConnect(CiTestCase):
- with_logs = True
-
- def _media_switch_data(self, ifname, msg_type, operstate):
- '''construct netlink data with specified fields'''
- if ifname and operstate is not None:
- data = bytearray(48)
- bytes = ifname.encode("utf-8")
- struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(operstate))
- elif ifname:
- data = bytearray(40)
- bytes = ifname.encode("utf-8")
- struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes)
- elif operstate:
- data = bytearray(40)
- struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16,
- int_to_bytes(operstate))
- struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0)
- return data
-
- def test_media_down_up_scenario(self, m_read_netlink_socket,
- m_socket):
- '''Test for media down up sequence for required interface name'''
- ifname = "eth0"
- # construct data for Oper State down
- data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
- # construct data for Oper State up
- data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [data_op_down, data_op_up]
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 2)
-
- def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket,
- m_socket):
- '''wait_for_media_disconnect_connect ignores unexpected interfaces.
-
- The first two messages are for other interfaces and last two are for
- expected interface. So the function exit only after receiving last
- 2 messages and therefore the call count for m_read_netlink_socket
- has to be 4
- '''
- other_ifname = "eth1"
- expected_ifname = "eth0"
- data_op_down_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_DOWN
- )
- data_op_up_eth1 = self._media_switch_data(
- other_ifname, RTM_NEWLINK, OPER_UP
- )
- data_op_down_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_DOWN
- )
- data_op_up_eth0 = self._media_switch_data(
- expected_ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [
- data_op_down_eth1,
- data_op_up_eth1,
- data_op_down_eth0,
- data_op_up_eth0
- ]
- wait_for_media_disconnect_connect(m_socket, expected_ifname)
- self.assertIn('Ignored netlink event on interface %s' % other_ifname,
- self.logs.getvalue())
- self.assertEqual(m_read_netlink_socket.call_count, 4)
-
- def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect ignores GETLINK events.
-
- The first two messages are for oper down and up for RTM_GETLINK type
- which netlink module will ignore. The last 2 messages are RTM_NEWLINK
- with oper state down and up messages. Therefore the call count for
- m_read_netlink_socket has to be 4 ignoring first 2 messages
- of RTM_GETLINK
- '''
- ifname = "eth0"
- data_getlink_down = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_DOWN
- )
- data_getlink_up = self._media_switch_data(
- ifname, RTM_GETLINK, OPER_UP
- )
- data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN
- )
- data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP
- )
- m_read_netlink_socket.side_effect = [
- data_getlink_down,
- data_getlink_up,
- data_newlink_down,
- data_newlink_up
- ]
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 4)
-
- def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect ignores SETLINK events.
-
- The first two messages are for oper down and up for RTM_GETLINK type
- which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down
- and up messages. This function should exit after 4th messages since it
- sees down->up scenario. So the call count for m_read_netlink_socket
- has to be 4 ignoring first 2 messages of RTM_GETLINK and
- last 2 messages of RTM_NEWLINK
- '''
- ifname = "eth0"
- data_setlink_down = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_DOWN
- )
- data_setlink_up = self._media_switch_data(
- ifname, RTM_SETLINK, OPER_UP
- )
- data_newlink_down = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DOWN
- )
- data_newlink_up = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UP
- )
- m_read_netlink_socket.side_effect = [
- data_setlink_down,
- data_setlink_up,
- data_newlink_down,
- data_newlink_up,
- data_newlink_down,
- data_newlink_up
- ]
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 4)
-
- def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket,
- m_socket):
- '''returns only if it receives UP event after a DOWN event'''
- ifname = "eth0"
- data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
- data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DORMANT
- )
- data_op_notpresent = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_NOTPRESENT
- )
- data_op_lowerdown = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_LOWERLAYERDOWN
- )
- data_op_testing = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_TESTING
- )
- data_op_unknown = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UNKNOWN
- )
- m_read_netlink_socket.side_effect = [
- data_op_up, data_op_up,
- data_op_dormant, data_op_up,
- data_op_notpresent, data_op_up,
- data_op_lowerdown, data_op_up,
- data_op_testing, data_op_up,
- data_op_unknown, data_op_up,
- data_op_down, data_op_up
- ]
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 14)
-
- def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket,
- m_socket):
- '''wait_for_media_disconnect_connect handles in between transitions'''
- ifname = "eth0"
- data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
- data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_dormant = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_DORMANT)
- data_op_unknown = self._media_switch_data(
- ifname, RTM_NEWLINK, OPER_UNKNOWN)
- m_read_netlink_socket.side_effect = [
- data_op_down, data_op_dormant,
- data_op_unknown, data_op_up
- ]
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 4)
-
- def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect should handle invalid operstates.
-
- The function should not fail and return even if it receives invalid
- operstates. It always should wait for down up sequence.
- '''
- ifname = "eth0"
- data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
- data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7)
- m_read_netlink_socket.side_effect = [
- data_op_invalid, data_op_up,
- data_op_down, data_op_invalid,
- data_op_up
- ]
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 5)
-
- def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect handle none netlink socket.'''
- socket = None
- ifname = "eth0"
- with self.assertRaises(AssertionError) as context:
- wait_for_media_disconnect_connect(socket, ifname)
- self.assertTrue('netlink socket is none' in str(context.exception))
-
- def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket):
- '''wait_for_media_disconnect_connect handle none interface name'''
- ifname = None
- with self.assertRaises(AssertionError) as context:
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertTrue('interface name is none' in str(context.exception))
- ifname = ""
- with self.assertRaises(AssertionError) as context:
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertTrue('interface name cannot be empty' in
- str(context.exception))
-
- def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket):
- ''' wait_for_media_disconnect_connect handles invalid rta data'''
- ifname = "eth0"
- data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN)
- data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None)
- data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN)
- data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP)
- m_read_netlink_socket.side_effect = [
- data_invalid1, data_invalid2, data_op_down, data_op_up
- ]
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 4)
-
- def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket):
- '''Read multiple messages in single receive call'''
- ifname = "eth0"
- bytes = ifname.encode("utf-8")
- data = bytearray(96)
- struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into(
- "HH4sHHc", data, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN)
- )
- struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into(
- "HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8,
- 3, bytes, 5, 16, int_to_bytes(OPER_UP)
- )
- m_read_netlink_socket.return_value = data
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 1)
-
- def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket):
- '''Read partial messages in receive call'''
- ifname = "eth0"
- bytes = ifname.encode("utf-8")
- data1 = bytearray(112)
- data2 = bytearray(32)
- struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into(
- "HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3,
- bytes, 5, 16, int_to_bytes(OPER_DOWN)
- )
- struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into(
- "HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, int_to_bytes(OPER_DOWN)
- )
- struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0)
- struct.pack_into(
- "HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, int_to_bytes(OPER_UP)
- )
- m_read_netlink_socket.side_effect = [data1, data2]
- wait_for_media_disconnect_connect(m_socket, ifname)
- self.assertEqual(m_read_netlink_socket.call_count, 2)
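
The removed tests above synthesize netlink messages by hand: a 16-byte nlmsghdr
packed at offset 0 and the IFLA attributes packed at RTATTR_START_OFFSET. A
condensed sketch of that pattern, using the module's own constants (3 and 16 are
the kernel's IFLA_IFNAME and IFLA_OPERSTATE attribute types):

    import struct

    from cloudinit.sources.helpers.netlink import (
        OPER_DOWN, RTATTR_START_OFFSET, RTM_NEWLINK)

    def fake_newlink_msg(ifname, operstate):
        buf = bytearray(48)
        # attr 1: length=8, rta_type=3 (IFLA_IFNAME), 4-byte name
        # attr 2: length=5, rta_type=16 (IFLA_OPERSTATE), 1-byte state
        struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3,
                         ifname.encode("utf-8"), 5, 16, bytes([operstate]))
        # nlmsghdr: total length, message type, flags, seq, pid
        struct.pack_into("=LHHLL", buf, 0, len(buf), RTM_NEWLINK, 0, 0, 0)
        return buf

    msg = fake_newlink_msg("eth0", OPER_DOWN)  # ifname must fit in 4 bytes here
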
diff --git a/cloudinit/sources/helpers/tests/test_openstack.py b/cloudinit/sources/helpers/tests/test_openstack.py
deleted file mode 100644
index 2bde1e3f..00000000
--- a/cloudinit/sources/helpers/tests/test_openstack.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-# ./cloudinit/sources/helpers/tests/test_openstack.py
-
-from cloudinit.sources.helpers import openstack
-from cloudinit.tests import helpers as test_helpers
-
-
-class TestConvertNetJson(test_helpers.CiTestCase):
-
- def test_phy_types(self):
- """Verify the different known physical types are handled."""
- # network_data.json example from
- # https://docs.openstack.org/nova/latest/user/metadata.html
- mac0 = "fa:16:3e:9c:bf:3d"
- net_json = {
- "links": [
- {"ethernet_mac_address": mac0, "id": "tapcd9f6d46-4a",
- "mtu": None, "type": "bridge",
- "vif_id": "cd9f6d46-4a3a-43ab-a466-994af9db96fc"}
- ],
- "networks": [
- {"id": "network0", "link": "tapcd9f6d46-4a",
- "network_id": "99e88329-f20d-4741-9593-25bf07847b16",
- "type": "ipv4_dhcp"}
- ],
- "services": [{"address": "8.8.8.8", "type": "dns"}]
- }
- macs = {mac0: 'eth0'}
-
- expected = {
- 'version': 1,
- 'config': [
- {'mac_address': 'fa:16:3e:9c:bf:3d',
- 'mtu': None, 'name': 'eth0',
- 'subnets': [{'type': 'dhcp4'}],
- 'type': 'physical'},
- {'address': '8.8.8.8', 'type': 'nameserver'}]}
-
- for t in openstack.KNOWN_PHYSICAL_TYPES:
- net_json["links"][0]["type"] = t
- self.assertEqual(
- expected,
- openstack.convert_net_json(network_json=net_json,
- known_macs=macs))
diff --git a/cloudinit/sources/helpers/upcloud.py b/cloudinit/sources/helpers/upcloud.py
new file mode 100644
index 00000000..e7b95a5e
--- /dev/null
+++ b/cloudinit/sources/helpers/upcloud.py
@@ -0,0 +1,229 @@
+# Author: Antti Myyrä <antti.myyra@upcloud.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+
+from cloudinit import dmi
+from cloudinit import log as logging
+from cloudinit import net as cloudnet
+from cloudinit import url_helper
+
+LOG = logging.getLogger(__name__)
+
+
+def convert_to_network_config_v1(config):
+ """
+ Convert the UpCloud network metadata description into
+ Cloud-init's version 1 netconfig format.
+
+ Example JSON:
+ {
+ "interfaces": [
+ {
+ "index": 1,
+ "ip_addresses": [
+ {
+ "address": "94.237.105.53",
+ "dhcp": true,
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "94.237.104.1",
+ "network": "94.237.104.0/22"
+ },
+ {
+ "address": "94.237.105.50",
+ "dhcp": false,
+ "dns": [],
+ "family": "IPv4",
+ "floating": true,
+ "gateway": "",
+ "network": "94.237.105.50/32"
+ }
+ ],
+ "mac": "32:d5:ba:4a:36:e7",
+ "network_id": "031457f4-0f8c-483c-96f2-eccede02909c",
+ "type": "public"
+ },
+ {
+ "index": 2,
+ "ip_addresses": [
+ {
+ "address": "10.6.3.27",
+ "dhcp": true,
+ "dns": [],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "10.6.0.1",
+ "network": "10.6.0.0/22"
+ }
+ ],
+ "mac": "32:d5:ba:4a:84:cc",
+ "network_id": "03d82553-5bea-4132-b29a-e1cf67ec2dd1",
+ "type": "utility"
+ },
+ {
+ "index": 3,
+ "ip_addresses": [
+ {
+ "address": "2a04:3545:1000:720:38d6:baff:fe4a:63e7",
+ "dhcp": true,
+ "dns": [
+ "2a04:3540:53::1",
+ "2a04:3544:53::1"
+ ],
+ "family": "IPv6",
+ "floating": false,
+ "gateway": "2a04:3545:1000:720::1",
+ "network": "2a04:3545:1000:720::/64"
+ }
+ ],
+ "mac": "32:d5:ba:4a:63:e7",
+ "network_id": "03000000-0000-4000-8046-000000000000",
+ "type": "public"
+ },
+ {
+ "index": 4,
+ "ip_addresses": [
+ {
+ "address": "172.30.1.10",
+ "dhcp": true,
+ "dns": [],
+ "family": "IPv4",
+ "floating": false,
+ "gateway": "172.30.1.1",
+ "network": "172.30.1.0/24"
+ }
+ ],
+ "mac": "32:d5:ba:4a:8a:e1",
+ "network_id": "035a0a4a-77b4-4de5-820d-189fc8135714",
+ "type": "private"
+ }
+ ],
+ "dns": [
+ "94.237.127.9",
+ "94.237.40.9"
+ ]
+ }
+ """
+
+ def _get_subnet_config(ip_addr, dns):
+ if ip_addr.get("dhcp"):
+ dhcp_type = "dhcp"
+ if ip_addr.get("family") == "IPv6":
+ # UpCloud currently passes IPv6 addresses via
+ # StateLess Address Auto Configuration (SLAAC)
+                # Stateless Address Autoconfiguration (SLAAC)
+ return {"type": dhcp_type}
+
+ static_type = "static"
+ if ip_addr.get("family") == "IPv6":
+ static_type = "static6"
+ subpart = {
+ "type": static_type,
+ "control": "auto",
+ "address": ip_addr.get("address"),
+ }
+
+ if ip_addr.get("gateway"):
+ subpart["gateway"] = ip_addr.get("gateway")
+
+ if "/" in ip_addr.get("network"):
+ subpart["netmask"] = ip_addr.get("network").split("/")[1]
+
+ if dns != ip_addr.get("dns") and ip_addr.get("dns"):
+ subpart["dns_nameservers"] = ip_addr.get("dns")
+
+ return subpart
+
+ nic_configs = []
+ macs_to_interfaces = cloudnet.get_interfaces_by_mac()
+ LOG.debug("NIC mapping: %s", macs_to_interfaces)
+
+ for raw_iface in config.get("interfaces"):
+ LOG.debug("Considering %s", raw_iface)
+
+ mac_address = raw_iface.get("mac")
+ if mac_address not in macs_to_interfaces:
+ raise RuntimeError(
+ "Did not find network interface on system "
+ "with mac '%s'. Cannot apply configuration: %s"
+ % (mac_address, raw_iface)
+ )
+
+ iface_type = raw_iface.get("type")
+ sysfs_name = macs_to_interfaces.get(mac_address)
+
+ LOG.debug(
+ "Found %s interface '%s' with address '%s' (index %d)",
+ iface_type,
+ sysfs_name,
+ mac_address,
+ raw_iface.get("index"),
+ )
+
+ interface = {
+ "type": "physical",
+ "name": sysfs_name,
+ "mac_address": mac_address,
+ }
+
+ subnets = []
+ for ip_address in raw_iface.get("ip_addresses"):
+ sub_part = _get_subnet_config(ip_address, config.get("dns"))
+ subnets.append(sub_part)
+
+ interface["subnets"] = subnets
+ nic_configs.append(interface)
+
+ if config.get("dns"):
+ LOG.debug("Setting DNS nameservers to %s", config.get("dns"))
+ nic_configs.append(
+ {"type": "nameserver", "address": config.get("dns")}
+ )
+
+ return {"version": 1, "config": nic_configs}
+
+
+def convert_network_config(config):
+ return convert_to_network_config_v1(config)
+
+
+def read_metadata(url, timeout=2, sec_between=2, retries=30):
+ response = url_helper.readurl(
+ url, timeout=timeout, sec_between=sec_between, retries=retries
+ )
+ if not response.ok():
+ raise RuntimeError("unable to read metadata at %s" % url)
+ return json.loads(response.contents.decode())
+
+
+def read_sysinfo():
+ # UpCloud embeds vendor ID and server UUID in the
+ # SMBIOS information
+
+ # Detect if we are on UpCloud and return the UUID
+
+ vendor_name = dmi.read_dmi_data("system-manufacturer")
+ if vendor_name != "UpCloud":
+ return False, None
+
+ server_uuid = dmi.read_dmi_data("system-uuid")
+ if server_uuid:
+ LOG.debug(
+ "system identified via SMBIOS as UpCloud server: %s", server_uuid
+ )
+ else:
+ msg = (
+            "system identified via SMBIOS as an UpCloud server, but "
+            "did not provide an ID. Please contact support via "
+            "https://hub.upcloud.com or by email at support@upcloud.com"
+ )
+ LOG.critical(msg)
+ raise RuntimeError(msg)
+
+ return True, server_uuid
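
A sketch of how a datasource might tie these helpers together. The metadata URL
and the "network" key are assumptions for illustration; the real values live in
DataSourceUpCloud, which is not part of this hunk:

    from cloudinit.sources.helpers import upcloud

    is_upcloud, server_uuid = upcloud.read_sysinfo()
    if is_upcloud:
        md = upcloud.read_metadata("https://169.254.169.254/metadata/v1.json")
        # assumed shape: the interface/dns description sits under "network"
        netcfg = upcloud.convert_network_config(md["network"])
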
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
index 9a07eafa..a5c67bb7 100644
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -9,7 +9,8 @@
class BootProtoEnum(object):
"""Specifies the NIC Boot Settings."""
- DHCP = 'dhcp'
- STATIC = 'static'
+ DHCP = "dhcp"
+ STATIC = "static"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 7109aef3..39dacee0 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -15,18 +15,20 @@ class Config(object):
Specification file.
"""
- CUSTOM_SCRIPT = 'CUSTOM-SCRIPT|SCRIPT-NAME'
- DNS = 'DNS|NAMESERVER|'
- DOMAINNAME = 'NETWORK|DOMAINNAME'
- HOSTNAME = 'NETWORK|HOSTNAME'
- MARKERID = 'MISC|MARKER-ID'
- PASS = 'PASSWORD|-PASS'
- RESETPASS = 'PASSWORD|RESET'
- SUFFIX = 'DNS|SUFFIX|'
- TIMEZONE = 'DATETIME|TIMEZONE'
- UTC = 'DATETIME|UTC'
- POST_GC_STATUS = 'MISC|POST-GC-STATUS'
- DEFAULT_RUN_POST_SCRIPT = 'MISC|DEFAULT-RUN-POST-CUST-SCRIPT'
+ CUSTOM_SCRIPT = "CUSTOM-SCRIPT|SCRIPT-NAME"
+ DNS = "DNS|NAMESERVER|"
+ DOMAINNAME = "NETWORK|DOMAINNAME"
+ HOSTNAME = "NETWORK|HOSTNAME"
+ MARKERID = "MISC|MARKER-ID"
+ PASS = "PASSWORD|-PASS"
+ RESETPASS = "PASSWORD|RESET"
+ SUFFIX = "DNS|SUFFIX|"
+ TIMEZONE = "DATETIME|TIMEZONE"
+ UTC = "DATETIME|UTC"
+ POST_GC_STATUS = "MISC|POST-GC-STATUS"
+ DEFAULT_RUN_POST_SCRIPT = "MISC|DEFAULT-RUN-POST-CUST-SCRIPT"
+ CLOUDINIT_META_DATA = "CLOUDINIT|METADATA"
+ CLOUDINIT_USER_DATA = "CLOUDINIT|USERDATA"
def __init__(self, configFile):
self._configFile = configFile
@@ -82,8 +84,8 @@ class Config(object):
def nics(self):
"""Return the list of associated NICs."""
res = []
- nics = self._configFile['NIC-CONFIG|NICS']
- for nic in nics.split(','):
+ nics = self._configFile["NIC-CONFIG|NICS"]
+ for nic in nics.split(","):
res.append(Nic(nic, self._configFile))
return res
@@ -91,11 +93,11 @@ class Config(object):
@property
def reset_password(self):
"""Retreives if the root password needs to be reset."""
- resetPass = self._configFile.get(Config.RESETPASS, 'no')
+ resetPass = self._configFile.get(Config.RESETPASS, "no")
resetPass = resetPass.lower()
- if resetPass not in ('yes', 'no'):
- raise ValueError('ResetPassword value should be yes/no')
- return resetPass == 'yes'
+ if resetPass not in ("yes", "no"):
+ raise ValueError("ResetPassword value should be yes/no")
+ return resetPass == "yes"
@property
def marker_id(self):
@@ -110,11 +112,11 @@ class Config(object):
@property
def post_gc_status(self):
"""Return whether to post guestinfo.gc.status VMX property."""
- postGcStatus = self._configFile.get(Config.POST_GC_STATUS, 'no')
+ postGcStatus = self._configFile.get(Config.POST_GC_STATUS, "no")
postGcStatus = postGcStatus.lower()
- if postGcStatus not in ('yes', 'no'):
- raise ValueError('PostGcStatus value should be yes/no')
- return postGcStatus == 'yes'
+ if postGcStatus not in ("yes", "no"):
+ raise ValueError("PostGcStatus value should be yes/no")
+ return postGcStatus == "yes"
@property
def default_run_post_script(self):
@@ -123,11 +125,22 @@ class Config(object):
is absent in VM Tools configuration
"""
defaultRunPostScript = self._configFile.get(
- Config.DEFAULT_RUN_POST_SCRIPT,
- 'no')
+ Config.DEFAULT_RUN_POST_SCRIPT, "no"
+ )
defaultRunPostScript = defaultRunPostScript.lower()
- if defaultRunPostScript not in ('yes', 'no'):
- raise ValueError('defaultRunPostScript value should be yes/no')
- return defaultRunPostScript == 'yes'
+ if defaultRunPostScript not in ("yes", "no"):
+ raise ValueError("defaultRunPostScript value should be yes/no")
+ return defaultRunPostScript == "yes"
+
+ @property
+ def meta_data_name(self):
+ """Return the name of cloud-init meta data."""
+ return self._configFile.get(Config.CLOUDINIT_META_DATA, None)
+
+ @property
+ def user_data_name(self):
+ """Return the name of cloud-init user data."""
+ return self._configFile.get(Config.CLOUDINIT_USER_DATA, None)
+
# vi: ts=4 expandtab
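The two new CLOUDINIT keys surface through the properties added above; a short sketch, assuming a hypothetical customization file path:

```python
from cloudinit.sources.helpers.vmware.imc.config import Config
from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile

# Hypothetical location of the parsed customization specification.
cf = ConfigFile("/var/run/vmware-imc/cust.cfg")
conf = Config(cf)

# Each property returns the configured name, or None when the
# [CLOUDINIT] METADATA / USERDATA keys are absent from the spec.
print(conf.meta_data_name, conf.user_data_name)
```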
diff --git a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
index 2ab22de9..8240ea8f 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_custom_script.py
@@ -9,8 +9,7 @@ import logging
import os
import stat
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -24,8 +23,7 @@ class CustomScriptConstant(object):
# The user defined custom script
CUSTOM_SCRIPT_NAME = "customize.sh"
- CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR,
- CUSTOM_SCRIPT_NAME)
+ CUSTOM_SCRIPT = os.path.join(CUSTOM_TMP_DIR, CUSTOM_SCRIPT_NAME)
POST_CUSTOM_PENDING_MARKER = "/.guest-customization-post-reboot-pending"
# The cc_scripts_per_instance script to launch custom script
POST_CUSTOM_SCRIPT_NAME = "post-customize-guest.sh"
@@ -39,22 +37,25 @@ class RunCustomScript(object):
def prepare_script(self):
if not os.path.exists(self.scriptpath):
- raise CustomScriptNotFound("Script %s not found!! "
- "Cannot execute custom script!"
- % self.scriptpath)
+ raise CustomScriptNotFound(
+ "Script %s not found!! Cannot execute custom script!"
+ % self.scriptpath
+ )
util.ensure_dir(CustomScriptConstant.CUSTOM_TMP_DIR)
- LOG.debug("Copying custom script to %s",
- CustomScriptConstant.CUSTOM_SCRIPT)
+ LOG.debug(
+ "Copying custom script to %s", CustomScriptConstant.CUSTOM_SCRIPT
+ )
util.copy(self.scriptpath, CustomScriptConstant.CUSTOM_SCRIPT)
# Strip any CR characters from the decoded script
- content = util.load_file(
- CustomScriptConstant.CUSTOM_SCRIPT).replace("\r", "")
- util.write_file(CustomScriptConstant.CUSTOM_SCRIPT,
- content,
- mode=0o544)
+ content = util.load_file(CustomScriptConstant.CUSTOM_SCRIPT).replace(
+ "\r", ""
+ )
+ util.write_file(
+ CustomScriptConstant.CUSTOM_SCRIPT, content, mode=0o544
+ )
class PreCustomScript(RunCustomScript):
@@ -70,8 +71,8 @@ class PostCustomScript(RunCustomScript):
super(PostCustomScript, self).__init__(scriptname, directory)
self.ccScriptsDir = ccScriptsDir
self.ccScriptPath = os.path.join(
- ccScriptsDir,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME)
+ ccScriptsDir, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ )
def execute(self):
"""
@@ -81,15 +82,17 @@ class PostCustomScript(RunCustomScript):
"""
self.prepare_script()
- LOG.debug("Copying post customize run script to %s",
- self.ccScriptPath)
+ LOG.debug("Copying post customize run script to %s", self.ccScriptPath)
util.copy(
- os.path.join(self.directory,
- CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME),
- self.ccScriptPath)
+ os.path.join(
+ self.directory, CustomScriptConstant.POST_CUSTOM_SCRIPT_NAME
+ ),
+ self.ccScriptPath,
+ )
st = os.stat(self.ccScriptPath)
os.chmod(self.ccScriptPath, st.st_mode | stat.S_IEXEC)
LOG.info("Creating post customization pending marker")
util.ensure_file(CustomScriptConstant.POST_CUSTOM_PENDING_MARKER)
+
# vi: ts=4 expandtab
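For clarity, a self-contained sketch of the copy/strip/chmod sequence that `prepare_script()` performs; the temporary paths here are stand-ins for `CustomScriptConstant.CUSTOM_SCRIPT`, not the real locations.

```python
import os
import tempfile

from cloudinit import util

tmpdir = tempfile.mkdtemp()
src = os.path.join(tmpdir, "customize.sh")       # the user-supplied script
dst = os.path.join(tmpdir, "customize.copy.sh")  # stand-in for CUSTOM_SCRIPT
util.write_file(src, "#!/bin/sh\r\necho customized\r\n")

# Mirror prepare_script(): copy, strip CR characters from the decoded
# script, then install it read/execute-only (0o544).
util.copy(src, dst)
content = util.load_file(dst).replace("\r", "")
util.write_file(dst, content, mode=0o544)
assert "\r" not in util.load_file(dst)
```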
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index fc034c95..845294ec 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -35,7 +35,7 @@ class ConfigFile(ConfigSource, dict):
key = key.strip()
val = val.strip()
- if key.startswith('-') or '|-' in key:
+ if key.startswith("-") or "|-" in key:
canLog = False
else:
canLog = True
@@ -59,7 +59,7 @@ class ConfigFile(ConfigSource, dict):
Keyword arguments:
filename - The full path to the config file.
"""
- logger.info('Parsing the config file %s.', filename)
+ logger.info("Parsing the config file %s.", filename)
config = configparser.ConfigParser()
config.optionxform = str
@@ -71,7 +71,7 @@ class ConfigFile(ConfigSource, dict):
logger.debug("FOUND CATEGORY = '%s'", category)
for (key, value) in config.items(category):
- self._insertKey(category + '|' + key, value)
+ self._insertKey(category + "|" + key, value)
def should_keep_current_value(self, key):
"""
@@ -115,4 +115,5 @@ class ConfigFile(ConfigSource, dict):
"""
return len([key for key in self if key.startswith(prefix)])
+
# vi: ts=4 expandtab
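ConfigFile flattens INI sections into `CATEGORY|key` entries, which is why lookups elsewhere in these helpers use keys such as "NIC-CONFIG|NICS". A sketch of that flattening, with made-up file content:

```python
import configparser

config = configparser.ConfigParser()
config.optionxform = str  # keep keys case-sensitive, as ConfigFile does
config.read_string("[NIC-CONFIG]\nNICS = NIC1,NIC2\n")

flat = {}
for category in config.sections():
    for key, value in config.items(category):
        flat[category + "|" + key] = value

assert flat == {"NIC-CONFIG|NICS": "NIC1,NIC2"}
```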
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 5899d8f7..3b3b2d5a 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -11,4 +11,5 @@ from .config_source import ConfigSource
class ConfigNamespace(ConfigSource):
"""Specifies the Config Namespace."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 9cd2c0c0..6c135f48 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -9,9 +9,8 @@ import logging
import os
import re
-from cloudinit.net.network_state import mask_to_net_prefix
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
+from cloudinit.net.network_state import ipv4_mask_to_net_prefix
logger = logging.getLogger(__name__)
@@ -63,8 +62,10 @@ class NicConfigurator(object):
if not primary_nics:
return None
elif len(primary_nics) > 1:
- raise Exception('There can only be one primary nic',
- [nic.mac for nic in primary_nics])
+ raise Exception(
+ "There can only be one primary nic",
+ [nic.mac for nic in primary_nics],
+ )
else:
return primary_nics[0]
@@ -73,17 +74,17 @@ class NicConfigurator(object):
Create the mac2Name dictionary
The mac address(es) are in the lower case
"""
- cmd = ['ip', 'addr', 'show']
+ cmd = ["ip", "addr", "show"]
output, _err = subp.subp(cmd)
- sections = re.split(r'\n\d+: ', '\n' + output)[1:]
+ sections = re.split(r"\n\d+: ", "\n" + output)[1:]
- macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+ macPat = r"link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))"
for section in sections:
match = re.search(macPat, section)
if not match: # Only keep info about nics
continue
mac = match.group(1).lower()
- name = section.split(':', 1)[0]
+ name = section.split(":", 1)[0]
self.mac2Name[mac] = name
def gen_one_nic(self, nic):
@@ -95,11 +96,11 @@ class NicConfigurator(object):
mac = nic.mac.lower()
name = self.mac2Name.get(mac)
if not name:
- raise ValueError('No known device has MACADDR: %s' % nic.mac)
+ raise ValueError("No known device has MACADDR: %s" % nic.mac)
nics_cfg_list = []
- cfg = {'type': 'physical', 'name': name, 'mac_address': mac}
+ cfg = {"type": "physical", "name": name, "mac_address": mac}
subnet_list = []
route_list = []
@@ -114,7 +115,7 @@ class NicConfigurator(object):
subnet_list.extend(subnets)
route_list.extend(routes)
- cfg.update({'subnets': subnet_list})
+ cfg.update({"subnets": subnet_list})
nics_cfg_list.append(cfg)
if route_list:
@@ -135,17 +136,17 @@ class NicConfigurator(object):
route_list = []
if nic.onboot:
- subnet.update({'control': 'auto'})
+ subnet.update({"control": "auto"})
bootproto = nic.bootProto.lower()
- if nic.ipv4_mode.lower() == 'disabled':
- bootproto = 'manual'
+ if nic.ipv4_mode.lower() == "disabled":
+ bootproto = "manual"
- if bootproto != 'static':
- subnet.update({'type': 'dhcp'})
+ if bootproto != "static":
+ subnet.update({"type": "dhcp"})
return ([subnet], route_list)
else:
- subnet.update({'type': 'static'})
+ subnet.update({"type": "static"})
# Static Ipv4
addrs = nic.staticIpv4
@@ -154,20 +155,21 @@ class NicConfigurator(object):
v4 = addrs[0]
if v4.ip:
- subnet.update({'address': v4.ip})
+ subnet.update({"address": v4.ip})
if v4.netmask:
- subnet.update({'netmask': v4.netmask})
+ subnet.update({"netmask": v4.netmask})
# Add the primary gateway
if nic.primary and v4.gateways:
self.ipv4PrimaryGateway = v4.gateways[0]
- subnet.update({'gateway': self.ipv4PrimaryGateway})
+ subnet.update({"gateway": self.ipv4PrimaryGateway})
return ([subnet], route_list)
# Add routes if there is no primary nic
if not self._primaryNic and v4.gateways:
subnet.update(
- {'routes': self.gen_ipv4_route(nic, v4.gateways, v4.netmask)})
+ {"routes": self.gen_ipv4_route(nic, v4.gateways, v4.netmask)}
+ )
return ([subnet], route_list)
@@ -180,14 +182,18 @@ class NicConfigurator(object):
"""
route_list = []
- cidr = mask_to_net_prefix(netmask)
+ cidr = ipv4_mask_to_net_prefix(netmask)
for gateway in gateways:
destination = "%s/%d" % (gen_subnet(gateway, netmask), cidr)
- route_list.append({'destination': destination,
- 'type': 'route',
- 'gateway': gateway,
- 'metric': 10000})
+ route_list.append(
+ {
+ "destination": destination,
+ "type": "route",
+ "gateway": gateway,
+ "metric": 10000,
+ }
+ )
return route_list
@@ -208,9 +214,11 @@ class NicConfigurator(object):
addrs = nic.staticIpv6
for addr in addrs:
- subnet = {'type': 'static6',
- 'address': addr.ip,
- 'netmask': addr.netmask}
+ subnet = {
+ "type": "static6",
+ "address": addr.ip,
+ "netmask": addr.netmask,
+ }
subnet_list.append(subnet)
# TODO: Add the primary gateway
@@ -226,9 +234,9 @@ class NicConfigurator(object):
route_list = []
for addr in addrs:
- route_list.append({'type': 'route',
- 'gateway': addr.gateway,
- 'metric': 10000})
+ route_list.append(
+ {"type": "route", "gateway": addr.gateway, "metric": 10000}
+ )
return route_list
@@ -246,7 +254,7 @@ class NicConfigurator(object):
return nics_cfg_list
def clear_dhcp(self):
- logger.info('Clearing DHCP leases')
+ logger.info("Clearing DHCP leases")
# Ignore the return code 1.
subp.subp(["pkill", "dhclient"], rcs=[0, 1])
@@ -262,11 +270,12 @@ class NicConfigurator(object):
logger.info("Debian OS not detected. Skipping the configure step")
return
- containingDir = '/etc/network'
+ containingDir = "/etc/network"
- interfaceFile = os.path.join(containingDir, 'interfaces')
- originalFile = os.path.join(containingDir,
- 'interfaces.before_vmware_customization')
+ interfaceFile = os.path.join(containingDir, "interfaces")
+ originalFile = os.path.join(
+ containingDir, "interfaces.before_vmware_customization"
+ )
if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
os.rename(interfaceFile, originalFile)
@@ -274,12 +283,13 @@ class NicConfigurator(object):
lines = [
"# DO NOT EDIT THIS FILE BY HAND --"
" AUTOMATICALLY GENERATED BY cloud-init",
- "source /etc/network/interfaces.d/*.cfg",
+ "source /etc/network/interfaces.d/*",
"source-directory /etc/network/interfaces.d",
]
- util.write_file(interfaceFile, content='\n'.join(lines))
+ util.write_file(interfaceFile, content="\n".join(lines))
self.clear_dhcp()
+
# vi: ts=4 expandtab
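The import change above swaps `mask_to_net_prefix` for `ipv4_mask_to_net_prefix`; a sketch of the route entries `gen_ipv4_route()` builds from it, with illustrative addresses:

```python
from cloudinit.net.network_state import ipv4_mask_to_net_prefix

netmask = "255.255.255.0"
cidr = ipv4_mask_to_net_prefix(netmask)  # 24

# One such entry is emitted per gateway, mirroring the loop above;
# the destination comes from gen_subnet(gateway, netmask).
route = {
    "destination": "192.168.10.0/%d" % cidr,
    "type": "route",
    "gateway": "192.168.10.1",
    "metric": 10000,
}
```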
diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
index d16a7690..4d3967a1 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py
@@ -9,8 +9,7 @@
import logging
import os
-from cloudinit import subp
-from cloudinit import util
+from cloudinit import subp, util
LOG = logging.getLogger(__name__)
@@ -20,6 +19,7 @@ class PasswordConfigurator(object):
Class for changing configurations related to passwords in a VM. Includes
setting and expiring passwords.
"""
+
def configure(self, passwd, resetPasswd, distro):
"""
Main method to perform all functionalities based on configuration file
@@ -28,25 +28,25 @@ class PasswordConfigurator(object):
@param resetPasswd: boolean to determine if password needs to be reset.
@return cfg: dict to be used by cloud-init set_passwd code.
"""
- LOG.info('Starting password configuration')
+ LOG.info("Starting password configuration")
if passwd:
passwd = util.b64d(passwd)
allRootUsers = []
- for line in open('/etc/passwd', 'r'):
- if line.split(':')[2] == '0':
- allRootUsers.append(line.split(':')[0])
+ for line in open("/etc/passwd", "r"):
+ if line.split(":")[2] == "0":
+ allRootUsers.append(line.split(":")[0])
# read shadow file and check for each user, if its uid0 or root.
uidUsersList = []
- for line in open('/etc/shadow', 'r'):
- user = line.split(':')[0]
+ for line in open("/etc/shadow", "r"):
+ user = line.split(":")[0]
if user in allRootUsers:
uidUsersList.append(user)
if passwd:
- LOG.info('Setting admin password')
- distro.set_passwd('root', passwd)
+ LOG.info("Setting admin password")
+ distro.set_passwd("root", passwd)
if resetPasswd:
self.reset_password(uidUsersList)
- LOG.info('Configure Password completed!')
+ LOG.info("Configure Password completed!")
def reset_password(self, uidUserList):
"""
@@ -54,15 +54,19 @@ class PasswordConfigurator(object):
not succeeded using passwd command. Log failure message otherwise.
@param: list of users for which to expire password.
"""
- LOG.info('Expiring password.')
+ LOG.info("Expiring password.")
for user in uidUserList:
try:
- subp.subp(['passwd', '--expire', user])
+ subp.subp(["passwd", "--expire", user])
except subp.ProcessExecutionError as e:
- if os.path.exists('/usr/bin/chage'):
- subp.subp(['chage', '-d', '0', user])
+ if os.path.exists("/usr/bin/chage"):
+ subp.subp(["chage", "-d", "0", user])
else:
- LOG.warning('Failed to expire password for %s with error: '
- '%s', user, e)
+ LOG.warning(
+ "Failed to expire password for %s with error: %s",
+ user,
+ e,
+ )
+
# vi: ts=4 expandtab
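The expiry fallback above boils down to this sequence (simplified: the real code also checks for /usr/bin/chage and logs on failure; the user name is hypothetical and both commands need root):

```python
from cloudinit import subp

user = "root"  # in practice, taken from the uid-0 user list
try:
    subp.subp(["passwd", "--expire", user])
except subp.ProcessExecutionError:
    # Some distros lack `passwd --expire`; fall back to chage.
    subp.subp(["chage", "-d", "0", user])
```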
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index 7ec06a9c..e99f9b43 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -9,4 +9,5 @@
class ConfigSource(object):
"""Specifies a source for the Config Content."""
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
index 65ae7390..eda84cfb 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -11,5 +11,7 @@ class GuestCustErrorEnum(object):
GUESTCUST_ERROR_SUCCESS = 0
GUESTCUST_ERROR_SCRIPT_DISABLED = 6
+ GUESTCUST_ERROR_WRONG_META_FORMAT = 9
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
index e84c1cb0..33169a7e 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
@@ -14,4 +14,5 @@ class GuestCustEventEnum(object):
GUESTCUST_EVENT_ENABLE_NICS = 103
GUESTCUST_EVENT_QUERY_NICS = 104
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
index a8211dea..c74fbc8b 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -12,4 +12,5 @@ class GuestCustStateEnum(object):
GUESTCUST_STATE_RUNNING = 4
GUESTCUST_STATE_DONE = 5
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
index d919f693..08763e62 100644
--- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -73,7 +73,7 @@ def get_nics_to_enable(nicsfilepath):
if not os.path.exists(nicsfilepath):
return None
- with open(nicsfilepath, 'r') as fp:
+ with open(nicsfilepath, "r") as fp:
nics = fp.read(NICS_SIZE)
return nics
@@ -95,7 +95,8 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
- nics)
+ nics,
+ )
if not out:
time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
continue
@@ -108,32 +109,36 @@ def enable_nics(nics):
(out, _err) = set_customization_status(
GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
- nics)
+ nics,
+ )
if out and out == NICS_STATUS_CONNECTED:
logger.info("NICS are connected on %d second", count)
return
time.sleep(enableNicsWaitSeconds)
- logger.warning("Can't connect network interfaces after %d attempts",
- enableNicsWaitRetries)
+ logger.warning(
+ "Can't connect network interfaces after %d attempts",
+ enableNicsWaitRetries,
+ )
def get_tools_config(section, key, defaultVal):
- """ Return the value of [section] key from VMTools configuration.
+ """Return the value of [section] key from VMTools configuration.
- @param section: String of section to read from VMTools config
- @returns: String value from key in [section] or defaultVal if
- [section] is not present or vmware-toolbox-cmd is
- not installed.
+ @param section: String of section to read from VMTools config
+ @returns: String value from key in [section] or defaultVal if
+ [section] is not present or vmware-toolbox-cmd is
+ not installed.
"""
- if not subp.which('vmware-toolbox-cmd'):
+ if not subp.which("vmware-toolbox-cmd"):
logger.debug(
- 'vmware-toolbox-cmd not installed, returning default value')
+ "vmware-toolbox-cmd not installed, returning default value"
+ )
return defaultVal
- cmd = ['vmware-toolbox-cmd', 'config', 'get', section, key]
+ cmd = ["vmware-toolbox-cmd", "config", "get", section, key]
try:
(outText, _) = subp.subp(cmd)
@@ -141,22 +146,27 @@ def get_tools_config(section, key, defaultVal):
if e.exit_code == 69:
logger.debug(
"vmware-toolbox-cmd returned 69 (unavailable) for cmd: %s."
- " Return default value: %s", " ".join(cmd), defaultVal)
+ " Return default value: %s",
+ " ".join(cmd),
+ defaultVal,
+ )
else:
logger.error("Failed running %s[%s]", cmd, e.exit_code)
logger.exception(e)
return defaultVal
retValue = defaultVal
- m = re.match(r'([^=]+)=(.*)', outText)
+ m = re.match(r"([^=]+)=(.*)", outText)
if m:
retValue = m.group(2).strip()
- logger.debug("Get tools config: [%s] %s = %s",
- section, key, retValue)
+ logger.debug("Get tools config: [%s] %s = %s", section, key, retValue)
else:
logger.debug(
"Tools config: [%s] %s is not found, return default value: %s",
- section, key, retValue)
+ section,
+ key,
+ retValue,
+ )
return retValue
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
index d793bdeb..673204a0 100644
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -18,18 +18,19 @@ class Ipv4ModeEnum(object):
# The legacy mode which only allows dhcp/static based on whether IPv4
# addresses list is empty or not
- IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+ IPV4_MODE_BACKWARDS_COMPATIBLE = "BACKWARDS_COMPATIBLE"
# IPv4 must use static address. Reserved for future use
- IPV4_MODE_STATIC = 'STATIC'
+ IPV4_MODE_STATIC = "STATIC"
# IPv4 must use DHCPv4. Reserved for future use
- IPV4_MODE_DHCP = 'DHCP'
+ IPV4_MODE_DHCP = "DHCP"
# IPv4 must be disabled
- IPV4_MODE_DISABLED = 'DISABLED'
+ IPV4_MODE_DISABLED = "DISABLED"
# IPv4 settings should be left untouched. Reserved for future use
- IPV4_MODE_AS_IS = 'AS_IS'
+ IPV4_MODE_AS_IS = "AS_IS"
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index ef8f87f7..7b742d0f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -20,7 +20,7 @@ class Nic(NicBase):
self._configFile = configFile
def _get(self, what):
- return self._configFile.get(self.name + '|' + what, None)
+ return self._configFile.get(self.name + "|" + what, None)
def _get_count_with_prefix(self, prefix):
return self._configFile.get_count_with_prefix(self.name + prefix)
@@ -31,29 +31,29 @@ class Nic(NicBase):
@property
def mac(self):
- return self._get('MACADDR').lower()
+ return self._get("MACADDR").lower()
@property
def primary(self):
- value = self._get('PRIMARY')
+ value = self._get("PRIMARY")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def onboot(self):
- value = self._get('ONBOOT')
+ value = self._get("ONBOOT")
if value:
value = value.lower()
- return value == 'yes' or value == 'true'
+ return value == "yes" or value == "true"
else:
return False
@property
def bootProto(self):
- value = self._get('BOOTPROTO')
+ value = self._get("BOOTPROTO")
if value:
return value.lower()
else:
@@ -61,7 +61,7 @@ class Nic(NicBase):
@property
def ipv4_mode(self):
- value = self._get('IPv4_MODE')
+ value = self._get("IPv4_MODE")
if value:
return value.lower()
else:
@@ -80,7 +80,7 @@ class Nic(NicBase):
@property
def staticIpv6(self):
- cnt = self._get_count_with_prefix('|IPv6ADDR|')
+ cnt = self._get_count_with_prefix("|IPv6ADDR|")
if not cnt:
return None
@@ -100,17 +100,17 @@ class StaticIpv4Addr(StaticIpv4Base):
@property
def ip(self):
- return self._nic._get('IPADDR')
+ return self._nic._get("IPADDR")
@property
def netmask(self):
- return self._nic._get('NETMASK')
+ return self._nic._get("NETMASK")
@property
def gateways(self):
- value = self._nic._get('GATEWAY')
+ value = self._nic._get("GATEWAY")
if value:
- return [x.strip() for x in value.split(',')]
+ return [x.strip() for x in value.split(",")]
else:
return None
@@ -124,14 +124,15 @@ class StaticIpv6Addr(StaticIpv6Base):
@property
def ip(self):
- return self._nic._get('IPv6ADDR|' + str(self._index))
+ return self._nic._get("IPv6ADDR|" + str(self._index))
@property
def netmask(self):
- return self._nic._get('IPv6NETMASK|' + str(self._index))
+ return self._nic._get("IPv6NETMASK|" + str(self._index))
@property
def gateway(self):
- return self._nic._get('IPv6GATEWAY|' + str(self._index))
+ return self._nic._get("IPv6GATEWAY|" + str(self._index))
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
index de7b866d..37d9602f 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic_base.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -18,7 +18,7 @@ class NicBase(object):
Retrieves the mac address of the nic
@return (str) : the MACADDR setting
"""
- raise NotImplementedError('MACADDR')
+ raise NotImplementedError("MACADDR")
@property
def primary(self):
@@ -29,7 +29,7 @@ class NicBase(object):
be set.
@return (bool): the PRIMARY setting
"""
- raise NotImplementedError('PRIMARY')
+ raise NotImplementedError("PRIMARY")
@property
def onboot(self):
@@ -37,7 +37,7 @@ class NicBase(object):
Retrieves whether the nic should be up at the boot time
@return (bool) : the ONBOOT setting
"""
- raise NotImplementedError('ONBOOT')
+ raise NotImplementedError("ONBOOT")
@property
def bootProto(self):
@@ -45,7 +45,7 @@ class NicBase(object):
Retrieves the boot protocol of the nic
@return (str): the BOOTPROTO setting, valid values: dhcp and static.
"""
- raise NotImplementedError('BOOTPROTO')
+ raise NotImplementedError("BOOTPROTO")
@property
def ipv4_mode(self):
@@ -54,7 +54,7 @@ class NicBase(object):
@return (str): the IPv4_MODE setting, valid values:
backwards_compatible, static, dhcp, disabled, as_is
"""
- raise NotImplementedError('IPv4_MODE')
+ raise NotImplementedError("IPv4_MODE")
@property
def staticIpv4(self):
@@ -62,7 +62,7 @@ class NicBase(object):
Retrieves the static IPv4 configuration of the nic
@return (StaticIpv4Base list): the static ipv4 setting
"""
- raise NotImplementedError('Static IPv4')
+ raise NotImplementedError("Static IPv4")
@property
def staticIpv6(self):
@@ -70,7 +70,7 @@ class NicBase(object):
Retrieves the IPv6 configuration of the nic
@return (StaticIpv6Base list): the static ipv6 setting
"""
- raise NotImplementedError('Static Ipv6')
+ raise NotImplementedError("Static Ipv6")
def validate(self):
"""
@@ -78,7 +78,7 @@ class NicBase(object):
For example, the staticIpv4 property is required and should not be
empty when ipv4Mode is STATIC
"""
- raise NotImplementedError('Check constraints on properties')
+ raise NotImplementedError("Check constraints on properties")
class StaticIpv4Base(object):
@@ -93,7 +93,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 address
@return (str): the IPADDR setting
"""
- raise NotImplementedError('Ipv4 Address')
+ raise NotImplementedError("Ipv4 Address")
@property
def netmask(self):
@@ -101,7 +101,7 @@ class StaticIpv4Base(object):
Retrieves the Ipv4 NETMASK setting
@return (str): the NETMASK setting
"""
- raise NotImplementedError('Ipv4 NETMASK')
+ raise NotImplementedError("Ipv4 NETMASK")
@property
def gateways(self):
@@ -109,7 +109,7 @@ class StaticIpv4Base(object):
Retrieves the gateways on this Ipv4 subnet
@return (str list): the GATEWAY setting
"""
- raise NotImplementedError('Ipv4 GATEWAY')
+ raise NotImplementedError("Ipv4 GATEWAY")
class StaticIpv6Base(object):
@@ -123,7 +123,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 address
@return (str): the IPv6ADDR setting
"""
- raise NotImplementedError('Ipv6 Address')
+ raise NotImplementedError("Ipv6 Address")
@property
def netmask(self):
@@ -131,7 +131,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 NETMASK setting
@return (str): the IPv6NETMASK setting
"""
- raise NotImplementedError('Ipv6 NETMASK')
+ raise NotImplementedError("Ipv6 NETMASK")
@property
def gateway(self):
@@ -139,6 +139,7 @@ class StaticIpv6Base(object):
Retrieves the Ipv6 GATEWAY setting
@return (str): the IPv6GATEWAY setting
"""
- raise NotImplementedError('Ipv6 GATEWAY')
+ raise NotImplementedError("Ipv6 GATEWAY")
+
# vi: ts=4 expandtab
diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py
new file mode 100644
index 00000000..88a21034
--- /dev/null
+++ b/cloudinit/sources/helpers/vultr.py
@@ -0,0 +1,230 @@
+# Author: Eric Benner <ebenner@vultr.com>
+#
+# This file is part of cloud-init. See LICENSE file for license information.
+
+import json
+from functools import lru_cache
+
+from cloudinit import dmi
+from cloudinit import log as log
+from cloudinit import net, subp, url_helper, util
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
+
+# Module-level logger
+LOG = log.getLogger(__name__)
+
+
+@lru_cache()
+def get_metadata(url, timeout, retries, sec_between, agent):
+    # Bring up an interface (and try until one works)
+ exception = RuntimeError("Failed to DHCP")
+
+ # Seek iface with DHCP
+ for iface in net.get_interfaces():
+ # Skip dummy, lo interfaces
+ if "dummy" in iface[0]:
+ continue
+ if "lo" == iface[0]:
+ continue
+ try:
+ with EphemeralDHCPv4(
+ iface=iface[0], connectivity_url_data={"url": url}
+ ):
+ # Fetch the metadata
+ v1 = read_metadata(url, timeout, retries, sec_between, agent)
+
+ return json.loads(v1)
+ except (NoDHCPLeaseError, subp.ProcessExecutionError) as exc:
+ LOG.error("DHCP Exception: %s", exc)
+ exception = exc
+ raise exception
+
+
+# Read the system information from SMBIOS
+def get_sysinfo():
+ return {
+ "manufacturer": dmi.read_dmi_data("system-manufacturer"),
+ "subid": dmi.read_dmi_data("system-serial-number"),
+ }
+
+
+# Assumes is_vultr() has already been checked
+def is_baremetal():
+ if get_sysinfo()["manufacturer"] != "Vultr":
+ return True
+ return False
+
+
+# Confirm this is Vultr
+def is_vultr():
+ # VC2, VDC, and HFC use DMI
+ sysinfo = get_sysinfo()
+
+ if sysinfo["manufacturer"] == "Vultr":
+ return True
+
+ # Baremetal requires a kernel parameter
+ if "vultr" in util.get_cmdline().split():
+ return True
+
+ return False
+
+
+# Read Metadata endpoint
+def read_metadata(url, timeout, retries, sec_between, agent):
+ url = "%s/v1.json" % url
+
+    # Announce OS details so we can handle non-Vultr origin
+    # images and provide correct vendordata generation.
+ headers = {"Metadata-Token": "cloudinit", "User-Agent": agent}
+
+ response = url_helper.readurl(
+ url,
+ timeout=timeout,
+ retries=retries,
+ headers=headers,
+ sec_between=sec_between,
+ )
+
+ if not response.ok():
+ raise RuntimeError(
+            "Failed to connect to %s: Code: %s" % (url, response.code)
+ )
+
+ return response.contents.decode()
+
+
+# Wrapped for caching
+@lru_cache()
+def get_interface_map():
+ return net.get_interfaces_by_mac()
+
+
+# Convert a MAC address to its NIC name
+def get_interface_name(mac):
+ macs_to_nic = get_interface_map()
+
+ if mac not in macs_to_nic:
+ return None
+
+ return macs_to_nic.get(mac)
+
+
+# Generate network configs
+def generate_network_config(interfaces):
+ network = {
+ "version": 1,
+ "config": [{"type": "nameserver", "address": ["108.61.10.10"]}],
+ }
+
+ # Prepare interface 0, public
+ if len(interfaces) > 0:
+ public = generate_interface(interfaces[0], primary=True)
+ network["config"].append(public)
+
+ # Prepare additional interfaces, private
+ for i in range(1, len(interfaces)):
+ private = generate_interface(interfaces[i])
+ network["config"].append(private)
+
+ return network
+
+
+def generate_interface(interface, primary=False):
+ interface_name = get_interface_name(interface["mac"])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system" % interface["mac"]
+ )
+
+ netcfg = {
+ "name": interface_name,
+ "type": "physical",
+ "mac_address": interface["mac"],
+ }
+
+ if primary:
+ netcfg["accept-ra"] = 1
+ netcfg["subnets"] = [
+ {"type": "dhcp", "control": "auto"},
+ {"type": "ipv6_slaac", "control": "auto"},
+ ]
+
+ if not primary:
+ netcfg["subnets"] = [
+ {
+ "type": "static",
+ "control": "auto",
+ "address": interface["ipv4"]["address"],
+ "netmask": interface["ipv4"]["netmask"],
+ }
+ ]
+
+ generate_interface_routes(interface, netcfg)
+ generate_interface_additional_addresses(interface, netcfg)
+
+ # Add config to template
+ return netcfg
+
+
+def generate_interface_routes(interface, netcfg):
+ # Options that may or may not be used
+ if "mtu" in interface:
+ netcfg["mtu"] = interface["mtu"]
+
+ if "accept-ra" in interface:
+ netcfg["accept-ra"] = interface["accept-ra"]
+
+ if "routes" in interface:
+ netcfg["subnets"][0]["routes"] = interface["routes"]
+
+
+def generate_interface_additional_addresses(interface, netcfg):
+    # Check for additional IPv4 addresses (guard against a missing key)
+    additional_count = len(interface.get("ipv4", {}).get("additional", []))
+    if "ipv4" in interface and additional_count > 0:
+ for additional in interface["ipv4"]["additional"]:
+ add = {
+ "type": "static",
+ "control": "auto",
+ "address": additional["address"],
+ "netmask": additional["netmask"],
+ }
+
+ if "routes" in additional:
+ add["routes"] = additional["routes"]
+
+ netcfg["subnets"].append(add)
+
+    # Check for additional IPv6 addresses, guarded the same way
+    additional_count = len(interface.get("ipv6", {}).get("additional", []))
+    if "ipv6" in interface and additional_count > 0:
+ for additional in interface["ipv6"]["additional"]:
+ add = {
+ "type": "static6",
+ "control": "auto",
+ "address": "%s/%s"
+ % (additional["network"], additional["prefix"]),
+ }
+
+ if "routes" in additional:
+ add["routes"] = additional["routes"]
+
+ netcfg["subnets"].append(add)
+
+
+# Make required adjustments to the network configs provided
+def add_interface_names(interfaces):
+ for interface in interfaces:
+ interface_name = get_interface_name(interface["mac"])
+ if not interface_name:
+ raise RuntimeError(
+ "Interface: %s could not be found on the system"
+ % interface["mac"]
+ )
+ interface["name"] = interface_name
+
+ return interfaces
+
+
+# vi: ts=4 expandtab
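A sketch of how this Vultr helper composes a config (metadata below is hypothetical): the first interface is treated as the public/primary NIC and gets DHCP plus an ipv6_slaac subnet, while any later interfaces would be configured statically. As with the UpCloud sketch above, the MAC must exist on the running system.

```python
from cloudinit.sources.helpers import vultr

interfaces = [
    {
        "mac": "56:00:aa:bb:cc:dd",  # hypothetical MAC
        "ipv4": {"additional": []},
        "ipv6": {"additional": []},
    }
]

try:
    # Expected shape: {"version": 1, "config": [<nameserver 108.61.10.10>,
    # <primary NIC with dhcp + ipv6_slaac subnets>]}
    network = vultr.generate_network_config(interfaces)
except RuntimeError:
    pass  # raised when the MAC above is not on the running system
```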
diff --git a/cloudinit/sources/tests/__init__.py b/cloudinit/sources/tests/__init__.py
deleted file mode 100644
index e69de29b..00000000
--- a/cloudinit/sources/tests/__init__.py
+++ /dev/null
diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py
deleted file mode 100644
index 1420a988..00000000
--- a/cloudinit/sources/tests/test_init.py
+++ /dev/null
@@ -1,759 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import copy
-import inspect
-import os
-import stat
-
-from cloudinit.event import EventType
-from cloudinit.helpers import Paths
-from cloudinit import importer
-from cloudinit.sources import (
- EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE,
- METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource,
- canonical_cloud_id, redact_sensitive_keys)
-from cloudinit.tests.helpers import CiTestCase, mock
-from cloudinit.user_data import UserDataProcessor
-from cloudinit import util
-
-
-class DataSourceTestSubclassNet(DataSource):
-
- dsname = 'MyTestSubclass'
- url_max_wait = 55
-
- def __init__(self, sys_cfg, distro, paths, custom_metadata=None,
- custom_userdata=None, get_data_retval=True):
- super(DataSourceTestSubclassNet, self).__init__(
- sys_cfg, distro, paths)
- self._custom_userdata = custom_userdata
- self._custom_metadata = custom_metadata
- self._get_data_retval = get_data_retval
-
- def _get_cloud_name(self):
- return 'SubclassCloudName'
-
- def _get_data(self):
- if self._custom_metadata:
- self.metadata = self._custom_metadata
- else:
- self.metadata = {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}
- if self._custom_userdata:
- self.userdata_raw = self._custom_userdata
- else:
- self.userdata_raw = 'userdata_raw'
- self.vendordata_raw = 'vendordata_raw'
- return self._get_data_retval
-
-
-class InvalidDataSourceTestSubclassNet(DataSource):
- pass
-
-
-class TestDataSource(CiTestCase):
-
- with_logs = True
- maxDiff = None
-
- def setUp(self):
- super(TestDataSource, self).setUp()
- self.sys_cfg = {'datasource': {'_undef': {'key1': False}}}
- self.distro = 'distrotest' # generally should be a Distro object
- self.paths = Paths({})
- self.datasource = DataSource(self.sys_cfg, self.distro, self.paths)
-
- def test_datasource_init(self):
- """DataSource initializes metadata attributes, ds_cfg and ud_proc."""
- self.assertEqual(self.paths, self.datasource.paths)
- self.assertEqual(self.sys_cfg, self.datasource.sys_cfg)
- self.assertEqual(self.distro, self.datasource.distro)
- self.assertIsNone(self.datasource.userdata)
- self.assertEqual({}, self.datasource.metadata)
- self.assertIsNone(self.datasource.userdata_raw)
- self.assertIsNone(self.datasource.vendordata)
- self.assertIsNone(self.datasource.vendordata_raw)
- self.assertEqual({'key1': False}, self.datasource.ds_cfg)
- self.assertIsInstance(self.datasource.ud_proc, UserDataProcessor)
-
- def test_datasource_init_gets_ds_cfg_using_dsname(self):
- """Init uses DataSource.dsname for sourcing ds_cfg."""
- sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
- distro = 'distrotest' # generally should be a Distro object
- datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
- self.assertEqual({'key2': False}, datasource.ds_cfg)
-
- def test_str_is_classname(self):
- """The string representation of the datasource is the classname."""
- self.assertEqual('DataSource', str(self.datasource))
- self.assertEqual(
- 'DataSourceTestSubclassNet',
- str(DataSourceTestSubclassNet('', '', self.paths)))
-
- def test_datasource_get_url_params_defaults(self):
- """get_url_params default url config settings for the datasource."""
- params = self.datasource.get_url_params()
- self.assertEqual(params.max_wait_seconds, self.datasource.url_max_wait)
- self.assertEqual(params.timeout_seconds, self.datasource.url_timeout)
- self.assertEqual(params.num_retries, self.datasource.url_retries)
-
- def test_datasource_get_url_params_subclassed(self):
- """Subclasses can override get_url_params defaults."""
- sys_cfg = {'datasource': {'MyTestSubclass': {'key2': False}}}
- distro = 'distrotest' # generally should be a Distro object
- datasource = DataSourceTestSubclassNet(sys_cfg, distro, self.paths)
- expected = (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries)
- url_params = datasource.get_url_params()
- self.assertNotEqual(self.datasource.get_url_params(), url_params)
- self.assertEqual(expected, url_params)
-
- def test_datasource_get_url_params_ds_config_override(self):
- """Datasource configuration options can override url param defaults."""
- sys_cfg = {
- 'datasource': {
- 'MyTestSubclass': {
- 'max_wait': '1', 'timeout': '2', 'retries': '3'}}}
- datasource = DataSourceTestSubclassNet(
- sys_cfg, self.distro, self.paths)
- expected = (1, 2, 3)
- url_params = datasource.get_url_params()
- self.assertNotEqual(
- (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries),
- url_params)
- self.assertEqual(expected, url_params)
-
- def test_datasource_get_url_params_is_zero_or_greater(self):
- """get_url_params ignores timeouts with a value below 0."""
- # Set an override that is below 0 which gets ignored.
- sys_cfg = {'datasource': {'_undef': {'timeout': '-1'}}}
- datasource = DataSource(sys_cfg, self.distro, self.paths)
- (_max_wait, timeout, _retries) = datasource.get_url_params()
- self.assertEqual(0, timeout)
-
- def test_datasource_get_url_uses_defaults_on_errors(self):
- """On invalid system config values for url_params defaults are used."""
- # All invalid values should be logged
- sys_cfg = {'datasource': {
- '_undef': {
- 'max_wait': 'nope', 'timeout': 'bug', 'retries': 'nonint'}}}
- datasource = DataSource(sys_cfg, self.distro, self.paths)
- url_params = datasource.get_url_params()
- expected = (datasource.url_max_wait, datasource.url_timeout,
- datasource.url_retries)
- self.assertEqual(expected, url_params)
- logs = self.logs.getvalue()
- expected_logs = [
- "Config max_wait 'nope' is not an int, using default '-1'",
- "Config timeout 'bug' is not an int, using default '10'",
- "Config retries 'nonint' is not an int, using default '5'",
- ]
- for log in expected_logs:
- self.assertIn(log, logs)
-
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- def test_fallback_interface_is_discovered(self, m_get_fallback_nic):
- """The fallback_interface is discovered via find_fallback_nic."""
- m_get_fallback_nic.return_value = 'nic9'
- self.assertEqual('nic9', self.datasource.fallback_interface)
-
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- def test_fallback_interface_logs_undiscovered(self, m_get_fallback_nic):
- """Log a warning when fallback_interface can not discover the nic."""
- self.datasource._cloud_name = 'MySupahCloud'
- m_get_fallback_nic.return_value = None # Couldn't discover nic
- self.assertIsNone(self.datasource.fallback_interface)
- self.assertEqual(
- 'WARNING: Did not find a fallback interface on MySupahCloud.\n',
- self.logs.getvalue())
-
- @mock.patch('cloudinit.sources.net.find_fallback_nic')
- def test_wb_fallback_interface_is_cached(self, m_get_fallback_nic):
- """The fallback_interface is cached and won't be rediscovered."""
- self.datasource._fallback_interface = 'nic10'
- self.assertEqual('nic10', self.datasource.fallback_interface)
- m_get_fallback_nic.assert_not_called()
-
- def test__get_data_unimplemented(self):
- """Raise an error when _get_data is not implemented."""
- with self.assertRaises(NotImplementedError) as context_manager:
- self.datasource.get_data()
- self.assertIn(
- 'Subclasses of DataSource must implement _get_data',
- str(context_manager.exception))
- datasource2 = InvalidDataSourceTestSubclassNet(
- self.sys_cfg, self.distro, self.paths)
- with self.assertRaises(NotImplementedError) as context_manager:
- datasource2.get_data()
- self.assertIn(
- 'Subclasses of DataSource must implement _get_data',
- str(context_manager.exception))
-
- def test_get_data_calls_subclass__get_data(self):
- """Datasource.get_data uses the subclass' version of _get_data."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertTrue(datasource.get_data())
- self.assertEqual(
- {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'},
- datasource.metadata)
- self.assertEqual('userdata_raw', datasource.userdata_raw)
- self.assertEqual('vendordata_raw', datasource.vendordata_raw)
-
- def test_get_hostname_strips_local_hostname_without_domain(self):
- """Datasource.get_hostname strips metadata local-hostname of domain."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertTrue(datasource.get_data())
- self.assertEqual(
- 'test-subclass-hostname', datasource.metadata['local-hostname'])
- self.assertEqual('test-subclass-hostname', datasource.get_hostname())
- datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
- self.assertEqual('hostname', datasource.get_hostname())
-
- def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self):
- """Datasource.get_hostname with fqdn set gets qualified hostname."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertTrue(datasource.get_data())
- datasource.metadata['local-hostname'] = 'hostname.my.domain.com'
- self.assertEqual(
- 'hostname.my.domain.com', datasource.get_hostname(fqdn=True))
-
- def test_get_hostname_without_metadata_uses_system_hostname(self):
- """Datasource.gethostname runs util.get_hostname when no metadata."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
- with mock.patch(mock_fqdn) as m_fqdn:
- m_gethost.return_value = 'systemhostname.domain.com'
- m_fqdn.return_value = None # No maching fqdn in /etc/hosts
- self.assertEqual('systemhostname', datasource.get_hostname())
- self.assertEqual(
- 'systemhostname.domain.com',
- datasource.get_hostname(fqdn=True))
-
- def test_get_hostname_without_metadata_returns_none(self):
- """Datasource.gethostname returns None when metadata_only and no MD."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
- with mock.patch(mock_fqdn) as m_fqdn:
- self.assertIsNone(datasource.get_hostname(metadata_only=True))
- self.assertIsNone(
- datasource.get_hostname(fqdn=True, metadata_only=True))
- self.assertEqual([], m_gethost.call_args_list)
- self.assertEqual([], m_fqdn.call_args_list)
-
- def test_get_hostname_without_metadata_prefers_etc_hosts(self):
- """Datasource.gethostname prefers /etc/hosts to util.get_hostname."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- self.assertEqual({}, datasource.metadata)
- mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts'
- with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost:
- with mock.patch(mock_fqdn) as m_fqdn:
- m_gethost.return_value = 'systemhostname.domain.com'
- m_fqdn.return_value = 'fqdnhostname.domain.com'
- self.assertEqual('fqdnhostname', datasource.get_hostname())
- self.assertEqual('fqdnhostname.domain.com',
- datasource.get_hostname(fqdn=True))
-
- def test_get_data_does_not_write_instance_data_on_failure(self):
- """get_data does not write INSTANCE_JSON_FILE on get_data False."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- get_data_retval=False)
- self.assertFalse(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- self.assertFalse(
- os.path.exists(json_file), 'Found unexpected file %s' % json_file)
-
- def test_get_data_writes_json_instance_data_on_success(self):
- """get_data writes INSTANCE_JSON_FILE to run_dir as world readable."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- sys_info = {
- "python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
- with mock.patch("cloudinit.util.system_info", return_value=sys_info):
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': REDACT_SENSITIVE_VALUE,
- 'sensitive_keys': ['merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'kernel_release': '5.4.0-24-generic',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'subplatform': 'unknown',
- 'variant': 'ubuntu'},
- 'ds': {
-
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion'}}}
- self.assertEqual(expected, util.load_json(content))
- file_stat = os.stat(json_file)
- self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
- self.assertEqual(expected, util.load_json(content))
-
- def test_get_data_writes_redacted_public_json_instance_data(self):
- """get_data writes redacted content to public INSTANCE_JSON_FILE."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': {
- 'cred1': 'sekret', 'cred2': 'othersekret'}}})
- self.assertCountEqual(
- ('merged_cfg', 'security-credentials',),
- datasource.sensitive_metadata_keys)
- sys_info = {
- "python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
- with mock.patch("cloudinit.util.system_info", return_value=sys_info):
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- redacted = util.load_json(util.load_file(json_file))
- expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': REDACT_SENSITIVE_VALUE,
- 'sensitive_keys': [
- 'ds/meta_data/some/security-credentials', 'merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'kernel_release': '5.4.0-24-generic',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'subplatform': 'unknown',
- 'variant': 'ubuntu'},
- 'ds': {
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': REDACT_SENSITIVE_VALUE}}}
- }
- self.assertCountEqual(expected, redacted)
- file_stat = os.stat(json_file)
- self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
-
- def test_get_data_writes_json_instance_data_sensitive(self):
- """
- get_data writes unmodified data to sensitive file as root-readonly.
- """
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {'security-credentials': {
- 'cred1': 'sekret', 'cred2': 'othersekret'}}})
- sys_info = {
- "python": "3.7",
- "platform":
- "Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal",
- "uname": ["Linux", "myhost", "5.4.0-24-generic", "SMP blah",
- "x86_64"],
- "variant": "ubuntu", "dist": ["ubuntu", "20.04", "focal"]}
-
- self.assertCountEqual(
- ('merged_cfg', 'security-credentials',),
- datasource.sensitive_metadata_keys)
- with mock.patch("cloudinit.util.system_info", return_value=sys_info):
- datasource.get_data()
- sensitive_json_file = self.tmp_path(INSTANCE_JSON_SENSITIVE_FILE, tmp)
- content = util.load_file(sensitive_json_file)
- expected = {
- 'base64_encoded_keys': [],
- 'merged_cfg': {
- '_doc': (
- 'Merged cloud-init system config from '
- '/etc/cloud/cloud.cfg and /etc/cloud/cloud.cfg.d/'
- ),
- 'datasource': {'_undef': {'key1': False}}},
- 'sensitive_keys': [
- 'ds/meta_data/some/security-credentials', 'merged_cfg'],
- 'sys_info': sys_info,
- 'v1': {
- '_beta_keys': ['subplatform'],
- 'availability-zone': 'myaz',
- 'availability_zone': 'myaz',
- 'cloud-name': 'subclasscloudname',
- 'cloud_name': 'subclasscloudname',
- 'distro': 'ubuntu',
- 'distro_release': 'focal',
- 'distro_version': '20.04',
- 'instance-id': 'iid-datasource',
- 'instance_id': 'iid-datasource',
- 'kernel_release': '5.4.0-24-generic',
- 'local-hostname': 'test-subclass-hostname',
- 'local_hostname': 'test-subclass-hostname',
- 'machine': 'x86_64',
- 'platform': 'mytestsubclass',
- 'public_ssh_keys': [],
- 'python_version': '3.7',
- 'region': 'myregion',
- 'subplatform': 'unknown',
- 'system_platform':
- 'Linux-5.4.0-24-generic-x86_64-with-Ubuntu-20.04-focal',
- 'variant': 'ubuntu'},
- 'ds': {
- '_doc': EXPERIMENTAL_TEXT,
- 'meta_data': {
- 'availability_zone': 'myaz',
- 'local-hostname': 'test-subclass-hostname',
- 'region': 'myregion',
- 'some': {
- 'security-credentials':
- {'cred1': 'sekret', 'cred2': 'othersekret'}}}}
- }
- self.assertCountEqual(expected, util.load_json(content))
- file_stat = os.stat(sensitive_json_file)
- self.assertEqual(0o600, stat.S_IMODE(file_stat.st_mode))
- self.assertEqual(expected, util.load_json(content))
-
- def test_get_data_handles_redacted_unserializable_content(self):
- """get_data warns unserializable content in INSTANCE_JSON_FILE."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': self.paths}})
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- expected_metadata = {
- 'key1': 'val1',
- 'key2': {
- 'key2.1': "Warning: redacted unserializable type <class"
- " 'cloudinit.helpers.Paths'>"}}
- instance_json = util.load_json(content)
- self.assertEqual(
- expected_metadata, instance_json['ds']['meta_data'])
-
- def test_persist_instance_data_writes_ec2_metadata_when_set(self):
- """When ec2_metadata class attribute is set, persist to json."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- datasource.ec2_metadata = UNSET
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- instance_data = util.load_json(util.load_file(json_file))
- self.assertNotIn('ec2_metadata', instance_data['ds'])
- datasource.ec2_metadata = {'ec2stuff': 'is good'}
- datasource.persist_instance_data()
- instance_data = util.load_json(util.load_file(json_file))
- self.assertEqual(
- {'ec2stuff': 'is good'},
- instance_data['ds']['ec2_metadata'])
-
- def test_persist_instance_data_writes_network_json_when_set(self):
- """When network_data.json class attribute is set, persist to json."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}))
- datasource.get_data()
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- instance_data = util.load_json(util.load_file(json_file))
- self.assertNotIn('network_json', instance_data['ds'])
- datasource.network_json = {'network_json': 'is good'}
- datasource.persist_instance_data()
- instance_data = util.load_json(util.load_file(json_file))
- self.assertEqual(
- {'network_json': 'is good'},
- instance_data['ds']['network_json'])
-
- def test_get_data_base64encodes_unserializable_bytes(self):
- """On py3, get_data base64encodes any unserializable content."""
- tmp = self.tmp_dir()
- datasource = DataSourceTestSubclassNet(
- self.sys_cfg, self.distro, Paths({'run_dir': tmp}),
- custom_metadata={'key1': 'val1', 'key2': {'key2.1': b'\x123'}})
- self.assertTrue(datasource.get_data())
- json_file = self.tmp_path(INSTANCE_JSON_FILE, tmp)
- content = util.load_file(json_file)
- instance_json = util.load_json(content)
- self.assertCountEqual(
- ['ds/meta_data/key2/key2.1'],
- instance_json['base64_encoded_keys'])
- self.assertEqual(
- {'key1': 'val1', 'key2': {'key2.1': 'EjM='}},
- instance_json['ds']['meta_data'])
-
- def test_get_hostname_subclass_support(self):
- """Validate get_hostname signature on all subclasses of DataSource."""
- base_args = inspect.getfullargspec(DataSource.get_hostname)
- # Import all DataSource subclasses so we can inspect them.
- modules = util.find_modules(os.path.dirname(os.path.dirname(__file__)))
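- # (dirname(dirname(__file__)) resolves to the cloudinit/sources dir)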
- for _loc, name in modules.items():
- mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], [])
- if mod_locs:
- importer.import_module(mod_locs[0])
- for child in DataSource.__subclasses__():
- if 'Test' in child.dsname:
- continue
- self.assertEqual(
- base_args,
- inspect.getfullargspec(child.get_hostname),
- '%s does not implement DataSource.get_hostname params'
- % child)
- for grandchild in child.__subclasses__():
- self.assertEqual(
- base_args,
- inspect.getfullargspec(grandchild.get_hostname),
- '%s does not implement DataSource.get_hostname params'
- % grandchild)
-
- def test_clear_cached_attrs_resets_cached_attr_class_attributes(self):
- """Class attributes listed in cached_attr_defaults are reset."""
- count = 0
- # Setup values for all cached class attributes
- for attr, value in self.datasource.cached_attr_defaults:
- setattr(self.datasource, attr, count)
- count += 1
- self.datasource._dirty_cache = True
- self.datasource.clear_cached_attrs()
- for attr, value in self.datasource.cached_attr_defaults:
- self.assertEqual(value, getattr(self.datasource, attr))
-
- def test_clear_cached_attrs_noops_on_clean_cache(self):
- """Class attributes listed in cached_attr_defaults are reset."""
- count = 0
- # Setup values for all cached class attributes
- for attr, _ in self.datasource.cached_attr_defaults:
- setattr(self.datasource, attr, count)
- count += 1
- self.datasource._dirty_cache = False # Fake clean cache
- self.datasource.clear_cached_attrs()
- count = 0
- for attr, _ in self.datasource.cached_attr_defaults:
- self.assertEqual(count, getattr(self.datasource, attr))
- count += 1
-
- def test_clear_cached_attrs_skips_non_attr_class_attributes(self):
- """Skip any cached_attr_defaults which aren't class attributes."""
- self.datasource._dirty_cache = True
- self.datasource.clear_cached_attrs()
- for attr in ('ec2_metadata', 'network_json'):
- self.assertFalse(hasattr(self.datasource, attr))
-
- def test_clear_cached_attrs_of_custom_attrs(self):
- """Custom attr_values can be passed to clear_cached_attrs."""
- self.datasource._dirty_cache = True
- cached_attr_name = self.datasource.cached_attr_defaults[0][0]
- setattr(self.datasource, cached_attr_name, 'himom')
- self.datasource.myattr = 'orig'
- self.datasource.clear_cached_attrs(
- attr_defaults=(('myattr', 'updated'),))
- self.assertEqual('himom', getattr(self.datasource, cached_attr_name))
- self.assertEqual('updated', self.datasource.myattr)
-
- def test_update_metadata_only_acts_on_supported_update_events(self):
- """update_metadata won't get_data on unsupported update events."""
- self.datasource.update_events['network'].discard(EventType.BOOT)
- self.assertEqual(
- {'network': set([EventType.BOOT_NEW_INSTANCE])},
- self.datasource.update_events)
-
- def fake_get_data():
- raise Exception('get_data should not be called')
-
- self.datasource.get_data = fake_get_data
- self.assertFalse(
- self.datasource.update_metadata(
- source_event_types=[EventType.BOOT]))
-
- def test_update_metadata_returns_true_on_supported_update_event(self):
- """update_metadata returns get_data response on supported events."""
-
- def fake_get_data():
- return True
-
- self.datasource.get_data = fake_get_data
- self.datasource._network_config = 'something'
- self.datasource._dirty_cache = True
- self.assertTrue(
- self.datasource.update_metadata(
- source_event_types=[
- EventType.BOOT, EventType.BOOT_NEW_INSTANCE]))
- self.assertEqual(UNSET, self.datasource._network_config)
- self.assertIn(
- "DEBUG: Update datasource metadata and network config due to"
- " events: New instance first boot",
- self.logs.getvalue())
-
-
-class TestRedactSensitiveData(CiTestCase):
-
- def test_redact_sensitive_data_noop_when_no_sensitive_keys_present(self):
- """When sensitive_keys is absent or empty from metadata do nothing."""
- md = {'my': 'data'}
- self.assertEqual(
- md, redact_sensitive_keys(md, redact_value='redacted'))
- md['sensitive_keys'] = []
- self.assertEqual(
- md, redact_sensitive_keys(md, redact_value='redacted'))
-
- def test_redact_sensitive_data_redacts_exact_match_name(self):
- """Only exact matched sensitive_keys are redacted from metadata."""
- md = {'sensitive_keys': ['md/secure'],
- 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
- secure_md = copy.deepcopy(md)
- secure_md['md']['secure'] = 'redacted'
- self.assertEqual(
- secure_md,
- redact_sensitive_keys(md, redact_value='redacted'))
-
- def test_redact_sensitive_data_redacts_with_default_string(self):
- """When redact_value is absent, REDACT_SENSITIVE_VALUE is used."""
- md = {'sensitive_keys': ['md/secure'],
- 'md': {'secure': 's3kr1t', 'insecure': 'publik'}}
- secure_md = copy.deepcopy(md)
- secure_md['md']['secure'] = 'redacted for non-root user'
- self.assertEqual(
- secure_md,
- redact_sensitive_keys(md))
-
-
-class TestCanonicalCloudID(CiTestCase):
-
- def test_cloud_id_returns_platform_on_unknowns(self):
- """When region and cloud_name are unknown, return platform."""
- self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=METADATA_UNKNOWN,
- region=METADATA_UNKNOWN,
- platform='platform'))
-
- def test_cloud_id_returns_platform_on_none(self):
- """When region and cloud_name are unknown, return platform."""
- self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=None,
- region=None,
- platform='platform'))
-
- def test_cloud_id_returns_cloud_name_on_unknown_region(self):
- """When region is unknown, return cloud_name."""
- for region in (None, METADATA_UNKNOWN):
- self.assertEqual(
- 'cloudname',
- canonical_cloud_id(cloud_name='cloudname',
- region=region,
- platform='platform'))
-
- def test_cloud_id_returns_platform_on_unknown_cloud_name(self):
- """When region is set but cloud_name is unknown return cloud_name."""
- self.assertEqual(
- 'platform',
- canonical_cloud_id(cloud_name=METADATA_UNKNOWN,
- region='region',
- platform='platform'))
-
- def test_cloud_id_aws_based_on_region_and_cloud_name(self):
- """When cloud_name is aws, return proper cloud-id based on region."""
- self.assertEqual(
- 'aws-china',
- canonical_cloud_id(cloud_name='aws',
- region='cn-north-1',
- platform='platform'))
- self.assertEqual(
- 'aws',
- canonical_cloud_id(cloud_name='aws',
- region='us-east-1',
- platform='platform'))
- self.assertEqual(
- 'aws-gov',
- canonical_cloud_id(cloud_name='aws',
- region='us-gov-1',
- platform='platform'))
- self.assertEqual( # Overridden non-aws cloud_name is returned
- '!aws',
- canonical_cloud_id(cloud_name='!aws',
- region='us-gov-1',
- platform='platform'))
-
- def test_cloud_id_azure_based_on_region_and_cloud_name(self):
- """Report cloud-id when cloud_name is azure and region is in china."""
- self.assertEqual(
- 'azure-china',
- canonical_cloud_id(cloud_name='azure',
- region='chinaeast',
- platform='platform'))
- self.assertEqual(
- 'azure',
- canonical_cloud_id(cloud_name='azure',
- region='!chinaeast',
- platform='platform'))
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py
deleted file mode 100644
index a7bbdfd9..00000000
--- a/cloudinit/sources/tests/test_oracle.py
+++ /dev/null
@@ -1,785 +0,0 @@
-# This file is part of cloud-init. See LICENSE file for license information.
-
-import base64
-import copy
-import json
-from contextlib import ExitStack
-from unittest import mock
-
-import pytest
-
-from cloudinit.sources import DataSourceOracle as oracle
-from cloudinit.sources import NetworkConfigSource
-from cloudinit.sources.DataSourceOracle import OpcMetadata
-from cloudinit.tests import helpers as test_helpers
-from cloudinit.url_helper import UrlError
-
-DS_PATH = "cloudinit.sources.DataSourceOracle"
-
-# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Bare Metal Machine
-# with a secondary VNIC attached (vnicId truncated for Python line length)
-OPC_BM_SECONDARY_VNIC_RESPONSE = """\
-[ {
- "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtyvcucqkhdqmgjszebxe4hrb!!TRUNCATED||",
- "privateIp" : "10.0.0.8",
- "vlanTag" : 0,
- "macAddr" : "90:e2:ba:d4:f1:68",
- "virtualRouterIp" : "10.0.0.1",
- "subnetCidrBlock" : "10.0.0.0/24",
- "nicIndex" : 0
-}, {
- "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtfmkxjdy2sqidndiwrsg63zf!!TRUNCATED||",
- "privateIp" : "10.0.4.5",
- "vlanTag" : 1,
- "macAddr" : "02:00:17:05:CF:51",
- "virtualRouterIp" : "10.0.4.1",
- "subnetCidrBlock" : "10.0.4.0/24",
- "nicIndex" : 0
-} ]"""
-
-# `curl -L http://169.254.169.254/opc/v1/vnics/` on an Oracle Virtual Machine
-# with a secondary VNIC attached
-OPC_VM_SECONDARY_VNIC_RESPONSE = """\
-[ {
- "vnicId" : "ocid1.vnic.oc1.phx.abyhqljtch72z5pd76cc2636qeqh7z_truncated",
- "privateIp" : "10.0.0.230",
- "vlanTag" : 1039,
- "macAddr" : "02:00:17:05:D1:DB",
- "virtualRouterIp" : "10.0.0.1",
- "subnetCidrBlock" : "10.0.0.0/24"
-}, {
- "vnicId" : "ocid1.vnic.oc1.phx.abyhqljt4iew3gwmvrwrhhf3bp5drj_truncated",
- "privateIp" : "10.0.0.231",
- "vlanTag" : 1041,
- "macAddr" : "00:00:17:02:2B:B1",
- "virtualRouterIp" : "10.0.0.1",
- "subnetCidrBlock" : "10.0.0.0/24"
-} ]"""
-
-
-# Fetched with `curl http://169.254.169.254/opc/v1/instance/` (and then
-# truncated for line length)
-OPC_V2_METADATA = """\
-{
- "availabilityDomain" : "qIZq:PHX-AD-1",
- "faultDomain" : "FAULT-DOMAIN-2",
- "compartmentId" : "ocid1.tenancy.oc1..aaaaaaaao7f7cccogqrg5emjxkxmTRUNCATED",
- "displayName" : "instance-20200320-1400",
- "hostname" : "instance-20200320-1400",
- "id" : "ocid1.instance.oc1.phx.anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
- "image" : "ocid1.image.oc1.phx.aaaaaaaagmkn4gdhvvx24kiahh2b2qchsicTRUNCATED",
- "metadata" : {
- "ssh_authorized_keys" : "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
- "user_data" : "IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"
- },
- "region" : "phx",
- "canonicalRegionName" : "us-phoenix-1",
- "ociAdName" : "phx-ad-3",
- "shape" : "VM.Standard2.1",
- "state" : "Running",
- "timeCreated" : 1584727285318,
- "agentConfig" : {
- "monitoringDisabled" : true,
- "managementDisabled" : true
- }
-}"""
-
-# Just a small meaningless change to differentiate the two metadata versions
-OPC_V1_METADATA = OPC_V2_METADATA.replace("ocid1.instance", "ocid2.instance")
-
-
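-# Default IMDS metadata version for the oracle_ds fixture below; tests can
-# override it by parametrizing ``metadata_version`` directly (see
-# test_v1_platform_info_after_fetch).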
-@pytest.fixture
-def metadata_version():
- return 2
-
-
-@pytest.fixture
-def oracle_ds(request, fixture_utils, paths, metadata_version):
- """
- Return an instantiated DataSourceOracle.
-
- This also performs the mocking required for the default test case:
- * ``_read_system_uuid`` returns something,
- * ``_is_platform_viable`` returns True,
- * ``_is_iscsi_root`` returns True (the simpler code path),
- * ``read_opc_metadata`` returns ``OPC_V2_METADATA``
-
- (This uses the paths fixture for the required helpers.Paths object, and the
- fixture_utils fixture for fetching markers.)
- """
- sys_cfg = fixture_utils.closest_marker_first_arg_or(
- request, "ds_sys_cfg", mock.MagicMock()
- )
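- # OpcMetadata is a namedtuple of (version, instance_data, vnics_data)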
- metadata = OpcMetadata(metadata_version, json.loads(OPC_V2_METADATA), None)
- with mock.patch(DS_PATH + "._read_system_uuid", return_value="someuuid"):
- with mock.patch(DS_PATH + "._is_platform_viable", return_value=True):
- with mock.patch(DS_PATH + "._is_iscsi_root", return_value=True):
- with mock.patch(
- DS_PATH + ".read_opc_metadata",
- return_value=metadata,
- ):
- yield oracle.DataSourceOracle(
- sys_cfg=sys_cfg, distro=mock.Mock(), paths=paths,
- )
-
-
-class TestDataSourceOracle:
- def test_platform_info(self, oracle_ds):
- assert "oracle" == oracle_ds.cloud_name
- assert "oracle" == oracle_ds.platform_type
-
- def test_subplatform_before_fetch(self, oracle_ds):
- assert 'unknown' == oracle_ds.subplatform
-
- def test_platform_info_after_fetch(self, oracle_ds):
- oracle_ds._get_data()
- assert 'metadata (http://169.254.169.254/opc/v2/)' == \
- oracle_ds.subplatform
-
- @pytest.mark.parametrize('metadata_version', [1])
- def test_v1_platform_info_after_fetch(self, oracle_ds):
- oracle_ds._get_data()
- assert 'metadata (http://169.254.169.254/opc/v1/)' == \
- oracle_ds.subplatform
-
- def test_secondary_nics_disabled_by_default(self, oracle_ds):
- assert not oracle_ds.ds_cfg["configure_secondary_nics"]
-
- @pytest.mark.ds_sys_cfg(
- {"datasource": {"Oracle": {"configure_secondary_nics": True}}}
- )
- def test_sys_cfg_can_enable_configure_secondary_nics(self, oracle_ds):
- assert oracle_ds.ds_cfg["configure_secondary_nics"]
-
-
-class TestIsPlatformViable(test_helpers.CiTestCase):
- @mock.patch(DS_PATH + ".dmi.read_dmi_data",
- return_value=oracle.CHASSIS_ASSET_TAG)
- def test_expected_viable(self, m_read_dmi_data):
- """System with known chassis tag is viable."""
- self.assertTrue(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
-
- @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value=None)
- def test_expected_not_viable_dmi_data_none(self, m_read_dmi_data):
- """System without known chassis tag is not viable."""
- self.assertFalse(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
-
- @mock.patch(DS_PATH + ".dmi.read_dmi_data", return_value="LetsGoCubs")
- def test_expected_not_viable_other(self, m_read_dmi_data):
- """System with unnown chassis tag is not viable."""
- self.assertFalse(oracle._is_platform_viable())
- m_read_dmi_data.assert_has_calls([mock.call('chassis-asset-tag')])
-
-
-class TestNetworkConfigFromOpcImds:
- def test_no_secondary_nics_does_not_mutate_input(self, oracle_ds):
- oracle_ds._vnics_data = [{}]
- # We test this by using a non-dict to ensure that no dict
- # operations are used; failure would be seen as exceptions
- oracle_ds._network_config = object()
- oracle_ds._add_network_config_from_opc_imds()
-
- def test_bare_metal_machine_skipped(self, oracle_ds, caplog):
- # nicIndex in the first entry indicates a bare metal machine
- oracle_ds._vnics_data = json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)
- # We test this by using a non-dict to ensure that no dict
- # operations are used
- oracle_ds._network_config = object()
- oracle_ds._add_network_config_from_opc_imds()
- assert 'bare metal machine' in caplog.text
-
- def test_missing_mac_skipped(self, oracle_ds, caplog):
- oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
-
- oracle_ds._network_config = {
- 'version': 1, 'config': [{'primary': 'nic'}]
- }
- with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
- oracle_ds._add_network_config_from_opc_imds()
-
- assert 1 == len(oracle_ds.network_config['config'])
- assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
- caplog.text
-
- def test_missing_mac_skipped_v2(self, oracle_ds, caplog):
- oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
-
- oracle_ds._network_config = {
- 'version': 2, 'ethernets': {'primary': {'nic': {}}}
- }
- with mock.patch(DS_PATH + ".get_interfaces_by_mac", return_value={}):
- oracle_ds._add_network_config_from_opc_imds()
-
- assert 1 == len(oracle_ds.network_config['ethernets'])
- assert 'Interface with MAC 00:00:17:02:2b:b1 not found; skipping' in \
- caplog.text
-
- def test_secondary_nic(self, oracle_ds):
- oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
- oracle_ds._network_config = {
- 'version': 1, 'config': [{'primary': 'nic'}]
- }
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- with mock.patch(DS_PATH + ".get_interfaces_by_mac",
- return_value={mac_addr: nic_name}):
- oracle_ds._add_network_config_from_opc_imds()
-
- # The input is mutated
- assert 2 == len(oracle_ds.network_config['config'])
-
- secondary_nic_cfg = oracle_ds.network_config['config'][1]
- assert nic_name == secondary_nic_cfg['name']
- assert 'physical' == secondary_nic_cfg['type']
- assert mac_addr == secondary_nic_cfg['mac_address']
- assert 9000 == secondary_nic_cfg['mtu']
-
- assert 1 == len(secondary_nic_cfg['subnets'])
- subnet_cfg = secondary_nic_cfg['subnets'][0]
- # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- assert '10.0.0.231' == subnet_cfg['address']
-
- def test_secondary_nic_v2(self, oracle_ds):
- oracle_ds._vnics_data = json.loads(OPC_VM_SECONDARY_VNIC_RESPONSE)
- oracle_ds._network_config = {
- 'version': 2, 'ethernets': {'primary': {'nic': {}}}
- }
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- with mock.patch(DS_PATH + ".get_interfaces_by_mac",
- return_value={mac_addr: nic_name}):
- oracle_ds._add_network_config_from_opc_imds()
-
- # The input is mutated
- assert 2 == len(oracle_ds.network_config['ethernets'])
-
- secondary_nic_cfg = oracle_ds.network_config['ethernets']['ens3']
- assert secondary_nic_cfg['dhcp4'] is False
- assert secondary_nic_cfg['dhcp6'] is False
- assert mac_addr == secondary_nic_cfg['match']['macaddress']
- assert 9000 == secondary_nic_cfg['mtu']
-
- assert 1 == len(secondary_nic_cfg['addresses'])
- # These values are hard-coded in OPC_VM_SECONDARY_VNIC_RESPONSE
- assert '10.0.0.231' == secondary_nic_cfg['addresses'][0]
-
-
-class TestNetworkConfigFiltersNetFailover(test_helpers.CiTestCase):
-
- def setUp(self):
- super(TestNetworkConfigFiltersNetFailover, self).setUp()
- self.add_patch(DS_PATH + '.get_interfaces_by_mac',
- 'm_get_interfaces_by_mac')
- self.add_patch(DS_PATH + '.is_netfail_master', 'm_netfail_master')
-
- def test_ignore_bogus_network_config(self):
- netcfg = {'something': 'here'}
- passed_netcfg = copy.copy(netcfg)
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
-
- def test_ignore_network_config_unknown_versions(self):
- netcfg = {'something': 'here', 'version': 3}
- passed_netcfg = copy.copy(netcfg)
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
-
- def test_checks_v1_type_physical_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
- }
- netcfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_name, 'mac_address': mac_addr,
- 'subnets': [{'type': 'dhcp4'}]}]}
- passed_netcfg = copy.copy(netcfg)
- self.m_netfail_master.return_value = False
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual([mock.call(nic_name)],
- self.m_netfail_master.call_args_list)
-
- def test_checks_v1_skips_non_phys_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'bond0'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
- }
- netcfg = {'version': 1, 'config': [
- {'type': 'bond', 'name': nic_name, 'mac_address': mac_addr,
- 'subnets': [{'type': 'dhcp4'}]}]}
- passed_netcfg = copy.copy(netcfg)
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual(0, self.m_netfail_master.call_count)
-
- def test_removes_master_mac_property_v1(self):
- nic_master, mac_master = 'ens3', self.random_string()
- nic_other, mac_other = 'ens7', self.random_string()
- nic_extra, mac_extra = 'enp0s1f2', self.random_string()
- self.m_get_interfaces_by_mac.return_value = {
- mac_master: nic_master,
- mac_other: nic_other,
- mac_extra: nic_extra,
- }
- netcfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_master,
- 'mac_address': mac_master},
- {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
- {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
- ]}
-
- def _is_netfail_master(iface):
- return iface == 'ens3'
- self.m_netfail_master.side_effect = _is_netfail_master
- expected_cfg = {'version': 1, 'config': [
- {'type': 'physical', 'name': nic_master},
- {'type': 'physical', 'name': nic_other, 'mac_address': mac_other},
- {'type': 'physical', 'name': nic_extra, 'mac_address': mac_extra},
- ]}
- oracle._ensure_netfailover_safe(netcfg)
- self.assertEqual(expected_cfg, netcfg)
-
- def test_checks_v2_type_ethernet_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'ens3'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
- }
- netcfg = {'version': 2, 'ethernets': {
- nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
- 'match': {'macaddress': mac_addr}}}}
- passed_netcfg = copy.copy(netcfg)
- self.m_netfail_master.return_value = False
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual([mock.call(nic_name)],
- self.m_netfail_master.call_args_list)
-
- def test_skips_v2_non_ethernet_interfaces(self):
- mac_addr, nic_name = '00:00:17:02:2b:b1', 'wlps0'
- self.m_get_interfaces_by_mac.return_value = {
- mac_addr: nic_name,
- }
- netcfg = {'version': 2, 'wifis': {
- nic_name: {'dhcp4': True, 'critical': True, 'set-name': nic_name,
- 'match': {'macaddress': mac_addr}}}}
- passed_netcfg = copy.copy(netcfg)
- oracle._ensure_netfailover_safe(passed_netcfg)
- self.assertEqual(netcfg, passed_netcfg)
- self.assertEqual(0, self.m_netfail_master.call_count)
-
- def test_removes_master_mac_property_v2(self):
- nic_master, mac_master = 'ens3', self.random_string()
- nic_other, mac_other = 'ens7', self.random_string()
- nic_extra, mac_extra = 'enp0s1f2', self.random_string()
- self.m_get_interfaces_by_mac.return_value = {
- mac_master: nic_master,
- mac_other: nic_other,
- mac_extra: nic_extra,
- }
- netcfg = {'version': 2, 'ethernets': {
- nic_extra: {'dhcp4': True, 'set-name': nic_extra,
- 'match': {'macaddress': mac_extra}},
- nic_other: {'dhcp4': True, 'set-name': nic_other,
- 'match': {'macaddress': mac_other}},
- nic_master: {'dhcp4': True, 'set-name': nic_master,
- 'match': {'macaddress': mac_master}},
- }}
-
- def _is_netfail_master(iface):
- return iface == 'ens3'
- self.m_netfail_master.side_effect = _is_netfail_master
-
- expected_cfg = {'version': 2, 'ethernets': {
- nic_master: {'dhcp4': True, 'match': {'name': nic_master}},
- nic_extra: {'dhcp4': True, 'set-name': nic_extra,
- 'match': {'macaddress': mac_extra}},
- nic_other: {'dhcp4': True, 'set-name': nic_other,
- 'match': {'macaddress': mac_other}},
- }}
- oracle._ensure_netfailover_safe(netcfg)
- self.assertEqual(expected_cfg, netcfg)
-
-
-def _mock_v2_urls(httpretty):
- def instance_callback(request, uri, response_headers):
- assert request.headers.get("Authorization") == "Bearer Oracle"
- return [200, response_headers, OPC_V2_METADATA]
-
- def vnics_callback(request, uri, response_headers):
- assert request.headers.get("Authorization") == "Bearer Oracle"
- return [200, response_headers, OPC_BM_SECONDARY_VNIC_RESPONSE]
-
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v2/instance/",
- body=instance_callback
- )
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v2/vnics/",
- body=vnics_callback
- )
-
-
-def _mock_no_v2_urls(httpretty):
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v2/instance/",
- status=404,
- )
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v1/instance/",
- body=OPC_V1_METADATA
- )
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v1/vnics/",
- body=OPC_BM_SECONDARY_VNIC_RESPONSE
- )
-
-
-class TestReadOpcMetadata:
- # See https://docs.pytest.org/en/stable/example
- # /parametrize.html#parametrizing-conditional-raising
- does_not_raise = ExitStack
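- # An ExitStack with no contexts entered is a no-op context manager, so
- # does_not_raise() is the "no exception expected" counterpart to
- # pytest.raises(UrlError) in the parametrized cases below.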
-
- @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
- @pytest.mark.parametrize(
- 'version,setup_urls,instance_data,fetch_vnics,vnics_data', [
- (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), True,
- json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
- (2, _mock_v2_urls, json.loads(OPC_V2_METADATA), False, None),
- (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), True,
- json.loads(OPC_BM_SECONDARY_VNIC_RESPONSE)),
- (1, _mock_no_v2_urls, json.loads(OPC_V1_METADATA), False, None),
- ]
- )
- def test_metadata_returned(
- self, version, setup_urls, instance_data,
- fetch_vnics, vnics_data, httpretty
- ):
- setup_urls(httpretty)
- metadata = oracle.read_opc_metadata(fetch_vnics_data=fetch_vnics)
-
- assert version == metadata.version
- assert instance_data == metadata.instance_data
- assert vnics_data == metadata.vnics_data
-
- # No need to actually wait between retries in the tests
- @mock.patch("cloudinit.url_helper.time.sleep", lambda _: None)
- @pytest.mark.parametrize(
- "v2_failure_count,v1_failure_count,expected_body,expectation",
- [
- (1, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
- (2, 0, json.loads(OPC_V2_METADATA), does_not_raise()),
- (3, 0, json.loads(OPC_V1_METADATA), does_not_raise()),
- (3, 1, json.loads(OPC_V1_METADATA), does_not_raise()),
- (3, 2, json.loads(OPC_V1_METADATA), does_not_raise()),
- (3, 3, None, pytest.raises(UrlError)),
- ]
- )
- def test_retries(self, v2_failure_count, v1_failure_count,
- expected_body, expectation, httpretty):
- v2_responses = [httpretty.Response("", status=404)] * v2_failure_count
- v2_responses.append(httpretty.Response(OPC_V2_METADATA))
- v1_responses = [httpretty.Response("", status=404)] * v1_failure_count
- v1_responses.append(httpretty.Response(OPC_V1_METADATA))
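- # The failure counts mirror the retry budget: three consecutive 404s
- # abandon the v2 endpoint in favour of v1, and three v1 failures
- # exhaust the retries entirely, raising UrlError.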
-
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v1/instance/",
- responses=v1_responses,
- )
- httpretty.register_uri(
- httpretty.GET,
- "http://169.254.169.254/opc/v2/instance/",
- responses=v2_responses,
- )
- with expectation:
- assert expected_body == oracle.read_opc_metadata().instance_data
-
-
-class TestCommon_GetDataBehaviour:
- """This test class tests behaviour common to iSCSI and non-iSCSI root.
-
- It defines a fixture, parameterized_oracle_ds, which is used in all the
- tests herein to test that the commonly expected behaviour is the same with
- iSCSI root and without.
-
- (As non-iSCSI root behaviour is a superset of iSCSI root behaviour, this
- class is implicitly also testing all iSCSI root behaviour, so there is no
- separate class for that case.)
- """
-
- @pytest.fixture(params=[True, False])
- def parameterized_oracle_ds(self, request, oracle_ds):
- """oracle_ds parameterized for iSCSI and non-iSCSI root respectively"""
- is_iscsi_root = request.param
- with ExitStack() as stack:
- stack.enter_context(
- mock.patch(
- DS_PATH + "._is_iscsi_root", return_value=is_iscsi_root
- )
- )
- if not is_iscsi_root:
- stack.enter_context(
- mock.patch(DS_PATH + ".net.find_fallback_nic")
- )
- stack.enter_context(
- mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
- )
- yield oracle_ds
-
- @mock.patch(
- DS_PATH + "._is_platform_viable", mock.Mock(return_value=False)
- )
- def test_false_if_platform_not_viable(
- self, parameterized_oracle_ds,
- ):
- assert not parameterized_oracle_ds._get_data()
-
- @pytest.mark.parametrize(
- "keyname,expected_value",
- (
- ("availability-zone", "phx-ad-3"),
- ("launch-index", 0),
- ("local-hostname", "instance-20200320-1400"),
- (
- "instance-id",
- "ocid1.instance.oc1.phx"
- ".anyhqljtniwq6syc3nex55sep5w34qbwmw6TRUNCATED",
- ),
- ("name", "instance-20200320-1400"),
- (
- "public_keys",
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ truncated",
- ),
- ),
- )
- def test_metadata_keys_set_correctly(
- self, keyname, expected_value, parameterized_oracle_ds,
- ):
- assert parameterized_oracle_ds._get_data()
- assert expected_value == parameterized_oracle_ds.metadata[keyname]
-
- @pytest.mark.parametrize(
- "attribute_name,expected_value",
- [
- ("_crawled_metadata", json.loads(OPC_V2_METADATA)),
- (
- "userdata_raw",
- base64.b64decode(b"IyEvYmluL3NoCnRvdWNoIC90bXAvZm9v"),
- ),
- ("system_uuid", "my-test-uuid"),
- ],
- )
- @mock.patch(
- DS_PATH + "._read_system_uuid", mock.Mock(return_value="my-test-uuid")
- )
- def test_attributes_set_correctly(
- self, attribute_name, expected_value, parameterized_oracle_ds,
- ):
- assert parameterized_oracle_ds._get_data()
- assert expected_value == getattr(
- parameterized_oracle_ds, attribute_name
- )
-
- @pytest.mark.parametrize(
- "ssh_keys,expected_value",
- [
- # No SSH keys in metadata => no keys detected
- (None, []),
- # Empty SSH keys in metadata => no keys detected
- ("", []),
- # Single SSH key in metadata => single key detected
- ("ssh-rsa ... test@test", ["ssh-rsa ... test@test"]),
- # Multiple SSH keys in metadata => multiple keys detected
- (
- "ssh-rsa ... test@test\nssh-rsa ... test2@test2",
- ["ssh-rsa ... test@test", "ssh-rsa ... test2@test2"],
- ),
- ],
- )
- def test_public_keys_handled_correctly(
- self, ssh_keys, expected_value, parameterized_oracle_ds
- ):
- instance_data = json.loads(OPC_V1_METADATA)
- if ssh_keys is None:
- del instance_data["metadata"]["ssh_authorized_keys"]
- else:
- instance_data["metadata"]["ssh_authorized_keys"] = ssh_keys
- metadata = OpcMetadata(None, instance_data, None)
- with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
- ):
- assert parameterized_oracle_ds._get_data()
- assert (
- expected_value == parameterized_oracle_ds.get_public_ssh_keys()
- )
-
- def test_missing_user_data_handled_gracefully(
- self, parameterized_oracle_ds
- ):
- instance_data = json.loads(OPC_V1_METADATA)
- del instance_data["metadata"]["user_data"]
- metadata = OpcMetadata(None, instance_data, None)
- with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
- ):
- assert parameterized_oracle_ds._get_data()
-
- assert parameterized_oracle_ds.userdata_raw is None
-
- def test_missing_metadata_handled_gracefully(
- self, parameterized_oracle_ds
- ):
- instance_data = json.loads(OPC_V1_METADATA)
- del instance_data["metadata"]
- metadata = OpcMetadata(None, instance_data, None)
- with mock.patch(
- DS_PATH + ".read_opc_metadata", mock.Mock(return_value=metadata),
- ):
- assert parameterized_oracle_ds._get_data()
-
- assert parameterized_oracle_ds.userdata_raw is None
- assert [] == parameterized_oracle_ds.get_public_ssh_keys()
-
-
-@mock.patch(DS_PATH + "._is_iscsi_root", lambda: False)
-class TestNonIscsiRoot_GetDataBehaviour:
- @mock.patch(DS_PATH + ".dhcp.EphemeralDHCPv4")
- @mock.patch(DS_PATH + ".net.find_fallback_nic")
- def test_read_opc_metadata_called_with_ephemeral_dhcp(
- self, m_find_fallback_nic, m_EphemeralDHCPv4, oracle_ds
- ):
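- # Track the EphemeralDHCPv4 context-manager state via closures so we
- # can assert that read_opc_metadata is called while the ephemeral
- # DHCP lease is held.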
- in_context_manager = False
-
- def enter_context_manager():
- nonlocal in_context_manager
- in_context_manager = True
-
- def exit_context_manager(*args):
- nonlocal in_context_manager
- in_context_manager = False
-
- m_EphemeralDHCPv4.return_value.__enter__.side_effect = (
- enter_context_manager
- )
- m_EphemeralDHCPv4.return_value.__exit__.side_effect = (
- exit_context_manager
- )
-
- def assert_in_context_manager(**kwargs):
- assert in_context_manager
- return mock.MagicMock()
-
- with mock.patch(
- DS_PATH + ".read_opc_metadata",
- mock.Mock(side_effect=assert_in_context_manager),
- ):
- assert oracle_ds._get_data()
-
- assert [
- mock.call(m_find_fallback_nic.return_value)
- ] == m_EphemeralDHCPv4.call_args_list
-
-
-@mock.patch(DS_PATH + ".get_interfaces_by_mac", lambda: {})
-@mock.patch(DS_PATH + ".cmdline.read_initramfs_config")
-class TestNetworkConfig:
- def test_network_config_cached(self, m_read_initramfs_config, oracle_ds):
- """.network_config should be cached"""
- assert 0 == m_read_initramfs_config.call_count
- oracle_ds.network_config # pylint: disable=pointless-statement
- assert 1 == m_read_initramfs_config.call_count
- oracle_ds.network_config # pylint: disable=pointless-statement
- assert 1 == m_read_initramfs_config.call_count
-
- def test_network_cmdline(self, m_read_initramfs_config, oracle_ds):
- """network_config should prefer initramfs config over fallback"""
- ncfg = {"version": 1, "config": [{"a": "b"}]}
- m_read_initramfs_config.return_value = copy.deepcopy(ncfg)
-
- assert ncfg == oracle_ds.network_config
- assert 0 == oracle_ds.distro.generate_fallback_config.call_count
-
- def test_network_fallback(self, m_read_initramfs_config, oracle_ds):
- """network_config should prefer initramfs config over fallback"""
- ncfg = {"version": 1, "config": [{"a": "b"}]}
-
- m_read_initramfs_config.return_value = None
- oracle_ds.distro.generate_fallback_config.return_value = copy.deepcopy(
- ncfg
- )
-
- assert ncfg == oracle_ds.network_config
-
- @pytest.mark.parametrize(
- "configure_secondary_nics,expect_secondary_nics",
- [(True, True), (False, False), (None, False)],
- )
- def test_secondary_nic_addition(
- self,
- m_read_initramfs_config,
- configure_secondary_nics,
- expect_secondary_nics,
- oracle_ds,
- ):
- """Test that _add_network_config_from_opc_imds is called as expected
-
- (configure_secondary_nics=None is used to test the default behaviour.)
- """
- m_read_initramfs_config.return_value = {"version": 1, "config": []}
-
- if configure_secondary_nics is not None:
- oracle_ds.ds_cfg[
- "configure_secondary_nics"
- ] = configure_secondary_nics
-
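- # Stand-in for _add_network_config_from_opc_imds: tags the network
- # config so the test can detect whether the method was invoked.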
- def side_effect(self):
- self._network_config["secondary_added"] = mock.sentinel.needle
-
- oracle_ds._vnics_data = 'DummyData'
- with mock.patch.object(
- oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
- new=side_effect,
- ):
- was_secondary_added = "secondary_added" in oracle_ds.network_config
- assert expect_secondary_nics == was_secondary_added
-
- def test_secondary_nic_failure_isnt_blocking(
- self,
- m_read_initramfs_config,
- caplog,
- oracle_ds,
- ):
- oracle_ds.ds_cfg["configure_secondary_nics"] = True
- oracle_ds._vnics_data = "DummyData"
-
- with mock.patch.object(
- oracle.DataSourceOracle, "_add_network_config_from_opc_imds",
- side_effect=Exception()
- ):
- network_config = oracle_ds.network_config
- assert network_config == m_read_initramfs_config.return_value
- assert "Failed to parse secondary network configuration" in caplog.text
-
- def test_ds_network_cfg_preferred_over_initramfs(self, _m):
- """Ensure that DS net config is preferred over initramfs config"""
- config_sources = oracle.DataSourceOracle.network_config_sources
- ds_idx = config_sources.index(NetworkConfigSource.ds)
- initramfs_idx = config_sources.index(NetworkConfigSource.initramfs)
- assert ds_idx < initramfs_idx
-
-
-# vi: ts=4 expandtab