From 0497c7b1f752c7011006b36f9c07ac141c0bb3c2 Mon Sep 17 00:00:00 2001 From: Antti Myyrä Date: Mon, 8 Feb 2021 17:24:36 +0200 Subject: Datasource for UpCloud (#743) New datasource utilizing UpCloud metadata API, including relevant unit tests and documentation. --- doc/rtd/topics/availability.rst | 1 + 1 file changed, 1 insertion(+) (limited to 'doc/rtd/topics/availability.rst') diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index 8f56a7d2..f58b2b38 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -55,6 +55,7 @@ environments in the public cloud: - CloudStack - AltCloud - SmartOS +- UpCloud Additionally, cloud-init is supported on these private clouds: -- cgit v1.2.3 From 0ae0b1d4336acdcab12bd49e9bddb46922fb19c7 Mon Sep 17 00:00:00 2001 From: David Dymko Date: Tue, 13 Apr 2021 14:15:34 -0400 Subject: Add Vultr support (#827) This PR adds in support so that cloud-init can run on instances deployed on Vultr cloud. This was originally brought up in #628. Co-authored-by: Eric Benner --- README.md | 2 +- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceVultr.py | 147 +++++++++++ cloudinit/sources/helpers/vultr.py | 242 +++++++++++++++++ doc/rtd/topics/availability.rst | 1 + doc/rtd/topics/datasources.rst | 2 +- doc/rtd/topics/datasources/vultr.rst | 35 +++ doc/rtd/topics/network-config.rst | 5 + tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_vultr.py | 343 +++++++++++++++++++++++++ tools/.github-cla-signers | 1 + tools/ds-identify | 16 +- 13 files changed, 795 insertions(+), 3 deletions(-) create mode 100644 cloudinit/sources/DataSourceVultr.py create mode 100644 cloudinit/sources/helpers/vultr.py create mode 100644 doc/rtd/topics/datasources/vultr.rst create mode 100644 tests/unittests/test_datasource/test_vultr.py (limited to 'doc/rtd/topics/availability.rst') diff --git a/README.md b/README.md index 435405da..aa6d84ae 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS
SLES/openSUSE
Ubuntu




















| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)






























|
+| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS
SLES/openSUSE
Ubuntu




















| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)






























| ## To start developing cloud-init diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 25f254e3..aadc638f 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -41,6 +41,7 @@ KNOWN_CLOUD_NAMES = [ 'SmartOS', 'UpCloud', 'VMware', + 'Vultr', 'ZStack', 'Other' ] diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 91e1bfe7..23e4c0ad 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -30,6 +30,7 @@ CFG_BUILTIN = { 'GCE', 'OpenStack', 'AliYun', + 'Vultr', 'Ec2', 'CloudSigma', 'CloudStack', diff --git a/cloudinit/sources/DataSourceVultr.py b/cloudinit/sources/DataSourceVultr.py new file mode 100644 index 00000000..c08ff848 --- /dev/null +++ b/cloudinit/sources/DataSourceVultr.py @@ -0,0 +1,147 @@ +# Author: Eric Benner +# +# This file is part of cloud-init. See LICENSE file for license information. + +# Vultr Metadata API: +# https://www.vultr.com/metadata/ + +from cloudinit import log as log +from cloudinit import sources +from cloudinit import util + +import cloudinit.sources.helpers.vultr as vultr + +LOG = log.getLogger(__name__) +BUILTIN_DS_CONFIG = { + 'url': 'http://169.254.169.254', + 'retries': 30, + 'timeout': 2, + 'wait': 2 +} + + +class DataSourceVultr(sources.DataSource): + + dsname = 'Vultr' + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceVultr, self).__init__(sys_cfg, distro, paths) + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "Vultr"], {}), + BUILTIN_DS_CONFIG]) + + # Initiate data and check if Vultr + def _get_data(self): + LOG.debug("Detecting if machine is a Vultr instance") + if not vultr.is_vultr(): + LOG.debug("Machine is not a Vultr instance") + return False + + LOG.debug("Machine is a Vultr instance") + + # Fetch metadata + md = self.get_metadata() + + self.metadata_full = md + self.metadata['instanceid'] = md['instanceid'] + self.metadata['local-hostname'] = md['hostname'] + self.metadata['public-keys'] = md["public-keys"] + self.userdata_raw = md["user-data"] + + # Generate config and process data + self.get_datasource_data(md) + + # Dump some data so diagnosing failures is manageable + LOG.debug("Vultr Vendor Config:") + LOG.debug(md['vendor-data']['config']) + LOG.debug("SUBID: %s", self.metadata['instanceid']) + LOG.debug("Hostname: %s", self.metadata['local-hostname']) + if self.userdata_raw is not None: + LOG.debug("User-Data:") + LOG.debug(self.userdata_raw) + + return True + + # Process metadata + def get_datasource_data(self, md): + # Grab config + config = md['vendor-data']['config'] + + # Generate network config + self.netcfg = vultr.generate_network_config(md['interfaces']) + + # This requires info generated in the vendor config + user_scripts = vultr.generate_user_scripts(md, self.netcfg['config']) + + # Default hostname is "guest" for whitelabel + if self.metadata['local-hostname'] == "": + self.metadata['local-hostname'] = "guest" + + self.userdata_raw = md["user-data"] + if self.userdata_raw == "": + self.userdata_raw = None + + # Assemble vendor-data + # This adds provided scripts and the config + self.vendordata_raw = [] + self.vendordata_raw.extend(user_scripts) + self.vendordata_raw.append("#cloud-config\n%s" % config) + + # Get the metadata by flag + def get_metadata(self): + return vultr.get_metadata(self.ds_cfg['url'], + self.ds_cfg['timeout'], + self.ds_cfg['retries'], + self.ds_cfg['wait']) + + # Compare subid as instance id + def check_instance_id(self, sys_cfg): + if not vultr.is_vultr(): + return False + + # Baremetal has no way to implement 
this in local + if vultr.is_baremetal(): + return False + + subid = vultr.get_sysinfo()['subid'] + return sources.instance_id_matches_system_uuid(subid) + + # Currently unsupported + @property + def launch_index(self): + return None + + @property + def network_config(self): + return self.netcfg + + +# Used to match classes to dependencies +datasources = [ + (DataSourceVultr, (sources.DEP_FILESYSTEM, )), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": + import sys + + if not vultr.is_vultr(): + print("Machine is not a Vultr instance") + sys.exit(1) + + md = vultr.get_metadata(BUILTIN_DS_CONFIG['url'], + BUILTIN_DS_CONFIG['timeout'], + BUILTIN_DS_CONFIG['retries'], + BUILTIN_DS_CONFIG['wait']) + config = md['vendor-data']['config'] + sysinfo = vultr.get_sysinfo() + + print(util.json_dumps(sysinfo)) + print(config) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/vultr.py b/cloudinit/sources/helpers/vultr.py new file mode 100644 index 00000000..c22cd0b1 --- /dev/null +++ b/cloudinit/sources/helpers/vultr.py @@ -0,0 +1,242 @@ +# Author: Eric Benner +# +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +from cloudinit import log as log +from cloudinit import url_helper +from cloudinit import dmi +from cloudinit import util +from cloudinit import net +from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError +from functools import lru_cache + +# Get LOG +LOG = log.getLogger(__name__) + + +@lru_cache() +def get_metadata(url, timeout, retries, sec_between): + # Bring up interface + try: + with EphemeralDHCPv4(connectivity_url=url): + # Fetch the metadata + v1 = read_metadata(url, timeout, retries, sec_between) + except (NoDHCPLeaseError) as exc: + LOG.error("Bailing, DHCP Exception: %s", exc) + raise + + v1_json = json.loads(v1) + metadata = v1_json + + return metadata + + +# Read the system information from SMBIOS +def get_sysinfo(): + return { + 'manufacturer': dmi.read_dmi_data("system-manufacturer"), + 'subid': dmi.read_dmi_data("system-serial-number") + } + + +# Assumes is Vultr is already checked +def is_baremetal(): + if get_sysinfo()['manufacturer'] != "Vultr": + return True + return False + + +# Confirm is Vultr +def is_vultr(): + # VC2, VDC, and HFC use DMI + sysinfo = get_sysinfo() + + if sysinfo['manufacturer'] == "Vultr": + return True + + # Baremetal requires a kernel parameter + if "vultr" in util.get_cmdline().split(): + return True + + return False + + +# Read Metadata endpoint +def read_metadata(url, timeout, retries, sec_between): + url = "%s/v1.json" % url + response = url_helper.readurl(url, + timeout=timeout, + retries=retries, + headers={'Metadata-Token': 'vultr'}, + sec_between=sec_between) + + if not response.ok(): + raise RuntimeError("Failed to connect to %s: Code: %s" % + url, response.code) + + return response.contents.decode() + + +# Wrapped for caching +@lru_cache() +def get_interface_map(): + return net.get_interfaces_by_mac() + + +# Convert macs to nics +def get_interface_name(mac): + macs_to_nic = get_interface_map() + + if mac not in macs_to_nic: + return None + + return macs_to_nic.get(mac) + + +# Generate network configs +def generate_network_config(interfaces): + network = { + "version": 1, + "config": [ + { + "type": "nameserver", + "address": [ + "108.61.10.10" + ] + } + ] + } + + # Prepare interface 0, public + if len(interfaces) > 0: + public 
= generate_public_network_interface(interfaces[0]) + network['config'].append(public) + + # Prepare interface 1, private + if len(interfaces) > 1: + private = generate_private_network_interface(interfaces[1]) + network['config'].append(private) + + return network + + +# Input Metadata and generate public network config part +def generate_public_network_interface(interface): + interface_name = get_interface_name(interface['mac']) + if not interface_name: + raise RuntimeError( + "Interface: %s could not be found on the system" % + interface['mac']) + + netcfg = { + "name": interface_name, + "type": "physical", + "mac_address": interface['mac'], + "accept-ra": 1, + "subnets": [ + { + "type": "dhcp", + "control": "auto" + }, + { + "type": "dhcp6", + "control": "auto" + }, + ] + } + + # Check for additional IP's + additional_count = len(interface['ipv4']['additional']) + if "ipv4" in interface and additional_count > 0: + for additional in interface['ipv4']['additional']: + add = { + "type": "static", + "control": "auto", + "address": additional['address'], + "netmask": additional['netmask'] + } + netcfg['subnets'].append(add) + + # Check for additional IPv6's + additional_count = len(interface['ipv6']['additional']) + if "ipv6" in interface and additional_count > 0: + for additional in interface['ipv6']['additional']: + add = { + "type": "static6", + "control": "auto", + "address": additional['address'], + "netmask": additional['netmask'] + } + netcfg['subnets'].append(add) + + # Add config to template + return netcfg + + +# Input Metadata and generate private network config part +def generate_private_network_interface(interface): + interface_name = get_interface_name(interface['mac']) + if not interface_name: + raise RuntimeError( + "Interface: %s could not be found on the system" % + interface['mac']) + + netcfg = { + "name": interface_name, + "type": "physical", + "mac_address": interface['mac'], + "accept-ra": 1, + "subnets": [ + { + "type": "static", + "control": "auto", + "address": interface['ipv4']['address'], + "netmask": interface['ipv4']['netmask'] + } + ] + } + + return netcfg + + +# This is for the vendor and startup scripts +def generate_user_scripts(md, network_config): + user_scripts = [] + + # Raid 1 script + if md['vendor-data']['raid1-script']: + user_scripts.append(md['vendor-data']['raid1-script']) + + # Enable multi-queue on linux + if util.is_Linux() and md['vendor-data']['ethtool-script']: + ethtool_script = md['vendor-data']['ethtool-script'] + + # Tool location + tool = "/opt/vultr/ethtool" + + # Go through the interfaces + for netcfg in network_config: + # If the interface has a mac and is physical + if "mac_address" in netcfg and netcfg['type'] == "physical": + # Set its multi-queue to num of cores as per RHEL Docs + name = netcfg['name'] + command = "%s -L %s combined $(nproc --all)" % (tool, name) + ethtool_script = '%s\n%s' % (ethtool_script, command) + + user_scripts.append(ethtool_script) + + # This is for vendor scripts + if md['vendor-data']['vendor-script']: + user_scripts.append(md['vendor-data']['vendor-script']) + + # Startup script + script = md['startup-script'] + if script and script != "echo No configured startup script": + user_scripts.append(script) + + return user_scripts + + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index f58b2b38..f3e13edc 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -56,6 +56,7 @@ environments in the public cloud: - AltCloud - 
SmartOS - UpCloud +- Vultr Additionally, cloud-init is supported on these private clouds: diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 228173d2..497b1467 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -49,7 +49,7 @@ The following is a list of documents for each supported datasource: datasources/smartos.rst datasources/upcloud.rst datasources/zstack.rst - + datasources/vultr.rst Creation ======== diff --git a/doc/rtd/topics/datasources/vultr.rst b/doc/rtd/topics/datasources/vultr.rst new file mode 100644 index 00000000..e73406a8 --- /dev/null +++ b/doc/rtd/topics/datasources/vultr.rst @@ -0,0 +1,35 @@ +.. _datasource_vultr: + +Vultr +===== + +The `Vultr`_ datasource retrieves basic configuration values from the locally +accessible `metadata service`_. All data is served over HTTP from the address +169.254.169.254. The endpoints are documented in, +`https://www.vultr.com/metadata/ +`_ + +Configuration +------------- + +Vultr's datasource can be configured as follows: + + datasource: + Vultr: + url: 'http://169.254.169.254' + retries: 3 + timeout: 2 + wait: 2 + +- *url*: The URL used to aquire the metadata configuration from +- *retries*: Determines the number of times to attempt to connect to the + metadata service +- *timeout*: Determines the timeout in seconds to wait for a response from the + metadata service +- *wait*: Determines the timeout in seconds to wait before retrying after + accessible failure + +.. _Vultr: https://www.vultr.com/ +.. _metadata service: https://www.vultr.com/metadata/ + +.. vi: textwidth=78 diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 07cad765..5f7a74f8 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -148,6 +148,10 @@ The following Datasources optionally provide network configuration: - `UpCloud JSON metadata`_ +- :ref:`datasource_vultr` + + - `Vultr JSON metadata`_ + For more information on network configuration formats .. toctree:: @@ -262,5 +266,6 @@ Example output converting V2 to sysconfig: .. _OpenStack Metadata Service Network: https://specs.openstack.org/openstack/nova-specs/specs/liberty/implemented/metadata-service-network-info.html .. _SmartOS JSON Metadata: https://eng.joyent.com/mdata/datadict.html .. _UpCloud JSON metadata: https://developers.upcloud.com/1.3/8-servers/#metadata-service +.. _Vultr JSON metadata: https://www.vultr.com/metadata/ .. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 5912f7ee..5e9c547a 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -28,6 +28,7 @@ from cloudinit.sources import ( DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, DataSourceUpCloud as UpCloud, + DataSourceVultr as Vultr, ) from cloudinit.sources import DataSourceNone as DSNone @@ -45,6 +46,7 @@ DEFAULT_LOCAL = [ Oracle.DataSourceOracle, OVF.DataSourceOVF, SmartOS.DataSourceSmartOS, + Vultr.DataSourceVultr, Ec2.DataSourceEc2Local, OpenStack.DataSourceOpenStackLocal, RbxCloud.DataSourceRbxCloud, diff --git a/tests/unittests/test_datasource/test_vultr.py b/tests/unittests/test_datasource/test_vultr.py new file mode 100644 index 00000000..bbea2aa3 --- /dev/null +++ b/tests/unittests/test_datasource/test_vultr.py @@ -0,0 +1,343 @@ +# Author: Eric Benner +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +# Vultr Metadata API: +# https://www.vultr.com/metadata/ + +import json + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceVultr +from cloudinit.sources.helpers import vultr + +from cloudinit.tests.helpers import mock, CiTestCase + +# Vultr metadata test data +VULTR_V1_1 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_1', + 'instanceid': '42506325', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address': '108.61.89.242', + 'gateway': '108.61.89.1', + 'netmask': '255.255.255.0' + }, + 'ipv6': { + 'additional': [ + ], + 'address': '2001:19f0:5:56c2:5400:03ff:fe15:c465', + 'network': '2001:19f0:5:56c2::', + 'prefix': '64' + }, + 'mac': '56:00:03:15:c4:65', + 'network-type': 'public' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'raid1-script': '', + 'user-data': [ + ], + 'vendor-data': { + 'vendor-script': '', + 'ethtool-script': '', + 'config': { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$S2Smuj.../VqxmIR9Urw0jPZ88i4yvB/' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + } +} + +VULTR_V1_2 = { + 'bgp': { + 'ipv4': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + }, + 'ipv6': { + 'my-address': '', + 'my-asn': '', + 'peer-address': '', + 'peer-asn': '' + } + }, + 'hostname': 'CLOUDINIT_2', + 'instance-v2-id': '29bea708-2e6e-480a-90ad-0e6b5d5ad62f', + 'instanceid': '42872224', + 'interfaces': [ + { + 'ipv4': { + 'additional': [ + ], + 'address':'45.76.7.171', + 'gateway':'45.76.6.1', + 'netmask':'255.255.254.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'address':'2001:19f0:5:28a7:5400:03ff:fe1b:4eca', + 'network':'2001:19f0:5:28a7::', + 'prefix':'64' + }, + 'mac':'56:00:03:1b:4e:ca', + 'network-type':'public' + }, + { + 'ipv4': { + 'additional': [ + ], + 'address':'10.1.112.3', + 'gateway':'', + 'netmask':'255.255.240.0' + }, + 'ipv6':{ + 'additional': [ + ], + 'network':'', + 'prefix':'' + }, + 'mac':'5a:00:03:1b:4e:ca', + 'network-type':'private', + 'network-v2-id':'fbbe2b5b-b986-4396-87f5-7246660ccb64', + 'networkid':'net5e7155329d730' + } + ], + 'public-keys': [ + 'ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key' + ], + 'region': { + 'regioncode': 'EWR' + }, + 'user-defined': [ + ], + 'startup-script': 'echo No configured startup script', + 'user-data': [ + ], + + 'vendor-data': { + 'vendor-script': '', + 'ethtool-script': '', + 'raid1-script': '', + 'config': { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } + } + } +} + +SSH_KEYS_1 = [ + "ssh-rsa AAAAB3NzaC1y...IQQhv5PAOKaIl+mM3c= test3@key" +] + +# Expected generated objects + +# Expected config +EXPECTED_VULTR_CONFIG = { + 'package_upgrade': 'true', + 'disable_root': 0, + 'ssh_pwauth': 1, + 'chpasswd': { + 'expire': False, + 'list': [ + 'root:$6$SxXx...k2mJNIzZB5vMCDBlYT1' + ] + }, + 'system_info': { + 'default_user': { + 'name': 'root' + } + } +} + +# Expected network config object from generator 
+EXPECTED_VULTR_NETWORK_1 = { + 'version': 1, + 'config': [ + { + 'type': 'nameserver', + 'address': ['108.61.10.10'] + }, + { + 'name': 'eth0', + 'type': 'physical', + 'mac_address': '56:00:03:15:c4:65', + 'accept-ra': 1, + 'subnets': [ + {'type': 'dhcp', 'control': 'auto'}, + {'type': 'dhcp6', 'control': 'auto'} + ], + } + ] +} + +EXPECTED_VULTR_NETWORK_2 = { + 'version': 1, + 'config': [ + { + 'type': 'nameserver', + 'address': ['108.61.10.10'] + }, + { + 'name': 'eth0', + 'type': 'physical', + 'mac_address': '56:00:03:1b:4e:ca', + 'accept-ra': 1, + 'subnets': [ + {'type': 'dhcp', 'control': 'auto'}, + {'type': 'dhcp6', 'control': 'auto'} + ], + }, + { + 'name': 'eth1', + 'type': 'physical', + 'mac_address': '5a:00:03:1b:4e:ca', + 'accept-ra': 1, + 'subnets': [ + { + "type": "static", + "control": "auto", + "address": "10.1.112.3", + "netmask": "255.255.240.0" + } + ], + } + ] +} + + +INTERFACE_MAP = { + '56:00:03:15:c4:65': 'eth0', + '56:00:03:1b:4e:ca': 'eth0', + '5a:00:03:1b:4e:ca': 'eth1' +} + + +class TestDataSourceVultr(CiTestCase): + def setUp(self): + super(TestDataSourceVultr, self).setUp() + + # Stored as a dict to make it easier to maintain + raw1 = json.dumps(VULTR_V1_1['vendor-data']['config']) + raw2 = json.dumps(VULTR_V1_2['vendor-data']['config']) + + # Make expected format + VULTR_V1_1['vendor-data']['config'] = raw1 + VULTR_V1_2['vendor-data']['config'] = raw2 + + self.tmp = self.tmp_dir() + + # Test the datasource itself + @mock.patch('cloudinit.net.get_interfaces_by_mac') + @mock.patch('cloudinit.sources.helpers.vultr.is_vultr') + @mock.patch('cloudinit.sources.helpers.vultr.get_metadata') + def test_datasource(self, + mock_getmeta, + mock_isvultr, + mock_netmap): + mock_getmeta.return_value = VULTR_V1_2 + mock_isvultr.return_value = True + mock_netmap.return_value = INTERFACE_MAP + + source = DataSourceVultr.DataSourceVultr( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + + # Test for failure + self.assertEqual(True, source._get_data()) + + # Test instance id + self.assertEqual("42872224", source.metadata['instanceid']) + + # Test hostname + self.assertEqual("CLOUDINIT_2", source.metadata['local-hostname']) + + # Test ssh keys + self.assertEqual(SSH_KEYS_1, source.metadata['public-keys']) + + # Test vendor data generation + orig_val = self.maxDiff + self.maxDiff = None + + vendordata = source.vendordata_raw + + # Test vendor config + self.assertEqual( + EXPECTED_VULTR_CONFIG, + json.loads(vendordata[0].replace("#cloud-config", ""))) + + self.maxDiff = orig_val + + # Test network config generation + self.assertEqual(EXPECTED_VULTR_NETWORK_2, source.network_config) + + # Test network config generation + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_network_config(self, mock_netmap): + mock_netmap.return_value = INTERFACE_MAP + interf = VULTR_V1_1['interfaces'] + + self.assertEqual(EXPECTED_VULTR_NETWORK_1, + vultr.generate_network_config(interf)) + + # Test Private Networking config generation + @mock.patch('cloudinit.net.get_interfaces_by_mac') + def test_private_network_config(self, mock_netmap): + mock_netmap.return_value = INTERFACE_MAP + interf = VULTR_V1_2['interfaces'] + + self.assertEqual(EXPECTED_VULTR_NETWORK_2, + vultr.generate_network_config(interf)) + +# vi: ts=4 expandtab diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index b39f4198..d6212d1d 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -11,6 +11,7 @@ BirknerAlex candlerb cawamata dankenigsberg +ddymko dermotbradley 
dhensby eandersson diff --git a/tools/ds-identify b/tools/ds-identify index 2f2486f7..73e27c71 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # this has to match the builtin list in cloud-init, it is what will # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ -CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ +CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \ OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud" DI_DSLIST="" DI_MODE="" @@ -1350,6 +1350,20 @@ dscheck_IBMCloud() { return ${DS_NOT_FOUND} } +dscheck_Vultr() { + dmi_sys_vendor_is Vultr && return $DS_FOUND + + case " $DI_KERNEL_CMDLINE " in + *\ vultr\ *) return $DS_FOUND ;; + esac + + if [ -f "${PATH_ROOT}/etc/vultr" ]; then + return $DS_FOUND + fi + + return $DS_NOT_FOUND +} + collect_info() { read_uname_info read_virt -- cgit v1.2.3 From 59a848c5929cbfca45d95860eb60dfebd0786c94 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Mon, 14 Jun 2021 15:39:05 -0400 Subject: add DragonFlyBSD support (#904) - Mostly based on FreeBSD, the main exception is that `find_devs_with_on_freebsd` does not work. - Since we cannot get the CDROM or the partition labels, `find_devs_with_on_dragonflybsd()` has a more naive approach and returns all the block devices. --- README.md | 2 +- cloudinit/config/cc_growpart.py | 4 ++ cloudinit/config/cc_resizefs.py | 5 +++ cloudinit/distros/dragonflybsd.py | 12 ++++++ cloudinit/distros/freebsd.py | 15 +++++++- cloudinit/net/__init__.py | 6 +-- cloudinit/net/freebsd.py | 11 +++++- cloudinit/util.py | 46 ++++++++++++++++++++++- config/cloud.cfg.tmpl | 22 +++++++++-- doc/rtd/topics/availability.rst | 5 ++- setup.py | 2 +- sysvinit/freebsd/cloudinit | 2 +- tests/unittests/test_distros/test_dragonflybsd.py | 25 ++++++++++++ tests/unittests/test_util.py | 18 +++++++++ 14 files changed, 157 insertions(+), 18 deletions(-) create mode 100644 cloudinit/distros/dragonflybsd.py create mode 100644 tests/unittests/test_distros/test_dragonflybsd.py (limited to 'doc/rtd/topics/availability.rst') diff --git a/README.md b/README.md index c382b592..02b2f666 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky
SLES/openSUSE
Ubuntu




















| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)






























|
+| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky
SLES/openSUSE
Ubuntu






















| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)




























| ## To start developing cloud-init diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 9f338ad1..9f5525a1 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -224,6 +224,10 @@ def device_part_info(devpath): freebsd_part = "/dev/" + util.find_freebsd_part(devpath) m = re.search('^(/dev/.+)p([0-9])$', freebsd_part) return (m.group(1), m.group(2)) + elif util.is_DragonFlyBSD(): + dragonflybsd_part = "/dev/" + util.find_dragonflybsd_part(devpath) + m = re.search('^(/dev/.+)s([0-9])$', dragonflybsd_part) + return (m.group(1), m.group(2)) if not os.path.exists(syspath): raise ValueError("%s had no syspath (%s)" % (devpath, syspath)) diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 9afbb847..990a6939 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -85,6 +85,10 @@ def _resize_zfs(mount_point, devpth): return ('zpool', 'online', '-e', mount_point, devpth) +def _resize_hammer2(mount_point, devpth): + return ('hammer2', 'growfs', mount_point) + + def _can_skip_resize_ufs(mount_point, devpth): # possible errors cases on the code-path to growfs -N following: # https://github.com/freebsd/freebsd/blob/HEAD/sbin/growfs/growfs.c @@ -113,6 +117,7 @@ RESIZE_FS_PREFIXES_CMDS = [ ('xfs', _resize_xfs), ('ufs', _resize_ufs), ('zfs', _resize_zfs), + ('hammer2', _resize_hammer2), ] RESIZE_FS_PRECHECK_CMDS = { diff --git a/cloudinit/distros/dragonflybsd.py b/cloudinit/distros/dragonflybsd.py new file mode 100644 index 00000000..2d825518 --- /dev/null +++ b/cloudinit/distros/dragonflybsd.py @@ -0,0 +1,12 @@ +# Copyright (C) 2020-2021 Gonéri Le Bouder +# +# This file is part of cloud-init. See LICENSE file for license information. + +import cloudinit.distros.freebsd + + +class Distro(cloudinit.distros.freebsd.Distro): + home_dir = '/home' + + +# vi: ts=4 expandtab diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 9659843f..d94a52b8 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -18,6 +18,12 @@ LOG = logging.getLogger(__name__) class Distro(cloudinit.distros.bsd.BSD): + """ + Distro subclass for FreeBSD. + + (N.B. DragonFlyBSD inherits from this class.) 
+ """ + usr_lib_exec = '/usr/local/lib' login_conf_fn = '/etc/login.conf' login_conf_fn_bak = '/etc/login.conf.orig' @@ -28,6 +34,7 @@ class Distro(cloudinit.distros.bsd.BSD): pkg_cmd_update_prefix = ["pkg", "update"] pkg_cmd_upgrade_prefix = ["pkg", "upgrade"] prefer_fqdn = True # See rc.conf(5) in FreeBSD + home_dir = '/usr/home' def _get_add_member_to_group_cmd(self, member_name, group_name): return ['pw', 'usermod', '-n', member_name, '-G', group_name] @@ -66,9 +73,12 @@ class Distro(cloudinit.distros.bsd.BSD): pw_useradd_cmd.append('-d/nonexistent') log_pw_useradd_cmd.append('-d/nonexistent') else: - pw_useradd_cmd.append('-d/usr/home/%s' % name) + pw_useradd_cmd.append('-d{home_dir}/{name}'.format( + home_dir=self.home_dir, name=name)) pw_useradd_cmd.append('-m') - log_pw_useradd_cmd.append('-d/usr/home/%s' % name) + log_pw_useradd_cmd.append('-d{home_dir}/{name}'.format( + home_dir=self.home_dir, name=name)) + log_pw_useradd_cmd.append('-m') # Run the command @@ -155,4 +165,5 @@ class Distro(cloudinit.distros.bsd.BSD): "update-sources", self.package_command, ["update"], freq=PER_INSTANCE) + # vi: ts=4 expandtab diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 6b3b84f7..b827d41a 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -351,7 +351,7 @@ def device_devid(devname): def get_devicelist(): - if util.is_FreeBSD(): + if util.is_FreeBSD() or util.is_DragonFlyBSD(): return list(get_interfaces_by_mac().values()) try: @@ -376,7 +376,7 @@ def is_disabled_cfg(cfg): def find_fallback_nic(blacklist_drivers=None): """Return the name of the 'fallback' network device.""" - if util.is_FreeBSD(): + if util.is_FreeBSD() or util.is_DragonFlyBSD(): return find_fallback_nic_on_freebsd(blacklist_drivers) elif util.is_NetBSD() or util.is_OpenBSD(): return find_fallback_nic_on_netbsd_or_openbsd(blacklist_drivers) @@ -816,7 +816,7 @@ def get_ib_interface_hwaddr(ifname, ethernet_format): def get_interfaces_by_mac(blacklist_drivers=None) -> dict: - if util.is_FreeBSD(): + if util.is_FreeBSD() or util.is_DragonFlyBSD(): return get_interfaces_by_mac_on_freebsd( blacklist_drivers=blacklist_drivers) elif util.is_NetBSD(): diff --git a/cloudinit/net/freebsd.py b/cloudinit/net/freebsd.py index c843d792..f8faf240 100644 --- a/cloudinit/net/freebsd.py +++ b/cloudinit/net/freebsd.py @@ -32,6 +32,13 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): LOG.debug("freebsd generate postcmd disabled") return + for dhcp_interface in self.dhcp_interfaces(): + # Observed on DragonFlyBSD 6. If we use the "restart" parameter, + # the routes are not recreated. 
+ subp.subp(['service', 'dhclient', 'stop', dhcp_interface], + rcs=[0, 1], + capture=True) + subp.subp(['service', 'netif', 'restart'], capture=True) # On FreeBSD 10, the restart of routing and dhclient is likely to fail # because @@ -42,7 +49,7 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): subp.subp(['service', 'routing', 'restart'], capture=True, rcs=[0, 1]) for dhcp_interface in self.dhcp_interfaces(): - subp.subp(['service', 'dhclient', 'restart', dhcp_interface], + subp.subp(['service', 'dhclient', 'start', dhcp_interface], rcs=[0, 1], capture=True) @@ -57,4 +64,4 @@ class Renderer(cloudinit.net.bsd.BSDRenderer): def available(target=None): - return util.is_FreeBSD() + return util.is_FreeBSD() or util.is_DragonFlyBSD() diff --git a/cloudinit/util.py b/cloudinit/util.py index 2de1123e..f95dc435 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -392,7 +392,11 @@ def is_Linux(): @lru_cache() def is_BSD(): - return 'BSD' in platform.system() + if 'BSD' in platform.system(): + return True + if platform.system() == 'DragonFly': + return True + return False @lru_cache() @@ -400,6 +404,11 @@ def is_FreeBSD(): return system_info()['variant'] == "freebsd" +@lru_cache() +def is_DragonFlyBSD(): + return system_info()['variant'] == "dragonfly" + + @lru_cache() def is_NetBSD(): return system_info()['variant'] == "netbsd" @@ -534,7 +543,9 @@ def system_info(): var = 'suse' else: var = 'linux' - elif system in ('windows', 'darwin', "freebsd", "netbsd", "openbsd"): + elif system in ( + 'windows', 'darwin', "freebsd", "netbsd", + "openbsd", "dragonfly"): var = system info['variant'] = var @@ -1195,6 +1206,23 @@ def find_devs_with_openbsd(criteria=None, oformat='device', return ['/dev/' + i for i in devlist] +def find_devs_with_dragonflybsd(criteria=None, oformat='device', + tag=None, no_cache=False, path=None): + out, _err = subp.subp(['sysctl', '-n', 'kern.disks'], rcs=[0]) + devlist = [i for i in sorted(out.split(), reverse=True) + if not i.startswith("md") and not i.startswith("vn")] + + if criteria == "TYPE=iso9660": + devlist = [i for i in devlist + if i.startswith('cd') or i.startswith('acd')] + elif criteria in ["LABEL=CONFIG-2", "TYPE=vfat"]: + devlist = [i for i in devlist + if not (i.startswith('cd') or i.startswith('acd'))] + elif criteria: + LOG.debug("Unexpected criteria: %s", criteria) + return ['/dev/' + i for i in devlist] + + def find_devs_with(criteria=None, oformat='device', tag=None, no_cache=False, path=None): """ @@ -1213,6 +1241,9 @@ def find_devs_with(criteria=None, oformat='device', elif is_OpenBSD(): return find_devs_with_openbsd(criteria, oformat, tag, no_cache, path) + elif is_DragonFlyBSD(): + return find_devs_with_dragonflybsd(criteria, oformat, + tag, no_cache, path) blk_id_cmd = ['blkid'] options = [] @@ -2211,6 +2242,14 @@ def find_freebsd_part(fs): LOG.warning("Unexpected input in find_freebsd_part: %s", fs) +def find_dragonflybsd_part(fs): + splitted = fs.split('/') + if len(splitted) == 3 and splitted[1] == 'dev': + return splitted[2] + else: + LOG.warning("Unexpected input in find_dragonflybsd_part: %s", fs) + + def get_path_dev_freebsd(path, mnt_list): path_found = None for line in mnt_list.split("\n"): @@ -2264,6 +2303,9 @@ def parse_mount(path): # https://regex101.com/r/T2en7a/1 regex = (r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))') + if is_DragonFlyBSD(): + regex = (r'^(/dev/[\S]+|\S*?) 
on (/[\S]*) ' + r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))') for line in mount_locs: m = re.search(regex, line) if not m: diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 2f6c3a7d..586384e4 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -1,8 +1,8 @@ ## template:jinja # The top level settings are used as module # and system configuration. - -{% if variant.endswith("bsd") %} +{% set is_bsd = variant in ["dragonfly", "freebsd", "netbsd", "openbsd"] %} +{% if is_bsd %} syslog_fix_perms: root:wheel {% elif variant in ["suse"] %} syslog_fix_perms: root:root @@ -61,11 +61,11 @@ cloud_init_modules: {% endif %} - bootcmd - write-files -{% if variant not in ["netbsd"] %} +{% if variant not in ["netbsd", "openbsd"] %} - growpart - resizefs {% endif %} -{% if variant not in ["freebsd", "netbsd"] %} +{% if not is_bsd %} - disk_setup - mounts {% endif %} @@ -158,6 +158,8 @@ system_info: "fedora", "freebsd", "netbsd", "openbsd", "rhel", "rocky", "suse", "ubuntu"] %} distro: {{ variant }} +{% elif variant in ["dragonfly"] %} + distro: dragonflybsd {% else %} # Unknown/fallback distro. distro: ubuntu @@ -249,6 +251,15 @@ system_info: groups: [wheel] sudo: ["ALL=(ALL) NOPASSWD:ALL"] shell: /bin/tcsh +{% elif variant in ["dragonfly"] %} + # Default user name + that default users groups (if added/used) + default_user: + name: dragonfly + lock_passwd: True + gecos: DragonFly + groups: [wheel] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/sh {% elif variant in ["netbsd"] %} default_user: name: netbsd @@ -269,4 +280,7 @@ system_info: {% if variant in ["freebsd", "netbsd", "openbsd"] %} network: renderers: ['{{ variant }}'] +{% elif variant in ["dragonfly"] %} + network: + renderers: ['freebsd'] {% endif %} diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index f3e13edc..e4480754 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -14,12 +14,13 @@ distributions and clouds, both public and private. Distributions ============= -Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD -and OpenBSD: +Cloud-init has support across all major Linux distributions, FreeBSD, NetBSD, +OpenBSD and DragonFlyBSD: - Alpine Linux - ArchLinux - Debian +- DragonFlyBSD - Fedora - FreeBSD - Gentoo Linux diff --git a/setup.py b/setup.py index cbacf48e..dcbe0843 100755 --- a/setup.py +++ b/setup.py @@ -156,7 +156,7 @@ USR = "usr" ETC = "etc" USR_LIB_EXEC = "usr/lib" LIB = "lib" -if os.uname()[0] == 'FreeBSD': +if os.uname()[0] in ['FreeBSD', 'DragonFly']: USR = "usr/local" USR_LIB_EXEC = "usr/local/lib" elif os.path.isfile('/etc/redhat-release'): diff --git a/sysvinit/freebsd/cloudinit b/sysvinit/freebsd/cloudinit index aa5bd118..d26f3d0f 100755 --- a/sysvinit/freebsd/cloudinit +++ b/sysvinit/freebsd/cloudinit @@ -2,7 +2,7 @@ # PROVIDE: cloudinit # REQUIRE: FILESYSTEMS NETWORKING cloudinitlocal ldconfig devd -# BEFORE: cloudconfig cloudfinal +# BEFORE: LOGIN cloudconfig cloudfinal . 
/etc/rc.subr diff --git a/tests/unittests/test_distros/test_dragonflybsd.py b/tests/unittests/test_distros/test_dragonflybsd.py new file mode 100644 index 00000000..df2c00f4 --- /dev/null +++ b/tests/unittests/test_distros/test_dragonflybsd.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 + + +import cloudinit.util +from cloudinit.tests.helpers import mock + + +def test_find_dragonflybsd_part(): + assert cloudinit.util.find_dragonflybsd_part("/dev/vbd0s3") == "vbd0s3" + + +@mock.patch("cloudinit.util.is_DragonFlyBSD") +@mock.patch("cloudinit.subp.subp") +def test_parse_mount(mock_subp, m_is_DragonFlyBSD): + mount_out = """ +vbd0s3 on / (hammer2, local) +devfs on /dev (devfs, nosymfollow, local) +/dev/vbd0s0a on /boot (ufs, local) +procfs on /proc (procfs, local) +tmpfs on /var/run/shm (tmpfs, local) +""" + + mock_subp.return_value = (mount_out, "") + m_is_DragonFlyBSD.return_value = True + assert cloudinit.util.parse_mount("/") == ("vbd0s3", "hammer2", "/") diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index e5292001..2290cab7 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -999,4 +999,22 @@ class TestFindDevs: devlist = util.find_devs_with_netbsd(criteria=criteria) assert devlist == expected_devlist + @pytest.mark.parametrize( + 'criteria,expected_devlist', ( + (None, ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), + ('TYPE=iso9660', ['/dev/cd0', '/dev/acd0']), + ('TYPE=vfat', ['/dev/vbd0']), + ('LABEL_FATBOOT=A_LABEL', # lp: #1841466 + ['/dev/vbd0', '/dev/cd0', '/dev/acd0']), + ) + ) + @mock.patch("cloudinit.subp.subp") + def test_find_devs_with_dragonflybsd(self, m_subp, criteria, + expected_devlist): + m_subp.return_value = ( + 'md2 md1 cd0 vbd0 acd0 vn3 vn2 vn1 vn0 md0', '' + ) + devlist = util.find_devs_with_dragonflybsd(criteria=criteria) + assert devlist == expected_devlist + # vi: ts=4 expandtab -- cgit v1.2.3 From 1243c5a1fc1f3546b40e48a2033a9abab546e40f Mon Sep 17 00:00:00 2001 From: Mark Mercado Date: Thu, 17 Jun 2021 10:37:23 -0400 Subject: Fix the spelling of "DigitalOcean" (#924) The name "DigitalOcean" doesn't have a space in it; it's a single compound word written in Pascal case (upper camel case). --- README.md | 2 +- cloudinit/sources/DataSourceDigitalOcean.py | 2 +- doc/rtd/topics/availability.rst | 2 +- doc/rtd/topics/datasources/digitalocean.rst | 4 ++-- tools/.github-cla-signers | 1 + 5 files changed, 6 insertions(+), 5 deletions(-) (limited to 'doc/rtd/topics/availability.rst') diff --git a/README.md b/README.md index 02b2f666..73d9e780 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky
SLES/openSUSE
Ubuntu






















| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
Digital Ocean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)




























|
+| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky
SLES/openSUSE
Ubuntu






















| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)




























| ## To start developing cloud-init diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 5040ce5b..08805d99 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -54,7 +54,7 @@ class DataSourceDigitalOcean(sources.DataSource): if not is_do: return False - LOG.info("Running on digital ocean. droplet_id=%s", droplet_id) + LOG.info("Running on DigitalOcean. droplet_id=%s", droplet_id) ipv4LL_nic = None if self.use_ip4LL: diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index e4480754..a45a49d6 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -43,7 +43,7 @@ environments in the public cloud: - Softlayer - Rackspace Public Cloud - IBM Cloud -- Digital Ocean +- DigitalOcean - Bigstep - Hetzner - Joyent diff --git a/doc/rtd/topics/datasources/digitalocean.rst b/doc/rtd/topics/datasources/digitalocean.rst index 88f1e5f5..a4910408 100644 --- a/doc/rtd/topics/datasources/digitalocean.rst +++ b/doc/rtd/topics/datasources/digitalocean.rst @@ -1,7 +1,7 @@ .. _datasource_digital_ocean: -Digital Ocean -============= +DigitalOcean +============ The `DigitalOcean`_ datasource consumes the content served from DigitalOcean's `metadata service`_. This metadata service serves information about the diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 0b9fbe9e..a7c36a8c 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -32,6 +32,7 @@ klausenbusk landon912 lucasmoura lungj +mamercad manuelisimo marlluslustosa matthewruffell -- cgit v1.2.3 From 6e7066ea2b06940c4931f0258c7982b09966582f Mon Sep 17 00:00:00 2001 From: sshedi <53473811+sshedi@users.noreply.github.com> Date: Fri, 23 Jul 2021 21:10:41 +0530 Subject: Add ability to manage fallback network config on PhotonOS (#941) Currently cloud-init generates fallback network config on various scenarios. For example: 1. When no DS found 2. There is no 'network' info given in DS metadata. 3. If a DS gives a network config once and upon reboot if DS doesn't give any network info, previously set network data will be overridden. A newly introduced key in cloud.cfg.tmpl can be used to control this behavior on PhotonOS. Also, if OS comes with a set of default network files(configs), like in PhotonOS, cloud-init should not overwrite them by default. This change also includes some nitpicking changes of reorganizing few config variables. Signed-off-by: Shreenidhi Shedi --- cloudinit/distros/photon.py | 15 +++++++++ config/cloud.cfg.tmpl | 10 ++++-- doc/rtd/topics/availability.rst | 1 + doc/rtd/topics/network-config.rst | 7 ++++ tests/unittests/test_distros/test_photon.py | 51 +++++++++++++++++++++++++++++ 5 files changed, 82 insertions(+), 2 deletions(-) create mode 100644 tests/unittests/test_distros/test_photon.py (limited to 'doc/rtd/topics/availability.rst') diff --git a/cloudinit/distros/photon.py b/cloudinit/distros/photon.py index 3ef5dd40..61e270c0 100644 --- a/cloudinit/distros/photon.py +++ b/cloudinit/distros/photon.py @@ -5,6 +5,7 @@ # # This file is part of cloud-init. See LICENSE file for license information. 
+from cloudinit import net from cloudinit import util from cloudinit import subp from cloudinit import distros @@ -54,6 +55,20 @@ class Distro(distros.Distro): util.logexc(LOG, 'Command %s failed', cmd) return False, None, None + def generate_fallback_config(self): + key = 'disable_fallback_netcfg' + disable_fallback_netcfg = self._cfg.get(key, True) + LOG.debug('%s value is: %s', key, disable_fallback_netcfg) + + if not disable_fallback_netcfg: + return net.generate_fallback_config() + + LOG.info( + 'Skipping generate_fallback_config. Rely on PhotonOS default ' + 'network config' + ) + return None + def apply_locale(self, locale, out_fn=None): # This has a dependancy on glibc-i18n, user need to manually install it # and enable the option in cloud.cfg diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index f918d919..2314d893 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -18,9 +18,10 @@ users: - default {% endif %} -# VMware guest customization. {% if variant in ["photon"] %} +# VMware guest customization. disable_vmware_customization: true +manage_etc_hosts: false {% endif %} # If this is set, 'root' will not be able to ssh in and they @@ -306,10 +307,15 @@ system_info: paths: cloud_dir: /var/lib/cloud/ templates_dir: /etc/cloud/templates/ + network: + renderers: ['networkd'] ssh_svcname: sshd -#manage_etc_hosts: true + # If set to true, cloud-init will not use fallback network config. + # In Photon, we have default network settings, hence if network settings are + # not explicitly given in metadata, don't use fallback network config. + disable_fallback_netcfg: true {% endif %} {% if variant in ["freebsd", "netbsd", "openbsd"] %} network: diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index a45a49d6..b84b6076 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -26,6 +26,7 @@ OpenBSD and DragonFlyBSD: - Gentoo Linux - NetBSD - OpenBSD +- Photon OS - RHEL/CentOS - SLES/openSUSE - Ubuntu diff --git a/doc/rtd/topics/network-config.rst b/doc/rtd/topics/network-config.rst index 5f7a74f8..8eb7a31b 100644 --- a/doc/rtd/topics/network-config.rst +++ b/doc/rtd/topics/network-config.rst @@ -104,6 +104,13 @@ interface given the information it has available. Finally after selecting the "right" interface, a configuration is generated and applied to the system. +.. note:: + + PhotonOS disables fallback networking configuration by default leaving + network unrendered when no other network config is provided. + If fallback config is still desired on PhotonOS, it can be enabled by + providing `disable_fallback_netcfg: false` in + `/etc/cloud/cloud.cfg:sys_config` settings. Network Configuration Sources ============================= diff --git a/tests/unittests/test_distros/test_photon.py b/tests/unittests/test_distros/test_photon.py new file mode 100644 index 00000000..775f37ac --- /dev/null +++ b/tests/unittests/test_distros/test_photon.py @@ -0,0 +1,51 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from . 
import _get_distro +from cloudinit import util +from cloudinit.tests.helpers import mock +from cloudinit.tests.helpers import CiTestCase + +SYSTEM_INFO = { + 'paths': { + 'cloud_dir': '/var/lib/cloud/', + 'templates_dir': '/etc/cloud/templates/', + }, + 'network': {'renderers': 'networkd'}, +} + + +class TestPhoton(CiTestCase): + with_logs = True + distro = _get_distro('photon', SYSTEM_INFO) + expected_log_line = 'Rely on PhotonOS default network config' + + def test_network_renderer(self): + self.assertEqual(self.distro._cfg['network']['renderers'], 'networkd') + + def test_get_distro(self): + self.assertEqual(self.distro.osfamily, 'photon') + + def test_write_hostname(self): + hostname = 'myhostname' + hostfile = self.tmp_path('hostfile') + self.distro._write_hostname(hostname, hostfile) + self.assertEqual(hostname + '\n', util.load_file(hostfile)) + + @mock.patch('cloudinit.net.generate_fallback_config') + def test_fallback_netcfg(self, m_fallback_cfg): + + key = 'disable_fallback_netcfg' + # Don't use fallback if no setting given + self.logs.truncate(0) + assert(self.distro.generate_fallback_config() is None) + self.assertIn(self.expected_log_line, self.logs.getvalue()) + + self.logs.truncate(0) + self.distro._cfg[key] = True + assert(self.distro.generate_fallback_config() is None) + self.assertIn(self.expected_log_line, self.logs.getvalue()) + + self.logs.truncate(0) + self.distro._cfg[key] = False + assert(self.distro.generate_fallback_config() is not None) + self.assertNotIn(self.expected_log_line, self.logs.getvalue()) -- cgit v1.2.3 From 3d9c862b6ded798031fad827328fa437bc14ac97 Mon Sep 17 00:00:00 2001 From: Aleksander Baranowski Date: Thu, 5 Aug 2021 18:32:36 +0200 Subject: Add support for EuroLinux 7 && EuroLinux 8 (#957) --- README.md | 2 +- cloudinit/config/cc_ntp.py | 9 +++-- cloudinit/config/cc_yum_add_repo.py | 8 ++-- cloudinit/distros/__init__.py | 4 +- cloudinit/distros/eurolinux.py | 9 +++++ cloudinit/net/sysconfig.py | 4 +- cloudinit/tests/.test_util.py.swp | Bin 16384 -> 0 bytes cloudinit/tests/test_util.py | 66 ++++++++++++++++++++++++++++++++ cloudinit/util.py | 4 +- config/cloud.cfg.tmpl | 12 +++--- doc/rtd/topics/availability.rst | 2 +- packages/pkg-deps.json | 14 +++++++ systemd/cloud-init-generator.tmpl | 3 +- systemd/cloud-init.service.tmpl | 3 +- tests/unittests/test_cli.py | 3 +- tests/unittests/test_net.py | 1 + tests/unittests/test_render_cloudcfg.py | 6 +-- tools/read-dependencies | 8 +++- tools/render-cloudcfg | 4 +- 19 files changed, 130 insertions(+), 32 deletions(-) create mode 100644 cloudinit/distros/eurolinux.py delete mode 100644 cloudinit/tests/.test_util.py.swp (limited to 'doc/rtd/topics/availability.rst') diff --git a/README.md b/README.md index 832d8b43..caf9a6e9 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo
SLES/openSUSE
Ubuntu






















| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)




























|
+| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux
SLES/openSUSE
Ubuntu






















| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)




























| ## To start developing cloud-init diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index d227efb9..7c371a49 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -24,8 +24,9 @@ LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' NR_POOL_SERVERS = 4 -distros = ['almalinux', 'alpine', 'centos', 'debian', 'fedora', 'opensuse', - 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', 'virtuozzo'] +distros = ['almalinux', 'alpine', 'centos', 'debian', 'eurolinux', 'fedora', + 'opensuse', 'photon', 'rhel', 'rocky', 'sles', 'ubuntu', + 'virtuozzo'] NTP_CLIENT_CONFIG = { 'chrony': { @@ -405,9 +406,9 @@ def generate_server_names(distro): # For legal reasons x.pool.sles.ntp.org does not exist, # use the opensuse pool pool_distro = 'opensuse' - elif distro == 'alpine': + elif distro == 'alpine' or distro == 'eurolinux': # Alpine-specific pool (i.e. x.alpine.pool.ntp.org) does not exist - # so use general x.pool.ntp.org instead. + # so use general x.pool.ntp.org instead. The same applies to EuroLinux pool_distro = '' for x in range(0, NR_POOL_SERVERS): diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 6e6133d1..b7a48dcc 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -18,8 +18,8 @@ entry, the config entry will be skipped. **Module frequency:** per always -**Supported distros:** almalinux, centos, fedora, photon, rhel, rocky, - virtuozzo +**Supported distros:** almalinux, centos, eurolinux, fedora, photon, rhel, + rocky, virtuozzo **Config keys**:: @@ -37,8 +37,8 @@ from configparser import ConfigParser from cloudinit import util -distros = ['almalinux', 'centos', 'fedora', 'photon', 'rhel', 'rocky', - 'virtuozzo'] +distros = ['almalinux', 'centos', 'eurolinux', 'fedora', 'photon', 'rhel', + 'rocky', 'virtuozzo'] def _canonicalize_id(repo_id): diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 7b813167..a634623a 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -49,8 +49,8 @@ OSFAMILIES = { 'debian': ['debian', 'ubuntu'], 'freebsd': ['freebsd'], 'gentoo': ['gentoo'], - 'redhat': ['almalinux', 'amazon', 'centos', 'fedora', 'photon', 'rhel', - 'rocky', 'virtuozzo'], + 'redhat': ['almalinux', 'amazon', 'centos', 'eurolinux', 'fedora', + 'photon', 'rhel', 'rocky', 'virtuozzo'], 'suse': ['opensuse', 'sles'], } diff --git a/cloudinit/distros/eurolinux.py b/cloudinit/distros/eurolinux.py new file mode 100644 index 00000000..edb3165d --- /dev/null +++ b/cloudinit/distros/eurolinux.py @@ -0,0 +1,9 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.distros import rhel + + +class Distro(rhel.Distro): + pass + +# vi: ts=4 expandtab diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 49f52e9d..06f7255e 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -18,8 +18,8 @@ from .network_state import ( is_ipv6_addr, net_prefix_to_ipv4_mask, subnet_is_ipv6, IPV6_DYNAMIC_TYPES) LOG = logging.getLogger(__name__) -KNOWN_DISTROS = ['almalinux', 'centos', 'fedora', 'rhel', 'rocky', 'suse', - 'virtuozzo'] +KNOWN_DISTROS = ['almalinux', 'centos', 'eurolinux', 'fedora', 'rhel', 'rocky', + 'suse', 'virtuozzo'] NM_CFG_FILE = "/etc/NetworkManager/NetworkManager.conf" diff --git a/cloudinit/tests/.test_util.py.swp b/cloudinit/tests/.test_util.py.swp deleted file mode 100644 index 78ef5865..00000000 Binary files a/cloudinit/tests/.test_util.py.swp and /dev/null differ diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index bd7720d1..9dd01158 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -124,6 +124,38 @@ OS_RELEASE_ALMALINUX_8 = dedent("""\ ALMALINUX_MANTISBT_PROJECT_VERSION="8.3" """) +OS_RELEASE_EUROLINUX_7 = dedent("""\ + VERSION="7.9 (Minsk)" + ID="eurolinux" + ID_LIKE="rhel scientific centos fedora" + VERSION_ID="7.9" + PRETTY_NAME="EuroLinux 7.9 (Minsk)" + ANSI_COLOR="0;31" + CPE_NAME="cpe:/o:eurolinux:eurolinux:7.9:GA" + HOME_URL="http://www.euro-linux.com/" + BUG_REPORT_URL="mailto:support@euro-linux.com" + REDHAT_BUGZILLA_PRODUCT="EuroLinux 7" + REDHAT_BUGZILLA_PRODUCT_VERSION=7.9 + REDHAT_SUPPORT_PRODUCT="EuroLinux" + REDHAT_SUPPORT_PRODUCT_VERSION="7.9" +""") + +OS_RELEASE_EUROLINUX_8 = dedent("""\ + NAME="EuroLinux" + VERSION="8.4 (Vaduz)" + ID="eurolinux" + ID_LIKE="rhel fedora centos" + VERSION_ID="8.4" + PLATFORM_ID="platform:el8" + PRETTY_NAME="EuroLinux 8.4 (Vaduz)" + ANSI_COLOR="0;34" + CPE_NAME="cpe:/o:eurolinux:eurolinux:8" + HOME_URL="https://www.euro-linux.com/" + BUG_REPORT_URL="https://github.com/EuroLinux/eurolinux-distro-bugs-and-rfc/" + REDHAT_SUPPORT_PRODUCT="EuroLinux" + REDHAT_SUPPORT_PRODUCT_VERSION="8" +""") + OS_RELEASE_ROCKY_8 = dedent("""\ NAME="Rocky Linux" VERSION="8.3 (Green Obsidian)" @@ -162,6 +194,8 @@ REDHAT_RELEASE_REDHAT_7 = ( "Red Hat Enterprise Linux Server release 7.5 (Maipo)") REDHAT_RELEASE_ALMALINUX_8 = ( "AlmaLinux release 8.3 (Purple Manul)") +REDHAT_RELEASE_EUROLINUX_7 = "EuroLinux release 7.9 (Minsk)" +REDHAT_RELEASE_EUROLINUX_8 = "EuroLinux release 8.4 (Vaduz)" REDHAT_RELEASE_ROCKY_8 = ( "Rocky Linux release 8.3 (Green Obsidian)") REDHAT_RELEASE_VIRTUOZZO_8 = ( @@ -581,6 +615,38 @@ class TestGetLinuxDistro(CiTestCase): dist = util.get_linux_distro() self.assertEqual(('almalinux', '8.3', 'Purple Manul'), dist) + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux7_rhrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 7 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_7 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux7_osrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 7 read from os-release.""" + m_os_release.return_value = OS_RELEASE_EUROLINUX_7 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '7.9', 'Minsk'), dist) + + 
@mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux8_rhrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 8 read from redhat-release.""" + m_os_release.return_value = REDHAT_RELEASE_EUROLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.redhat_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + + @mock.patch('cloudinit.util.load_file') + def test_get_linux_eurolinux8_osrelease(self, m_os_release, m_path_exists): + """Verify eurolinux 8 read from os-release.""" + m_os_release.return_value = OS_RELEASE_EUROLINUX_8 + m_path_exists.side_effect = TestGetLinuxDistro.os_release_exists + dist = util.get_linux_distro() + self.assertEqual(('eurolinux', '8.4', 'Vaduz'), dist) + @mock.patch('cloudinit.util.load_file') def test_get_linux_rocky8_rhrelease(self, m_os_release, m_path_exists): """Verify rocky linux 8 read from redhat-release.""" diff --git a/cloudinit/util.py b/cloudinit/util.py index 3bed1aed..d3ced463 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -547,8 +547,8 @@ def system_info(): if system == "linux": linux_dist = info['dist'][0].lower() if linux_dist in ( - 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'fedora', - 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'): + 'almalinux', 'alpine', 'arch', 'centos', 'debian', 'eurolinux', + 'fedora', 'photon', 'rhel', 'rocky', 'suse', 'virtuozzo'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 2314d893..825deff4 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -32,8 +32,8 @@ disable_root: false disable_root: true {% endif %} -{% if variant in ["almalinux", "alpine", "amazon", "centos", "fedora", - "rhel", "rocky", "virtuozzo"] %} +{% if variant in ["almalinux", "alpine", "amazon", "centos", "eurolinux", + "fedora", "rhel", "rocky", "virtuozzo"] %} mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2'] {% if variant == "amazon" %} resize_rootfs: noblock @@ -174,8 +174,8 @@ cloud_final_modules: system_info: # This will affect which distro class gets used {% if variant in ["almalinux", "alpine", "amazon", "arch", "centos", "debian", - "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", - "rocky", "suse", "ubuntu", "virtuozzo"] %} + "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", + "photon", "rhel", "rocky", "suse", "ubuntu", "virtuozzo"] %} distro: {{ variant }} {% elif variant in ["dragonfly"] %} distro: dragonflybsd @@ -228,8 +228,8 @@ system_info: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "fedora", - "rhel", "rocky", "suse", "virtuozzo"] %} +{% elif variant in ["almalinux", "alpine", "amazon", "arch", "centos", "eurolinux", + "fedora", "rhel", "rocky", "suse", "virtuozzo"] %} # Default user name + that default users groups (if added/used) default_user: {% if variant == "amazon" %} diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index b84b6076..e0644534 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -27,7 +27,7 @@ OpenBSD and DragonFlyBSD: - NetBSD - OpenBSD - Photon OS -- RHEL/CentOS +- RHEL/CentOS/AlmaLinux/Rocky Linux/EuroLinux - SLES/openSUSE - Ubuntu diff --git a/packages/pkg-deps.json b/packages/pkg-deps.json index 80028396..eaf13469 100644 --- a/packages/pkg-deps.json +++ 
b/packages/pkg-deps.json @@ -27,6 +27,20 @@ "sudo" ] }, + "eurolinux" : { + "build-requires" : [ + "python3-devel" + ], + "requires" : [ + "e2fsprogs", + "iproute", + "net-tools", + "procps", + "rsyslog", + "shadow-utils", + "sudo" + ] + }, "redhat" : { "build-requires" : [ "python3-devel" diff --git a/systemd/cloud-init-generator.tmpl b/systemd/cloud-init-generator.tmpl index 1d6af5ae..3dbe5947 100644 --- a/systemd/cloud-init-generator.tmpl +++ b/systemd/cloud-init-generator.tmpl @@ -83,7 +83,8 @@ default() { check_for_datasource() { local ds_rc="" -{% if variant in ["almalinux", "rhel", "fedora", "centos", "rocky", "virtuozzo"] %} +{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel", + "rocky", "virtuozzo"] %} local dsidentify="/usr/libexec/cloud-init/ds-identify" {% else %} local dsidentify="/usr/lib/cloud-init/ds-identify" diff --git a/systemd/cloud-init.service.tmpl b/systemd/cloud-init.service.tmpl index cab3ec51..636f59be 100644 --- a/systemd/cloud-init.service.tmpl +++ b/systemd/cloud-init.service.tmpl @@ -12,7 +12,8 @@ After=systemd-networkd-wait-online.service {% if variant in ["ubuntu", "unknown", "debian"] %} After=networking.service {% endif %} -{% if variant in ["almalinux", "centos", "fedora", "rhel", "rocky", "virtuozzo"] %} +{% if variant in ["almalinux", "centos", "eurolinux", "fedora", "rhel", + "rocky", "virtuozzo"] %} After=network.service After=NetworkManager.service {% endif %} diff --git a/tests/unittests/test_cli.py b/tests/unittests/test_cli.py index 90d8f7b9..a39e1d0c 100644 --- a/tests/unittests/test_cli.py +++ b/tests/unittests/test_cli.py @@ -225,7 +225,8 @@ class TestCLI(test_helpers.FilesystemMockingTestCase): expected_doc_sections = [ '**Supported distros:** all', ('**Supported distros:** almalinux, alpine, centos, debian, ' - 'fedora, opensuse, photon, rhel, rocky, sles, ubuntu, virtuozzo'), + 'eurolinux, fedora, opensuse, photon, rhel, rocky, sles, ubuntu, ' + 'virtuozzo'), '**Config schema**:\n **resize_rootfs:** (true/false/noblock)', '**Examples**::\n\n runcmd:\n - [ ls, -l, / ]\n' ] diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 43e209c1..fc77b11e 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -5308,6 +5308,7 @@ class TestNetRenderers(CiTestCase): ('opensuse-tumbleweed', '', ''), ('sles', '', ''), ('centos', '', ''), + ('eurolinux', '', ''), ('fedora', '', ''), ('redhat', '', ''), ] diff --git a/tests/unittests/test_render_cloudcfg.py b/tests/unittests/test_render_cloudcfg.py index 275879af..00d50e66 100644 --- a/tests/unittests/test_render_cloudcfg.py +++ b/tests/unittests/test_render_cloudcfg.py @@ -9,9 +9,9 @@ from cloudinit import subp from cloudinit import util # TODO(Look to align with tools.render-cloudcfg or cloudinit.distos.OSFAMILIES) -DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", - "netbsd", "openbsd", "photon", "rhel", "suse", "ubuntu", - "unknown"] +DISTRO_VARIANTS = ["amazon", "arch", "centos", "debian", "eurolinux", "fedora", + "freebsd", "netbsd", "openbsd", "photon", "rhel", "suse", + "ubuntu", "unknown"] @pytest.mark.allow_subp_for(sys.executable) diff --git a/tools/read-dependencies b/tools/read-dependencies index e52720d4..810154e4 100755 --- a/tools/read-dependencies +++ b/tools/read-dependencies @@ -23,6 +23,7 @@ DEFAULT_REQUIREMENTS = 'requirements.txt' # Map the appropriate package dir needed for each distro choice DISTRO_PKG_TYPE_MAP = { 'centos': 'redhat', + 'eurolinux': 'redhat', 'rocky': 'redhat', 'redhat': 
'redhat', 'debian': 'debian', @@ -68,11 +69,13 @@ ZYPPER_INSTALL = [ DRY_DISTRO_INSTALL_PKG_CMD = { 'rocky': ['yum', 'install', '--assumeyes'], 'centos': ['yum', 'install', '--assumeyes'], + 'eurolinux': ['yum', 'install', '--assumeyes'], 'redhat': ['yum', 'install', '--assumeyes'], } DISTRO_INSTALL_PKG_CMD = { 'rocky': MAYBE_RELIABLE_YUM_INSTALL, + 'eurolinux': MAYBE_RELIABLE_YUM_INSTALL, 'centos': MAYBE_RELIABLE_YUM_INSTALL, 'redhat': MAYBE_RELIABLE_YUM_INSTALL, 'debian': ['apt', 'install', '-y'], @@ -85,6 +88,7 @@ DISTRO_INSTALL_PKG_CMD = { # List of base system packages required to enable ci automation CI_SYSTEM_BASE_PKGS = { 'common': ['make', 'sudo', 'tar'], + 'eurolinux': ['python3-tox'], 'redhat': ['python3-tox'], 'centos': ['python3-tox'], 'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'], @@ -277,10 +281,10 @@ def pkg_install(pkg_list, distro, test_distro=False, dry_run=False): cmd = DRY_DISTRO_INSTALL_PKG_CMD[distro] install_cmd.extend(cmd) - if distro in ['centos', 'redhat', 'rocky']: + if distro in ['centos', 'redhat', 'rocky', 'eurolinux']: # CentOS and Redhat need epel-release to access oauthlib and jsonschema subprocess.check_call(install_cmd + ['epel-release']) - if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos']: + if distro in ['suse', 'opensuse', 'redhat', 'rocky', 'centos', 'eurolinux']: pkg_list.append('rpm-build') subprocess.check_call(install_cmd + pkg_list) diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 227bd8ab..30f82521 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -5,8 +5,8 @@ import os import sys VARIANTS = ["almalinux", "alpine", "amazon", "arch", "centos", "debian", - "fedora", "freebsd", "netbsd", "openbsd", "photon", "rhel", - "suse","rocky", "ubuntu", "unknown", "virtuozzo"] + "eurolinux", "fedora", "freebsd", "netbsd", "openbsd", "photon", + "rhel", "suse","rocky", "ubuntu", "unknown", "virtuozzo"] if "avoid-pep8-E402-import-not-top-of-file": -- cgit v1.2.3 From 8b4a9bc7b81e61943af873bad92e2133f8275b0b Mon Sep 17 00:00:00 2001 From: Andrew Kutz <101085+akutz@users.noreply.github.com> Date: Mon, 9 Aug 2021 21:24:07 -0500 Subject: Datasource for VMware (#953) This patch finally introduces the Cloud-Init Datasource for VMware GuestInfo as a part of cloud-init proper. This datasource has existed since 2018, and rapidly became the de facto datasource for developers working with Packer, Terraform, for projects like kube-image-builder, and the de jure datasource for Photon OS. The major change to the datasource from its previous incarnation is the name. Now named DatasourceVMware, this new version of the datasource will allow multiple transport types in addition to GuestInfo keys. This datasource includes several unique features developed to address real-world situations: * Support for reading any key (metadata, userdata, vendordata) both from the guestinfo table when running on a VM in vSphere as well as from an environment variable when running inside of a container, useful for rapid dev/test. * Allows booting with DHCP while still providing full participation in Cloud-Init instance data and Jinja queries. The netifaces library provides the ability to inspect the network after it is online, and the runtime network configuration is then merged into the existing metadata and persisted to disk. * Advertises the local_ipv4 and local_ipv6 addresses via guestinfo as well. This is useful as Guest Tools is not always able to identify what would be considered the local address. 
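As a sketch of how these features look from inside a guest, the data the
datasource consumes and the addresses it advertises back can be inspected
with the same guestinfo keys and tool the datasource itself uses (the
shell session below is only illustrative):

    # Read the supplied metadata and the advertised local addresses
    # via the guestinfo namespace used by this datasource.
    vmware-rpctool "info-get guestinfo.metadata"
    vmware-rpctool "info-get guestinfo.local-ipv4"
    vmware-rpctool "info-get guestinfo.local-ipv6"

    # Rapid dev/test outside a VM: the same keys are read from the
    # environment when VMX_GUESTINFO is set (sample plain-text value).
    export VMX_GUESTINFO="1"
    export VMX_GUESTINFO_METADATA="instance-id: cloud-vm"
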
The primary author and current steward of this datasource spoke at Cloud-Init Con 2020 where there was interest in contributing this datasource to the Cloud-Init codebase. The datasource currently lives in its own GitHub repository at https://github.com/vmware/cloud-init-vmware-guestinfo. Once the datasource is merged into Cloud-Init, the old repository will be deprecated. --- README.md | 2 +- cloudinit/settings.py | 1 + cloudinit/sources/DataSourceVMware.py | 871 +++++++++++++++++++++++++ doc/rtd/topics/availability.rst | 1 + doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/vmware.rst | 359 ++++++++++ requirements.txt | 9 + tests/unittests/test_datasource/test_common.py | 3 + tests/unittests/test_datasource/test_vmware.py | 377 +++++++++++ tests/unittests/test_ds_identify.py | 279 +++++++- tools/.github-cla-signers | 1 + tools/ds-identify | 76 ++- 12 files changed, 1977 insertions(+), 3 deletions(-) create mode 100644 cloudinit/sources/DataSourceVMware.py create mode 100644 doc/rtd/topics/datasources/vmware.rst create mode 100644 tests/unittests/test_datasource/test_vmware.py (limited to 'doc/rtd/topics/availability.rst') diff --git a/README.md b/README.md index caf9a6e9..5828c2fa 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ get in contact with that distribution and send them our way! | Supported OSes | Supported Public Clouds | Supported Private Clouds | | --- | --- | --- | -| Alpine Linux
ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)















|
+| Alpine Linux

ArchLinux
Debian
DragonFlyBSD
Fedora
FreeBSD
Gentoo Linux
NetBSD
OpenBSD
RHEL/CentOS/AlmaLinux/Rocky/PhotonOS/Virtuozzo/EuroLinux
SLES/openSUSE
Ubuntu










| Amazon Web Services
Microsoft Azure
Google Cloud Platform
Oracle Cloud Infrastructure
Softlayer
Rackspace Public Cloud
IBM Cloud
DigitalOcean
Bigstep
Hetzner
Joyent
CloudSigma
Alibaba Cloud
OVH
OpenNebula
Exoscale
Scaleway
CloudStack
AltCloud
SmartOS
HyperOne
Vultr
Rootbox
| Bare metal installs
OpenStack
LXD
KVM
Metal-as-a-Service (MAAS)
VMware















| ## To start developing cloud-init diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 23e4c0ad..f69005ea 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -43,6 +43,7 @@ CFG_BUILTIN = { 'Exoscale', 'RbxCloud', 'UpCloud', + 'VMware', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceVMware.py b/cloudinit/sources/DataSourceVMware.py new file mode 100644 index 00000000..22ca63de --- /dev/null +++ b/cloudinit/sources/DataSourceVMware.py @@ -0,0 +1,871 @@ +# Cloud-Init DataSource for VMware +# +# Copyright (c) 2018-2021 VMware, Inc. All Rights Reserved. +# +# Authors: Anish Swaminathan +# Andrew Kutz +# +# This file is part of cloud-init. See LICENSE file for license information. + +"""Cloud-Init DataSource for VMware + +This module provides a cloud-init datasource for VMware systems and supports +multiple transports types, including: + + * EnvVars + * GuestInfo + +Netifaces (https://github.com/al45tair/netifaces) + + Please note this module relies on the netifaces project to introspect the + runtime, network configuration of the host on which this datasource is + running. This is in contrast to the rest of cloud-init which uses the + cloudinit/netinfo module. + + The reasons for using netifaces include: + + * Netifaces is built in C and is more portable across multiple systems + and more deterministic than shell exec'ing local network commands and + parsing their output. + + * Netifaces provides a stable way to determine the view of the host's + network after DHCP has brought the network online. Unlike most other + datasources, this datasource still provides support for JINJA queries + based on networking information even when the network is based on a + DHCP lease. While this does not tie this datasource directly to + netifaces, it does mean the ability to consistently obtain the + correct information is paramount. + + * It is currently possible to execute this datasource on macOS + (which many developers use today) to print the output of the + get_host_info function. This function calls netifaces to obtain + the same runtime network configuration that the datasource would + persist to the local system's instance data. + + However, the netinfo module fails on macOS. The result is either a + hung operation that requires a SIGINT to return control to the user, + or, if brew is used to install iproute2mac, the ip commands are used + but produce output the netinfo module is unable to parse. + + While macOS is not a target of cloud-init, this feature is quite + useful when working on this datasource. + + For more information about this behavior, please see the following + PR comment, https://bit.ly/3fG7OVh. + + The authors of this datasource are not opposed to moving away from + netifaces. The goal may be to eventually do just that. This proviso was + added to the top of this module as a way to remind future-us and others + why netifaces was used in the first place in order to either smooth the + transition away from netifaces or embrace it further up the cloud-init + stack. 
+""" + +import collections +import copy +from distutils.spawn import find_executable +import ipaddress +import json +import os +import socket +import time + +from cloudinit import dmi, log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit.subp import subp, ProcessExecutionError + +import netifaces + + +PRODUCT_UUID_FILE_PATH = "/sys/class/dmi/id/product_uuid" + +LOG = logging.getLogger(__name__) +NOVAL = "No value found" + +DATA_ACCESS_METHOD_ENVVAR = "envvar" +DATA_ACCESS_METHOD_GUESTINFO = "guestinfo" + +VMWARE_RPCTOOL = find_executable("vmware-rpctool") +REDACT = "redact" +CLEANUP_GUESTINFO = "cleanup-guestinfo" +VMX_GUESTINFO = "VMX_GUESTINFO" +GUESTINFO_EMPTY_YAML_VAL = "---" + +LOCAL_IPV4 = "local-ipv4" +LOCAL_IPV6 = "local-ipv6" +WAIT_ON_NETWORK = "wait-on-network" +WAIT_ON_NETWORK_IPV4 = "ipv4" +WAIT_ON_NETWORK_IPV6 = "ipv6" + + +class DataSourceVMware(sources.DataSource): + """ + Setting the hostname: + The hostname is set by way of the metadata key "local-hostname". + + Setting the instance ID: + The instance ID may be set by way of the metadata key "instance-id". + However, if this value is absent then the instance ID is read + from the file /sys/class/dmi/id/product_uuid. + + Configuring the network: + The network is configured by setting the metadata key "network" + with a value consistent with Network Config Versions 1 or 2, + depending on the Linux distro's version of cloud-init: + + Network Config Version 1 - http://bit.ly/cloudinit-net-conf-v1 + Network Config Version 2 - http://bit.ly/cloudinit-net-conf-v2 + + For example, CentOS 7's official cloud-init package is version + 0.7.9 and does not support Network Config Version 2. However, + this datasource still supports supplying Network Config Version 2 + data as long as the Linux distro's cloud-init package is new + enough to parse the data. + + The metadata key "network.encoding" may be used to indicate the + format of the metadata key "network". Valid encodings are base64 + and gzip+base64. + """ + + dsname = "VMware" + + def __init__(self, sys_cfg, distro, paths, ud_proc=None): + sources.DataSource.__init__(self, sys_cfg, distro, paths, ud_proc) + + self.data_access_method = None + self.vmware_rpctool = VMWARE_RPCTOOL + + def _get_data(self): + """ + _get_data loads the metadata, userdata, and vendordata from one of + the following locations in the given order: + + * envvars + * guestinfo + + Please note when updating this function with support for new data + transports, the order should match the order in the dscheck_VMware + function from the file ds-identify. + """ + + # Initialize the locally scoped metadata, userdata, and vendordata + # variables. They are assigned below depending on the detected data + # access method. + md, ud, vd = None, None, None + + # First check to see if there is data via env vars. + if os.environ.get(VMX_GUESTINFO, ""): + md = guestinfo_envvar("metadata") + ud = guestinfo_envvar("userdata") + vd = guestinfo_envvar("vendordata") + + if md or ud or vd: + self.data_access_method = DATA_ACCESS_METHOD_ENVVAR + + # At this point, all additional data transports are valid only on + # a VMware platform. + if not self.data_access_method: + system_type = dmi.read_dmi_data("system-product-name") + if system_type is None: + LOG.debug("No system-product-name found") + return False + if "vmware" not in system_type.lower(): + LOG.debug("Not a VMware platform") + return False + + # If no data was detected, check the guestinfo transport next. 
+ if not self.data_access_method: + if self.vmware_rpctool: + md = guestinfo("metadata", self.vmware_rpctool) + ud = guestinfo("userdata", self.vmware_rpctool) + vd = guestinfo("vendordata", self.vmware_rpctool) + + if md or ud or vd: + self.data_access_method = DATA_ACCESS_METHOD_GUESTINFO + + if not self.data_access_method: + LOG.error("failed to find a valid data access method") + return False + + LOG.info("using data access method %s", self._get_subplatform()) + + # Get the metadata. + self.metadata = process_metadata(load_json_or_yaml(md)) + + # Get the user data. + self.userdata_raw = ud + + # Get the vendor data. + self.vendordata_raw = vd + + # Redact any sensitive information. + self.redact_keys() + + # get_data returns true if there is any available metadata, + # userdata, or vendordata. + if self.metadata or self.userdata_raw or self.vendordata_raw: + return True + else: + return False + + def setup(self, is_new_instance): + """setup(is_new_instance) + + This is called before user-data and vendor-data have been processed. + + Unless the datasource has set mode to 'local', then networking + per 'fallback' or per 'network_config' will have been written and + brought up the OS at this point. + """ + + host_info = wait_on_network(self.metadata) + LOG.info("got host-info: %s", host_info) + + # Reflect any possible local IPv4 or IPv6 addresses in the guest + # info. + advertise_local_ip_addrs(host_info) + + # Ensure the metadata gets updated with information about the + # host, including the network interfaces, default IP addresses, + # etc. + self.metadata = util.mergemanydict([self.metadata, host_info]) + + # Persist the instance data for versions of cloud-init that support + # doing so. This occurs here rather than in the get_data call in + # order to ensure that the network interfaces are up and can be + # persisted with the metadata. + self.persist_instance_data() + + def _get_subplatform(self): + get_key_name_fn = None + if self.data_access_method == DATA_ACCESS_METHOD_ENVVAR: + get_key_name_fn = get_guestinfo_envvar_key_name + elif self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: + get_key_name_fn = get_guestinfo_key_name + else: + return sources.METADATA_UNKNOWN + + return "%s (%s)" % ( + self.data_access_method, + get_key_name_fn("metadata"), + ) + + @property + def network_config(self): + if "network" in self.metadata: + LOG.debug("using metadata network config") + else: + LOG.debug("using fallback network config") + self.metadata["network"] = { + "config": self.distro.generate_fallback_config(), + } + return self.metadata["network"]["config"] + + def get_instance_id(self): + # Pull the instance ID out of the metadata if present. Otherwise + # read the file /sys/class/dmi/id/product_uuid for the instance ID. + if self.metadata and "instance-id" in self.metadata: + return self.metadata["instance-id"] + with open(PRODUCT_UUID_FILE_PATH, "r") as id_file: + self.metadata["instance-id"] = str(id_file.read()).rstrip().lower() + return self.metadata["instance-id"] + + def get_public_ssh_keys(self): + for key_name in ( + "public-keys-data", + "public_keys_data", + "public-keys", + "public_keys", + ): + if key_name in self.metadata: + return sources.normalize_pubkey_data(self.metadata[key_name]) + return [] + + def redact_keys(self): + # Determine if there are any keys to redact. + keys_to_redact = None + if REDACT in self.metadata: + keys_to_redact = self.metadata[REDACT] + elif CLEANUP_GUESTINFO in self.metadata: + # This is for backwards compatibility. 
+ keys_to_redact = self.metadata[CLEANUP_GUESTINFO] + + if self.data_access_method == DATA_ACCESS_METHOD_GUESTINFO: + guestinfo_redact_keys(keys_to_redact, self.vmware_rpctool) + + +def decode(key, enc_type, data): + """ + decode returns the decoded string value of data + key is a string used to identify the data being decoded in log messages + """ + LOG.debug("Getting encoded data for key=%s, enc=%s", key, enc_type) + + raw_data = None + if enc_type in ["gzip+base64", "gz+b64"]: + LOG.debug("Decoding %s format %s", enc_type, key) + raw_data = util.decomp_gzip(util.b64d(data)) + elif enc_type in ["base64", "b64"]: + LOG.debug("Decoding %s format %s", enc_type, key) + raw_data = util.b64d(data) + else: + LOG.debug("Plain-text data %s", key) + raw_data = data + + return util.decode_binary(raw_data) + + +def get_none_if_empty_val(val): + """ + get_none_if_empty_val returns None if the provided value, once stripped + of its trailing whitespace, is empty or equal to GUESTINFO_EMPTY_YAML_VAL. + + The return value is always a string, regardless of whether the input is + a bytes class or a string. + """ + + # If the provided value is a bytes class, convert it to a string to + # simplify the rest of this function's logic. + val = util.decode_binary(val) + val = val.rstrip() + if len(val) == 0 or val == GUESTINFO_EMPTY_YAML_VAL: + return None + return val + + +def advertise_local_ip_addrs(host_info): + """ + advertise_local_ip_addrs gets the local IP address information from + the provided host_info map and sets the addresses in the guestinfo + namespace + """ + if not host_info: + return + + # Reflect any possible local IPv4 or IPv6 addresses in the guest + # info. + local_ipv4 = host_info.get(LOCAL_IPV4) + if local_ipv4: + guestinfo_set_value(LOCAL_IPV4, local_ipv4) + LOG.info("advertised local ipv4 address %s in guestinfo", local_ipv4) + + local_ipv6 = host_info.get(LOCAL_IPV6) + if local_ipv6: + guestinfo_set_value(LOCAL_IPV6, local_ipv6) + LOG.info("advertised local ipv6 address %s in guestinfo", local_ipv6) + + +def handle_returned_guestinfo_val(key, val): + """ + handle_returned_guestinfo_val returns the provided value if it is + not empty or set to GUESTINFO_EMPTY_YAML_VAL, otherwise None is + returned + """ + val = get_none_if_empty_val(val) + if val: + return val + LOG.debug("No value found for key %s", key) + return None + + +def get_guestinfo_key_name(key): + return "guestinfo." + key + + +def get_guestinfo_envvar_key_name(key): + return ("vmx." + get_guestinfo_key_name(key)).upper().replace(".", "_", -1) + + +def guestinfo_envvar(key): + val = guestinfo_envvar_get_value(key) + if not val: + return None + enc_type = guestinfo_envvar_get_value(key + ".encoding") + return decode(get_guestinfo_envvar_key_name(key), enc_type, val) + + +def guestinfo_envvar_get_value(key): + env_key = get_guestinfo_envvar_key_name(key) + return handle_returned_guestinfo_val(key, os.environ.get(env_key, "")) + + +def guestinfo(key, vmware_rpctool=VMWARE_RPCTOOL): + """ + guestinfo returns the guestinfo value for the provided key, decoding + the value when required + """ + val = guestinfo_get_value(key, vmware_rpctool) + if not val: + return None + enc_type = guestinfo_get_value(key + ".encoding", vmware_rpctool) + return decode(get_guestinfo_key_name(key), enc_type, val) + + +def guestinfo_get_value(key, vmware_rpctool=VMWARE_RPCTOOL): + """ + Returns a guestinfo value for the specified key. 
+ """ + LOG.debug("Getting guestinfo value for key %s", key) + + try: + (stdout, stderr) = subp( + [ + vmware_rpctool, + "info-get " + get_guestinfo_key_name(key), + ] + ) + if stderr == NOVAL: + LOG.debug("No value found for key %s", key) + elif not stdout: + LOG.error("Failed to get guestinfo value for key %s", key) + return handle_returned_guestinfo_val(key, stdout) + except ProcessExecutionError as error: + if error.stderr == NOVAL: + LOG.debug("No value found for key %s", key) + else: + util.logexc( + LOG, + "Failed to get guestinfo value for key %s: %s", + key, + error, + ) + except Exception: + util.logexc( + LOG, + "Unexpected error while trying to get " + + "guestinfo value for key %s", + key, + ) + + return None + + +def guestinfo_set_value(key, value, vmware_rpctool=VMWARE_RPCTOOL): + """ + Sets a guestinfo value for the specified key. Set value to an empty string + to clear an existing guestinfo key. + """ + + # If value is an empty string then set it to a single space as it is not + # possible to set a guestinfo key to an empty string. Setting a guestinfo + # key to a single space is as close as it gets to clearing an existing + # guestinfo key. + if value == "": + value = " " + + LOG.debug("Setting guestinfo key=%s to value=%s", key, value) + + try: + subp( + [ + vmware_rpctool, + ("info-set %s %s" % (get_guestinfo_key_name(key), value)), + ] + ) + return True + except ProcessExecutionError as error: + util.logexc( + LOG, + "Failed to set guestinfo key=%s to value=%s: %s", + key, + value, + error, + ) + except Exception: + util.logexc( + LOG, + "Unexpected error while trying to set " + + "guestinfo key=%s to value=%s", + key, + value, + ) + + return None + + +def guestinfo_redact_keys(keys, vmware_rpctool=VMWARE_RPCTOOL): + """ + guestinfo_redact_keys redacts guestinfo of all of the keys in the given + list. each key will have its value set to "---". Since the value is valid + YAML, cloud-init can still read it if it tries. + """ + if not keys: + return + if not type(keys) in (list, tuple): + keys = [keys] + for key in keys: + key_name = get_guestinfo_key_name(key) + LOG.info("clearing %s", key_name) + if not guestinfo_set_value( + key, GUESTINFO_EMPTY_YAML_VAL, vmware_rpctool + ): + LOG.error("failed to clear %s", key_name) + LOG.info("clearing %s.encoding", key_name) + if not guestinfo_set_value(key + ".encoding", "", vmware_rpctool): + LOG.error("failed to clear %s.encoding", key_name) + + +def load_json_or_yaml(data): + """ + load first attempts to unmarshal the provided data as JSON, and if + that fails then attempts to unmarshal the data as YAML. If data is + None then a new dictionary is returned. + """ + if not data: + return {} + try: + return util.load_json(data) + except (json.JSONDecodeError, TypeError): + return util.load_yaml(data) + + +def process_metadata(data): + """ + process_metadata processes metadata and loads the optional network + configuration. 
+ """ + network = None + if "network" in data: + network = data["network"] + del data["network"] + + network_enc = None + if "network.encoding" in data: + network_enc = data["network.encoding"] + del data["network.encoding"] + + if network: + if isinstance(network, collections.abc.Mapping): + LOG.debug("network data copied to 'config' key") + network = {"config": copy.deepcopy(network)} + else: + LOG.debug("network data to be decoded %s", network) + dec_net = decode("metadata.network", network_enc, network) + network = { + "config": load_json_or_yaml(dec_net), + } + + LOG.debug("network data %s", network) + data["network"] = network + + return data + + +# Used to match classes to dependencies +datasources = [ + (DataSourceVMware, (sources.DEP_FILESYSTEM,)), # Run at init-local + (DataSourceVMware, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +def get_datasource_list(depends): + """ + Return a list of data sources that match this set of dependencies + """ + return sources.list_from_depends(depends, datasources) + + +def get_default_ip_addrs(): + """ + Returns the default IPv4 and IPv6 addresses based on the device(s) used for + the default route. Please note that None may be returned for either address + family if that family has no default route or if there are multiple + addresses associated with the device used by the default route for a given + address. + """ + # TODO(promote and use netifaces in cloudinit.net* modules) + gateways = netifaces.gateways() + if "default" not in gateways: + return None, None + + default_gw = gateways["default"] + if ( + netifaces.AF_INET not in default_gw + and netifaces.AF_INET6 not in default_gw + ): + return None, None + + ipv4 = None + ipv6 = None + + gw4 = default_gw.get(netifaces.AF_INET) + if gw4: + _, dev4 = gw4 + addr4_fams = netifaces.ifaddresses(dev4) + if addr4_fams: + af_inet4 = addr4_fams.get(netifaces.AF_INET) + if af_inet4: + if len(af_inet4) > 1: + LOG.warning( + "device %s has more than one ipv4 address: %s", + dev4, + af_inet4, + ) + elif "addr" in af_inet4[0]: + ipv4 = af_inet4[0]["addr"] + + # Try to get the default IPv6 address by first seeing if there is a default + # IPv6 route. + gw6 = default_gw.get(netifaces.AF_INET6) + if gw6: + _, dev6 = gw6 + addr6_fams = netifaces.ifaddresses(dev6) + if addr6_fams: + af_inet6 = addr6_fams.get(netifaces.AF_INET6) + if af_inet6: + if len(af_inet6) > 1: + LOG.warning( + "device %s has more than one ipv6 address: %s", + dev6, + af_inet6, + ) + elif "addr" in af_inet6[0]: + ipv6 = af_inet6[0]["addr"] + + # If there is a default IPv4 address but not IPv6, then see if there is a + # single IPv6 address associated with the same device associated with the + # default IPv4 address. + if ipv4 and not ipv6: + af_inet6 = addr4_fams.get(netifaces.AF_INET6) + if af_inet6: + if len(af_inet6) > 1: + LOG.warning( + "device %s has more than one ipv6 address: %s", + dev4, + af_inet6, + ) + elif "addr" in af_inet6[0]: + ipv6 = af_inet6[0]["addr"] + + # If there is a default IPv6 address but not IPv4, then see if there is a + # single IPv4 address associated with the same device associated with the + # default IPv6 address. 
+ if not ipv4 and ipv6: + af_inet4 = addr6_fams.get(netifaces.AF_INET) + if af_inet4: + if len(af_inet4) > 1: + LOG.warning( + "device %s has more than one ipv4 address: %s", + dev6, + af_inet4, + ) + elif "addr" in af_inet4[0]: + ipv4 = af_inet4[0]["addr"] + + return ipv4, ipv6 + + +# patched socket.getfqdn() - see https://bugs.python.org/issue5004 + + +def getfqdn(name=""): + """Get fully qualified domain name from name. + An empty argument is interpreted as meaning the local host. + """ + # TODO(may want to promote this function to util.getfqdn) + # TODO(may want to extend util.get_hostname to accept fqdn=True param) + name = name.strip() + if not name or name == "0.0.0.0": + name = util.get_hostname() + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME + ) + except socket.error: + pass + else: + for addr in addrs: + if addr[3]: + name = addr[3] + break + return name + + +def is_valid_ip_addr(val): + """ + Returns false if the address is loopback, link local or unspecified; + otherwise true is returned. + """ + # TODO(extend cloudinit.net.is_ip_addr exclude link_local/loopback etc) + # TODO(migrate to use cloudinit.net.is_ip_addr)# + + addr = None + try: + addr = ipaddress.ip_address(val) + except ipaddress.AddressValueError: + addr = ipaddress.ip_address(str(val)) + except Exception: + return None + + if addr.is_link_local or addr.is_loopback or addr.is_unspecified: + return False + return True + + +def get_host_info(): + """ + Returns host information such as the host name and network interfaces. + """ + # TODO(look to promote netifices use up in cloud-init netinfo funcs) + host_info = { + "network": { + "interfaces": { + "by-mac": collections.OrderedDict(), + "by-ipv4": collections.OrderedDict(), + "by-ipv6": collections.OrderedDict(), + }, + }, + } + hostname = getfqdn(util.get_hostname()) + if hostname: + host_info["hostname"] = hostname + host_info["local-hostname"] = hostname + host_info["local_hostname"] = hostname + + default_ipv4, default_ipv6 = get_default_ip_addrs() + if default_ipv4: + host_info[LOCAL_IPV4] = default_ipv4 + if default_ipv6: + host_info[LOCAL_IPV6] = default_ipv6 + + by_mac = host_info["network"]["interfaces"]["by-mac"] + by_ipv4 = host_info["network"]["interfaces"]["by-ipv4"] + by_ipv6 = host_info["network"]["interfaces"]["by-ipv6"] + + ifaces = netifaces.interfaces() + for dev_name in ifaces: + addr_fams = netifaces.ifaddresses(dev_name) + af_link = addr_fams.get(netifaces.AF_LINK) + af_inet4 = addr_fams.get(netifaces.AF_INET) + af_inet6 = addr_fams.get(netifaces.AF_INET6) + + mac = None + if af_link and "addr" in af_link[0]: + mac = af_link[0]["addr"] + + # Do not bother recording localhost + if mac == "00:00:00:00:00:00": + continue + + if mac and (af_inet4 or af_inet6): + key = mac + val = {} + if af_inet4: + af_inet4_vals = [] + for ip_info in af_inet4: + if not is_valid_ip_addr(ip_info["addr"]): + continue + af_inet4_vals.append(ip_info) + val["ipv4"] = af_inet4_vals + if af_inet6: + af_inet6_vals = [] + for ip_info in af_inet6: + if not is_valid_ip_addr(ip_info["addr"]): + continue + af_inet6_vals.append(ip_info) + val["ipv6"] = af_inet6_vals + by_mac[key] = val + + if af_inet4: + for ip_info in af_inet4: + key = ip_info["addr"] + if not is_valid_ip_addr(key): + continue + val = copy.deepcopy(ip_info) + del val["addr"] + if mac: + val["mac"] = mac + by_ipv4[key] = val + + if af_inet6: + for ip_info in af_inet6: + key = ip_info["addr"] + if not is_valid_ip_addr(key): + continue + val = copy.deepcopy(ip_info) 
+ del val["addr"] + if mac: + val["mac"] = mac + by_ipv6[key] = val + + return host_info + + +def wait_on_network(metadata): + # Determine whether we need to wait on the network coming online. + wait_on_ipv4 = False + wait_on_ipv6 = False + if WAIT_ON_NETWORK in metadata: + wait_on_network = metadata[WAIT_ON_NETWORK] + if WAIT_ON_NETWORK_IPV4 in wait_on_network: + wait_on_ipv4_val = wait_on_network[WAIT_ON_NETWORK_IPV4] + if isinstance(wait_on_ipv4_val, bool): + wait_on_ipv4 = wait_on_ipv4_val + else: + wait_on_ipv4 = util.translate_bool(wait_on_ipv4_val) + if WAIT_ON_NETWORK_IPV6 in wait_on_network: + wait_on_ipv6_val = wait_on_network[WAIT_ON_NETWORK_IPV6] + if isinstance(wait_on_ipv6_val, bool): + wait_on_ipv6 = wait_on_ipv6_val + else: + wait_on_ipv6 = util.translate_bool(wait_on_ipv6_val) + + # Get information about the host. + host_info = None + while host_info is None: + # This loop + sleep results in two logs every second while waiting + # for either ipv4 or ipv6 up. Do we really need to log each iteration + # or can we log once and log on successful exit? + host_info = get_host_info() + + network = host_info.get("network") or {} + interfaces = network.get("interfaces") or {} + by_ipv4 = interfaces.get("by-ipv4") or {} + by_ipv6 = interfaces.get("by-ipv6") or {} + + if wait_on_ipv4: + ipv4_ready = len(by_ipv4) > 0 if by_ipv4 else False + if not ipv4_ready: + host_info = None + + if wait_on_ipv6: + ipv6_ready = len(by_ipv6) > 0 if by_ipv6 else False + if not ipv6_ready: + host_info = None + + if host_info is None: + LOG.debug( + "waiting on network: wait4=%s, ready4=%s, wait6=%s, ready6=%s", + wait_on_ipv4, + ipv4_ready, + wait_on_ipv6, + ipv6_ready, + ) + time.sleep(1) + + LOG.debug("waiting on network complete") + return host_info + + +def main(): + """ + Executed when this file is used as a program. + """ + try: + logging.setupBasicLogging() + except Exception: + pass + metadata = { + "wait-on-network": {"ipv4": True, "ipv6": "false"}, + "network": {"config": {"dhcp": True}}, + } + host_info = wait_on_network(metadata) + metadata = util.mergemanydict([metadata, host_info]) + print(util.json_dumps(metadata)) + + +if __name__ == "__main__": + main() + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index e0644534..71827177 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -67,5 +67,6 @@ Additionally, cloud-init is supported on these private clouds: - LXD - KVM - Metal-as-a-Service (MAAS) +- VMware .. vi: textwidth=79 diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 497b1467..f5aee1c2 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -50,6 +50,7 @@ The following is a list of documents for each supported datasource: datasources/upcloud.rst datasources/zstack.rst datasources/vultr.rst + datasources/vmware.rst Creation ======== diff --git a/doc/rtd/topics/datasources/vmware.rst b/doc/rtd/topics/datasources/vmware.rst new file mode 100644 index 00000000..996eb61f --- /dev/null +++ b/doc/rtd/topics/datasources/vmware.rst @@ -0,0 +1,359 @@ +.. 
_datasource_vmware: + +VMware +====== + +This datasource is for use with systems running on a VMware platform such as +vSphere and currently supports the following data transports: + + +* `GuestInfo `_ keys + +Configuration +------------- + +The configuration method is dependent upon the transport: + +GuestInfo Keys +^^^^^^^^^^^^^^ + +One method of providing meta, user, and vendor data is by setting the following +key/value pairs on a VM's ``extraConfig`` `property `_ : + +.. list-table:: + :header-rows: 1 + + * - Property + - Description + * - ``guestinfo.metadata`` + - A YAML or JSON document containing the cloud-init metadata. + * - ``guestinfo.metadata.encoding`` + - The encoding type for ``guestinfo.metadata``. + * - ``guestinfo.userdata`` + - A YAML document containing the cloud-init user data. + * - ``guestinfo.userdata.encoding`` + - The encoding type for ``guestinfo.userdata``. + * - ``guestinfo.vendordata`` + - A YAML document containing the cloud-init vendor data. + * - ``guestinfo.vendordata.encoding`` + - The encoding type for ``guestinfo.vendordata``. + + +All ``guestinfo.*.encoding`` values may be set to ``base64`` or +``gzip+base64``. + +Features +-------- + +This section reviews several features available in this datasource, regardless +of how the meta, user, and vendor data was discovered. + +Instance data and lazy networks +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +One of the hallmarks of cloud-init is `its use of instance-data and JINJA +queries <../instancedata.html#using-instance-data>`_ +-- the ability to write queries in user and vendor data that reference runtime +information present in ``/run/cloud-init/instance-data.json``. This works well +when the metadata provides all of the information up front, such as the network +configuration. For systems that rely on DHCP, however, this information may not +be available when the metadata is persisted to disk. + +This datasource ensures that even if the instance is using DHCP to configure +networking, the same details about the configured network are available in +``/run/cloud-init/instance-data.json`` as if static networking was used. This +information collected at runtime is easy to demonstrate by executing the +datasource on the command line. From the root of this repository, run the +following command: + +.. code-block:: bash + + PYTHONPATH="$(pwd)" python3 cloudinit/sources/DataSourceVMware.py + +The above command will result in output similar to the below JSON: + +.. 
code-block:: json + + { + "hostname": "akutz.localhost", + "local-hostname": "akutz.localhost", + "local-ipv4": "192.168.0.188", + "local_hostname": "akutz.localhost", + "network": { + "config": { + "dhcp": true + }, + "interfaces": { + "by-ipv4": { + "172.0.0.2": { + "netmask": "255.255.255.255", + "peer": "172.0.0.2" + }, + "192.168.0.188": { + "broadcast": "192.168.0.255", + "mac": "64:4b:f0:18:9a:21", + "netmask": "255.255.255.0" + } + }, + "by-ipv6": { + "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2": { + "flags": 208, + "mac": "64:4b:f0:18:9a:21", + "netmask": "ffff:ffff:ffff:ffff::/64" + } + }, + "by-mac": { + "64:4b:f0:18:9a:21": { + "ipv4": [ + { + "addr": "192.168.0.188", + "broadcast": "192.168.0.255", + "netmask": "255.255.255.0" + } + ], + "ipv6": [ + { + "addr": "fd8e:d25e:c5b6:1:1f5:b2fd:8973:22f2", + "flags": 208, + "netmask": "ffff:ffff:ffff:ffff::/64" + } + ] + }, + "ac:de:48:00:11:22": { + "ipv6": [] + } + } + } + }, + "wait-on-network": { + "ipv4": true, + "ipv6": "false" + } + } + + +Redacting sensitive information +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Sometimes the cloud-init userdata might contain sensitive information, and it +may be desirable to have the ``guestinfo.userdata`` key (or other guestinfo +keys) redacted as soon as its data is read by the datasource. This is possible +by adding the following to the metadata: + +.. code-block:: yaml + + redact: # formerly named cleanup-guestinfo, which will also work + - userdata + - vendordata + +When the above snippet is added to the metadata, the datasource will iterate +over the elements in the ``redact`` array and clear each of the keys. For +example, when the guestinfo transport is used, the above snippet will cause +the following commands to be executed: + +.. code-block:: shell + + vmware-rpctool "info-set guestinfo.userdata ---" + vmware-rpctool "info-set guestinfo.userdata.encoding " + vmware-rpctool "info-set guestinfo.vendordata ---" + vmware-rpctool "info-set guestinfo.vendordata.encoding " + +Please note that keys are set to the valid YAML string ``---`` as it is not +possible remove an existing key from the guestinfo key-space. A key's analogous +encoding property will be set to a single white-space character, causing the +datasource to treat the actual key value as plain-text, thereby loading it as +an empty YAML doc (hence the aforementioned ``---``\ ). + +Reading the local IP addresses +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This datasource automatically discovers the local IPv4 and IPv6 addresses for +a guest operating system based on the default routes. However, when inspecting +a VM externally, it's not possible to know what the *default* IP address is for +the guest OS. That's why this datasource sets the discovered, local IPv4 and +IPv6 addresses back in the guestinfo namespace as the following keys: + + +* ``guestinfo.local-ipv4`` +* ``guestinfo.local-ipv6`` + +It is possible that a host may not have any default, local IP addresses. It's +also possible the reported, local addresses are link-local addresses. But these +two keys may be used to discover what this datasource determined were the local +IPv4 and IPv6 addresses for a host. + +Waiting on the network +^^^^^^^^^^^^^^^^^^^^^^ + +Sometimes cloud-init may bring up the network, but it will not finish coming +online before the datasource's ``setup`` function is called, resulting in an +``/var/run/cloud-init/instance-data.json`` file that does not have the correct +network information. 
It is possible to instruct the datasource to wait until an +IPv4 or IPv6 address is available before writing the instance data with the +following metadata properties: + +.. code-block:: yaml + + wait-on-network: + ipv4: true + ipv6: true + +If either of the above values are true, then the datasource will sleep for a +second, check the network status, and repeat until one or both addresses from +the specified families are available. + +Walkthrough +----------- + +The following series of steps is a demonstration on how to configure a VM with +this datasource: + + +#. Create the metadata file for the VM. Save the following YAML to a file named + ``metadata.yaml``\ : + + .. code-block:: yaml + + instance-id: cloud-vm + local-hostname: cloud-vm + network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes + +#. Create the userdata file ``userdata.yaml``\ : + + .. code-block:: yaml + + #cloud-config + + users: + - default + - name: akutz + primary_group: akutz + sudo: ALL=(ALL) NOPASSWD:ALL + groups: sudo, wheel + ssh_import_id: None + lock_passwd: true + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDE0c5FczvcGSh/tG4iw+Fhfi/O5/EvUM/96js65tly4++YTXK1d9jcznPS5ruDlbIZ30oveCBd3kT8LLVFwzh6hepYTf0YmCTpF4eDunyqmpCXDvVscQYRXyasEm5olGmVe05RrCJSeSShAeptv4ueIn40kZKOghinGWLDSZG4+FFfgrmcMCpx5YSCtX2gvnEYZJr0czt4rxOZuuP7PkJKgC/mt2PcPjooeX00vAj81jjU2f3XKrjjz2u2+KIt9eba+vOQ6HiC8c2IzRkUAJ5i1atLy8RIbejo23+0P4N2jjk17QySFOVHwPBDTYb0/0M/4ideeU74EN/CgVsvO6JrLsPBR4dojkV5qNbMNxIVv5cUwIy2ThlLgqpNCeFIDLCWNZEFKlEuNeSQ2mPtIO7ETxEL2Cz5y/7AIuildzYMc6wi2bofRC8HmQ7rMXRWdwLKWsR0L7SKjHblIwarxOGqLnUI+k2E71YoP7SZSlxaKi17pqkr0OMCF+kKqvcvHAQuwGqyumTEWOlH6TCx1dSPrW+pVCZSHSJtSTfDW2uzL6y8k10MT06+pVunSrWo5LHAXcS91htHV1M1UrH/tZKSpjYtjMb5+RonfhaFRNzvj7cCE1f3Kp8UVqAdcGBTtReoE8eRUT63qIxjw03a7VwAyB2w+9cu1R9/vAo8SBeRqw== sakutz@gmail.com + +#. Please note this step requires that the VM be powered off. All of the + commands below use the VMware CLI tool, `govc `_. + + Go ahead and assign the path to the VM to the environment variable ``VM``\ : + + .. code-block:: shell + + export VM="/inventory/path/to/the/vm" + +#. Power off the VM: + + .. raw:: html + +
+ + ⚠️ First Boot Mode + + To ensure the next power-on operation results in a first-boot scenario for + cloud-init, it may be necessary to run the following command just before + powering off the VM: + + .. code-block:: bash + + cloud-init clean + + Otherwise cloud-init may not run in first-boot mode. For more information + on how the boot mode is determined, please see the + `First Boot Documentation <../boot.html#first-boot-determination>`_. + + .. raw:: html + +
+ + .. code-block:: shell + + govc vm.power -off "${VM}" + +#. + Export the environment variables that contain the cloud-init metadata and + userdata: + + .. code-block:: shell + + export METADATA=$(gzip -c9 /dev/null || base64; }) \ + USERDATA=$(gzip -c9 /dev/null || base64; }) + +#. + Assign the metadata and userdata to the VM: + + .. code-block:: shell + + govc vm.change -vm "${VM}" \ + -e guestinfo.metadata="${METADATA}" \ + -e guestinfo.metadata.encoding="gzip+base64" \ + -e guestinfo.userdata="${USERDATA}" \ + -e guestinfo.userdata.encoding="gzip+base64" + + Please note the above commands include specifying the encoding for the + properties. This is important as it informs the datasource how to decode + the data for cloud-init. Valid values for ``metadata.encoding`` and + ``userdata.encoding`` include: + + + * ``base64`` + * ``gzip+base64`` + +#. + Power on the VM: + + .. code-block:: shell + + govc vm.power -vm "${VM}" -on + +If all went according to plan, the CentOS box is: + +* Locked down, allowing SSH access only for the user in the userdata +* Configured for a dynamic IP address via DHCP +* Has a hostname of ``cloud-vm`` + +Examples +-------- + +This section reviews common configurations: + +Setting the hostname +^^^^^^^^^^^^^^^^^^^^ + +The hostname is set by way of the metadata key ``local-hostname``. + +Setting the instance ID +^^^^^^^^^^^^^^^^^^^^^^^ + +The instance ID may be set by way of the metadata key ``instance-id``. However, +if this value is absent then then the instance ID is read from the file +``/sys/class/dmi/id/product_uuid``. + +Providing public SSH keys +^^^^^^^^^^^^^^^^^^^^^^^^^ + +The public SSH keys may be set by way of the metadata key ``public-keys-data``. +Each newline-terminated string will be interpreted as a separate SSH public +key, which will be placed in distro's default user's +``~/.ssh/authorized_keys``. If the value is empty or absent, then nothing will +be written to ``~/.ssh/authorized_keys``. + +Configuring the network +^^^^^^^^^^^^^^^^^^^^^^^ + +The network is configured by setting the metadata key ``network`` with a value +consistent with Network Config Versions +`1 <../network-config-format-v1.html>`_ or +`2 <../network-config-format-v2.html>`_\ , depending on the Linux +distro's version of cloud-init. + +The metadata key ``network.encoding`` may be used to indicate the format of +the metadata key "network". Valid encodings are ``base64`` and ``gzip+base64``. diff --git a/requirements.txt b/requirements.txt index 5817da3b..41d01d62 100644 --- a/requirements.txt +++ b/requirements.txt @@ -32,3 +32,12 @@ jsonpatch # For validating cloud-config sections per schema definitions jsonschema + +# Used by DataSourceVMware to inspect the host's network configuration during +# the "setup()" function. +# +# This allows a host that uses DHCP to bring up the network during BootLocal +# and still participate in instance-data by gathering the network in detail at +# runtime and merge that information into the metadata and repersist that to +# disk. 
+netifaces>=0.10.9 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 5e9c547a..00f0a78c 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -29,6 +29,7 @@ from cloudinit.sources import ( DataSourceSmartOS as SmartOS, DataSourceUpCloud as UpCloud, DataSourceVultr as Vultr, + DataSourceVMware as VMware, ) from cloudinit.sources import DataSourceNone as DSNone @@ -52,6 +53,7 @@ DEFAULT_LOCAL = [ RbxCloud.DataSourceRbxCloud, Scaleway.DataSourceScaleway, UpCloud.DataSourceUpCloudLocal, + VMware.DataSourceVMware, ] DEFAULT_NETWORK = [ @@ -68,6 +70,7 @@ DEFAULT_NETWORK = [ OpenStack.DataSourceOpenStack, OVF.DataSourceOVFNet, UpCloud.DataSourceUpCloud, + VMware.DataSourceVMware, ] diff --git a/tests/unittests/test_datasource/test_vmware.py b/tests/unittests/test_datasource/test_vmware.py new file mode 100644 index 00000000..597db7c8 --- /dev/null +++ b/tests/unittests/test_datasource/test_vmware.py @@ -0,0 +1,377 @@ +# Copyright (c) 2021 VMware, Inc. All Rights Reserved. +# +# Authors: Andrew Kutz +# +# This file is part of cloud-init. See LICENSE file for license information. + +import base64 +import gzip +from cloudinit import dmi, helpers, safeyaml +from cloudinit import settings +from cloudinit.sources import DataSourceVMware +from cloudinit.tests.helpers import ( + mock, + CiTestCase, + FilesystemMockingTestCase, + populate_dir, +) + +import os + +PRODUCT_NAME_FILE_PATH = "/sys/class/dmi/id/product_name" +PRODUCT_NAME = "VMware7,1" +PRODUCT_UUID = "82343CED-E4C7-423B-8F6B-0D34D19067AB" +REROOT_FILES = { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, +} + +VMW_MULTIPLE_KEYS = [ + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test1@vmw.com", + "ssh-rsa AAAAB3NzaC1yc2EAAAA... test2@vmw.com", +] +VMW_SINGLE_KEY = "ssh-rsa AAAAB3NzaC1yc2EAAAA... test@vmw.com" + +VMW_METADATA_YAML = """instance-id: cloud-vm +local-hostname: cloud-vm +network: + version: 2 + ethernets: + nics: + match: + name: ens* + dhcp4: yes +""" + +VMW_USERDATA_YAML = """## template: jinja +#cloud-config +users: +- default +""" + +VMW_VENDORDATA_YAML = """## template: jinja +#cloud-config +runcmd: +- echo "Hello, world." +""" + + +class TestDataSourceVMware(CiTestCase): + """ + Test common functionality that is not transport specific. + """ + + def setUp(self): + super(TestDataSourceVMware, self).setUp() + self.tmp = self.tmp_dir() + + def test_no_data_access_method(self): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertFalse(ret) + + def test_get_host_info(self): + host_info = DataSourceVMware.get_host_info() + self.assertTrue(host_info) + self.assertTrue(host_info["hostname"]) + self.assertTrue(host_info["local-hostname"]) + self.assertTrue(host_info["local_hostname"]) + self.assertTrue(host_info[DataSourceVMware.LOCAL_IPV4]) + + +class TestDataSourceVMwareEnvVars(FilesystemMockingTestCase): + """ + Test the envvar transport. 
+ """ + + def setUp(self): + super(TestDataSourceVMwareEnvVars, self).setUp() + self.tmp = self.tmp_dir() + os.environ[DataSourceVMware.VMX_GUESTINFO] = "1" + self.create_system_files() + + def tearDown(self): + del os.environ[DataSourceVMware.VMX_GUESTINFO] + return super(TestDataSourceVMwareEnvVars, self).tearDown() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = None + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_ENVVAR, + DataSourceVMware.get_guestinfo_envvar_key_name("metadata"), + ), + ) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_only(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_base64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_single_ssh_key(self, m_fn): + metadata = 
DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch( + "cloudinit.sources.DataSourceVMware.guestinfo_envvar_get_value" + ) + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + PRODUCT_NAME_FILE_PATH: PRODUCT_NAME, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + def assert_get_data_ok(self, m_fn, m_fn_call_count=6): + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual(m_fn_call_count, m_fn.call_count) + self.assertEqual( + ds.data_access_method, + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + ) + return ds + + def assert_metadata(self, metadata, m_fn, m_fn_call_count=6): + ds = self.assert_get_data_ok(m_fn, m_fn_call_count) + assert_metadata(self, ds, metadata) + + def test_ds_valid_on_vmware_platform(self): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, PRODUCT_NAME) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_subplatform(self, m_fn): + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = self.assert_get_data_ok(m_fn, m_fn_call_count=4) + self.assertEqual( + ds.subplatform, + "%s (%s)" + % ( + DataSourceVMware.DATA_ACCESS_METHOD_GUESTINFO, + DataSourceVMware.get_guestinfo_key_name("metadata"), + ), + ) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_userdata_only(self, m_fn): + m_fn.side_effect = ["", VMW_USERDATA_YAML, "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_vendordata_only(self, m_fn): + m_fn.side_effect = ["", "", VMW_VENDORDATA_YAML, ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_single_ssh_key(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_SINGLE_KEY + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_metadata_multiple_ssh_keys(self, m_fn): + metadata = DataSourceVMware.load_json_or_yaml(VMW_METADATA_YAML) + metadata["public_keys"] = VMW_MULTIPLE_KEYS + metadata_yaml = safeyaml.dumps(metadata) + m_fn.side_effect = [metadata_yaml, "", "", ""] + self.assert_metadata(metadata, m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_base64(self, m_fn): + data = 
base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_b64(self, m_fn): + data = base64.b64encode(VMW_METADATA_YAML.encode("utf-8")) + m_fn.side_effect = [data, "b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gzip_base64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gzip+base64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_get_data_metadata_gz_b64(self, m_fn): + data = VMW_METADATA_YAML.encode("utf-8") + data = gzip.compress(data) + data = base64.b64encode(data) + m_fn.side_effect = [data, "gz+b64", "", ""] + self.assert_get_data_ok(m_fn, m_fn_call_count=4) + + +class TestDataSourceVMwareGuestInfo_InvalidPlatform(FilesystemMockingTestCase): + """ + Test the guestinfo transport on a non-VMware platform. + """ + + def setUp(self): + super(TestDataSourceVMwareGuestInfo_InvalidPlatform, self).setUp() + self.tmp = self.tmp_dir() + self.create_system_files() + + def create_system_files(self): + rootd = self.tmp_dir() + populate_dir( + rootd, + { + DataSourceVMware.PRODUCT_UUID_FILE_PATH: PRODUCT_UUID, + }, + ) + self.assertTrue(self.reRoot(rootd)) + + @mock.patch("cloudinit.sources.DataSourceVMware.guestinfo_get_value") + def test_ds_invalid_on_non_vmware_platform(self, m_fn): + system_type = dmi.read_dmi_data("system-product-name") + self.assertEqual(system_type, None) + + m_fn.side_effect = [VMW_METADATA_YAML, "", "", "", "", ""] + ds = get_ds(self.tmp) + ds.vmware_rpctool = "vmware-rpctool" + ret = ds.get_data() + self.assertFalse(ret) + + +def assert_metadata(test_obj, ds, metadata): + test_obj.assertEqual(metadata.get("instance-id"), ds.get_instance_id()) + test_obj.assertEqual(metadata.get("local-hostname"), ds.get_hostname()) + + expected_public_keys = metadata.get("public_keys") + if not isinstance(expected_public_keys, list): + expected_public_keys = [expected_public_keys] + + test_obj.assertEqual(expected_public_keys, ds.get_public_ssh_keys()) + test_obj.assertIsInstance(ds.get_public_ssh_keys(), list) + + +def get_ds(temp_dir): + ds = DataSourceVMware.DataSourceVMware( + settings.CFG_BUILTIN, None, helpers.Paths({"run_dir": temp_dir}) + ) + ds.vmware_rpctool = "vmware-rpctool" + return ds + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 1d8aaf18..8617d7bd 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -649,6 +649,50 @@ class TestDsIdentify(DsIdentifyBase): """EC2: bobrightbox.com in product_serial is not brightbox'""" self._test_ds_not_found('Ec2-E24Cloud-negative') + def test_vmware_no_valid_transports(self): + """VMware: no valid transports""" + self._test_ds_not_found('VMware-NoValidTransports') + + def test_vmware_envvar_no_data(self): + """VMware: envvar transport no data""" + self._test_ds_not_found('VMware-EnvVar-NoData') + + def test_vmware_envvar_no_virt_id(self): + """VMware: envvar transport success if no virt id""" + self._test_ds_found('VMware-EnvVar-NoVirtID') + + def test_vmware_envvar_activated_by_metadata(self): + """VMware: envvar transport 
activated by metadata""" + self._test_ds_found('VMware-EnvVar-Metadata') + + def test_vmware_envvar_activated_by_userdata(self): + """VMware: envvar transport activated by userdata""" + self._test_ds_found('VMware-EnvVar-Userdata') + + def test_vmware_envvar_activated_by_vendordata(self): + """VMware: envvar transport activated by vendordata""" + self._test_ds_found('VMware-EnvVar-Vendordata') + + def test_vmware_guestinfo_no_data(self): + """VMware: guestinfo transport no data""" + self._test_ds_not_found('VMware-GuestInfo-NoData') + + def test_vmware_guestinfo_no_virt_id(self): + """VMware: guestinfo transport fails if no virt id""" + self._test_ds_not_found('VMware-GuestInfo-NoVirtID') + + def test_vmware_guestinfo_activated_by_metadata(self): + """VMware: guestinfo transport activated by metadata""" + self._test_ds_found('VMware-GuestInfo-Metadata') + + def test_vmware_guestinfo_activated_by_userdata(self): + """VMware: guestinfo transport activated by userdata""" + self._test_ds_found('VMware-GuestInfo-Userdata') + + def test_vmware_guestinfo_activated_by_vendordata(self): + """VMware: guestinfo transport activated by vendordata""" + self._test_ds_found('VMware-GuestInfo-Vendordata') + class TestBSDNoSys(DsIdentifyBase): """Test *BSD code paths @@ -1136,7 +1180,240 @@ VALID_CFG = { 'Ec2-E24Cloud-negative': { 'ds': 'Ec2', 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, - } + }, + 'VMware-NoValidTransports': { + 'ds': 'VMware', + 'mocks': [ + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-EnvVar-NoData': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-EnvVar-NoVirtID': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 1, + }, + ], + }, + 'VMware-EnvVar-Metadata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-EnvVar-Userdata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-EnvVar-Vendordata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_envvar_vmx_guestinfo', + 'ret': 0, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_has_envvar_vmx_guestinfo_vendordata', + 'ret': 0, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-GuestInfo-NoData': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', 
+ }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-GuestInfo-NoVirtID': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', + }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 0, + 'out': '---', + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 1, + }, + ], + }, + 'VMware-GuestInfo-Metadata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', + }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 0, + 'out': '---', + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-GuestInfo-Userdata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', + }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 0, + 'out': '---', + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 1, + }, + MOCK_VIRT_IS_VMWARE, + ], + }, + 'VMware-GuestInfo-Vendordata': { + 'ds': 'VMware', + 'mocks': [ + { + 'name': 'vmware_has_rpctool', + 'ret': 0, + 'out': '/usr/bin/vmware-rpctool', + }, + { + 'name': 'vmware_rpctool_guestinfo_metadata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_userdata', + 'ret': 1, + }, + { + 'name': 'vmware_rpctool_guestinfo_vendordata', + 'ret': 0, + 'out': '---', + }, + MOCK_VIRT_IS_VMWARE, + ], + }, } # vi: ts=4 expandtab diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index 3c2c6d14..5089dd70 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -1,5 +1,6 @@ ader1990 ajmyyra +akutz AlexBaranowski Aman306 andrewbogott diff --git a/tools/ds-identify b/tools/ds-identify index 73e27c71..234ffa81 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -125,7 +125,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean Vultr AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud UpCloud VMware" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -1364,6 +1364,80 @@ dscheck_Vultr() { return $DS_NOT_FOUND } +vmware_has_envvar_vmx_guestinfo() { + [ -n "${VMX_GUESTINFO:-}" ] +} + +vmware_has_envvar_vmx_guestinfo_metadata() { + [ -n "${VMX_GUESTINFO_METADATA:-}" ] +} + +vmware_has_envvar_vmx_guestinfo_userdata() { + [ -n "${VMX_GUESTINFO_USERDATA:-}" ] +} + +vmware_has_envvar_vmx_guestinfo_vendordata() { + [ -n "${VMX_GUESTINFO_VENDORDATA:-}" ] +} + +vmware_has_rpctool() { + command -v vmware-rpctool >/dev/null 2>&1 +} + +vmware_rpctool_guestinfo_metadata() { + vmware-rpctool "info-get guestinfo.metadata" +} + +vmware_rpctool_guestinfo_userdata() { + vmware-rpctool "info-get guestinfo.userdata" +} + +vmware_rpctool_guestinfo_vendordata() { + vmware-rpctool "info-get guestinfo.vendordata" +} + +dscheck_VMware() { + # Checks to see if there is valid data for the VMware datasource. 
+ # The data transports are checked in the following order: + # + # * envvars + # * guestinfo + # + # Please note when updating this function with support for new data + # transports, the order should match the order in the _get_data + # function from the file DataSourceVMware.py. + + # Check to see if running in a container and the VMware + # datasource is configured via environment variables. + if vmware_has_envvar_vmx_guestinfo; then + if vmware_has_envvar_vmx_guestinfo_metadata || \ + vmware_has_envvar_vmx_guestinfo_userdata || \ + vmware_has_envvar_vmx_guestinfo_vendordata; then + return "${DS_FOUND}" + fi + fi + + # Do not proceed unless the detected platform is VMware. + if [ ! "${DI_VIRT}" = "vmware" ]; then + return "${DS_NOT_FOUND}" + fi + + # Do not proceed if the vmware-rpctool command is not present. + if ! vmware_has_rpctool; then + return "${DS_NOT_FOUND}" + fi + + # Activate the VMware datasource only if any of the fields used + # by the datasource are present in the guestinfo table. + if { vmware_rpctool_guestinfo_metadata || \ + vmware_rpctool_guestinfo_userdata || \ + vmware_rpctool_guestinfo_vendordata; } >/dev/null 2>&1; then + return "${DS_FOUND}" + fi + + return "${DS_NOT_FOUND}" +} + collect_info() { read_uname_info read_virt -- cgit v1.2.3 From 0bf8d575dc91b68084ef4d88869ac719b23924d0 Mon Sep 17 00:00:00 2001 From: sarahwzadara <83585923+sarahwzadara@users.noreply.github.com> Date: Mon, 16 Aug 2021 17:55:10 +0300 Subject: add Zadara Edge Cloud Platform to the supported clouds list (#963) --- doc/rtd/topics/availability.rst | 1 + tools/.github-cla-signers | 1 + 2 files changed, 2 insertions(+) (limited to 'doc/rtd/topics/availability.rst') diff --git a/doc/rtd/topics/availability.rst b/doc/rtd/topics/availability.rst index 71827177..d8ca9d16 100644 --- a/doc/rtd/topics/availability.rst +++ b/doc/rtd/topics/availability.rst @@ -59,6 +59,7 @@ environments in the public cloud: - SmartOS - UpCloud - Vultr +- Zadara Edge Cloud Platform Additionally, cloud-init is supported on these private clouds: diff --git a/tools/.github-cla-signers b/tools/.github-cla-signers index cf06ca3d..4fa108aa 100644 --- a/tools/.github-cla-signers +++ b/tools/.github-cla-signers @@ -51,6 +51,7 @@ onitake qubidt renanrodrigo riedel +sarahwzadara slyon smoser sshedi -- cgit v1.2.3
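For reference, the "gzip+base64" (and "gz+b64") cases exercised by the new test_vmware.py tests correspond to guestinfo payloads that are plain YAML, gzip-compressed and then base64-encoded, with a companion encoding value naming the wrapping (assumed here to be published under guestinfo.metadata.encoding, following the datasource's key-naming helpers). Below is a minimal sketch of how such a payload could be prepared and published from outside this patch series; the vmware-rpctool "info-set" call is an assumption used for illustration, not something these commits add.

    import base64
    import gzip
    import subprocess

    # Plain cloud-init metadata as YAML (mirrors the fixtures in test_vmware.py).
    metadata_yaml = "instance-id: cloud-vm\nlocal-hostname: cloud-vm\n"

    # gzip the YAML, then base64-encode it -- the "gzip+base64" encoding that
    # the datasource accepts and the unit tests above cover.
    payload = base64.b64encode(
        gzip.compress(metadata_yaml.encode("utf-8"))
    ).decode()

    # Hypothetical publication step: write the payload and its encoding into
    # the guestinfo table (assumes vmware-rpctool accepts "info-set key value").
    subprocess.run(
        ["vmware-rpctool", "info-set guestinfo.metadata " + payload],
        check=True,
    )
    subprocess.run(
        ["vmware-rpctool", "info-set guestinfo.metadata.encoding gzip+base64"],
        check=True,
    )

With data published this way inside a VMware guest, ds-identify's dscheck_VMware would find non-empty output from vmware_rpctool_guestinfo_metadata and report the VMware datasource as found, after which DataSourceVMware reads and decodes the same keys over the guestinfo transport.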