From f0bc02d7e221c9aa5982b267739481420c761ead Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 9 Oct 2018 21:46:35 +0000 Subject: instance-data: Add standard keys platform and subplatform. Refactor ec2. Add the following instance-data.json standardized keys: * v1._beta_keys: List any v1 keys in beta development, e.g. ['subplatform']. * v1.public_ssh_keys: List of any cloud-provided ssh keys for the instance. * v1.platform: String representing the cloud platform api supporting the datasource. For example: 'ec2' for aws, aliyun and brightbox cloud names. * v1.subplatform: String with more details about the source of the metadata consumed. For example, metadata uri, config drive device path or seed directory. To support the new platform and subplatform standardized instance-data, DataSource and its subclasses grew platform and subplatform attributes. The platform attribute defaults to the lowercase string datasource name at self.dsname. This attribute is overridden in NoCloud, Ec2 and ConfigDrive datasources. The subplatform attribute calls a _get_subplatform method which will return a string containing a simple slug for subplatform type such as metadata, seed-dir or config-drive followed by a detailed uri, device or directory path where the datasource consumed its configuration. As part of this work, DataSourceEc2 methods _get_data and _crawl_metadata have been refactored for a few reasons: - crawl_metadata is now a read-only operation, persisting no attributes on the datasource instance and returning a dictionary of consumed metadata. - crawl_metadata now closely represents the raw structure of the ec2 metadata consumed, so that end-users can leverage public ec2 metadata documentation where possible. - crawl_metadata adds a '_metadata_api_version' key to the crawled ds.metadata to advertise what version of EC2's api was consumed by cloud-init. - _get_data now does all the processing of crawl_metadata and saves datasource instance attributes userdata_raw, metadata etc. Additional drive-bys: * unit test rework for test_altcloud and test_azure to simplify mocks and make use of existing util and test_helpers functions. --- cloudinit/sources/DataSourceAliYun.py | 20 ++--- cloudinit/sources/DataSourceAltCloud.py | 33 ++++++--- cloudinit/sources/DataSourceAzure.py | 8 ++ cloudinit/sources/DataSourceBigstep.py | 4 + cloudinit/sources/DataSourceCloudSigma.py | 6 +- cloudinit/sources/DataSourceConfigDrive.py | 12 +++ cloudinit/sources/DataSourceEc2.py | 115 +++++++++++++++-------------- cloudinit/sources/DataSourceIBMCloud.py | 4 + cloudinit/sources/DataSourceMAAS.py | 4 + cloudinit/sources/DataSourceNoCloud.py | 21 ++++++ cloudinit/sources/DataSourceNone.py | 4 + cloudinit/sources/DataSourceOVF.py | 6 ++ cloudinit/sources/DataSourceOpenNebula.py | 8 ++ cloudinit/sources/DataSourceOracle.py | 4 + cloudinit/sources/DataSourceSmartOS.py | 3 + cloudinit/sources/__init__.py | 98 ++++++++++++++++++------ cloudinit/sources/tests/test_init.py | 12 ++- cloudinit/sources/tests/test_oracle.py | 8 ++ 18 files changed, 264 insertions(+), 106 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 858e0827..45cc9f00 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -1,7 +1,5 @@ # This file is part of cloud-init. See LICENSE file for license information.
-import os - from cloudinit import sources from cloudinit.sources import DataSourceEc2 as EC2 from cloudinit import util @@ -18,25 +16,17 @@ class DataSourceAliYun(EC2.DataSourceEc2): min_metadata_version = '2016-01-01' extended_metadata_versions = [] - def __init__(self, sys_cfg, distro, paths): - super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) - self.seed_dir = os.path.join(paths.seed_dir, "AliYun") - def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): return self.metadata.get('hostname', 'localhost.localdomain') def get_public_ssh_keys(self): return parse_public_keys(self.metadata.get('public-keys', {})) - @property - def cloud_platform(self): - if self._cloud_platform is None: - if _is_aliyun(): - self._cloud_platform = EC2.Platforms.ALIYUN - else: - self._cloud_platform = EC2.Platforms.NO_EC2_METADATA - - return self._cloud_platform + def _get_cloud_name(self): + if _is_aliyun(): + return EC2.CloudNames.ALIYUN + else: + return EC2.CloudNames.NO_EC2_METADATA def _is_aliyun(): diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 8cd312d0..5270fda8 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -89,7 +89,9 @@ class DataSourceAltCloud(sources.DataSource): ''' Description: Get the type for the cloud back end this instance is running on - by examining the string returned by reading the dmi data. + by examining the string returned by reading either: + CLOUD_INFO_FILE or + the dmi data. Input: None @@ -99,7 +101,14 @@ class DataSourceAltCloud(sources.DataSource): 'RHEV', 'VSPHERE' or 'UNKNOWN' ''' - + if os.path.exists(CLOUD_INFO_FILE): + try: + cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper() + except IOError: + util.logexc(LOG, 'Unable to access cloud info file at %s.', + CLOUD_INFO_FILE) + return 'UNKNOWN' + return cloud_type system_name = util.read_dmi_data("system-product-name") if not system_name: return 'UNKNOWN' @@ -134,15 +143,7 @@ class DataSourceAltCloud(sources.DataSource): LOG.debug('Invoked get_data()') - if os.path.exists(CLOUD_INFO_FILE): - try: - cloud_type = util.load_file(CLOUD_INFO_FILE).strip().upper() - except IOError: - util.logexc(LOG, 'Unable to access cloud info file at %s.', - CLOUD_INFO_FILE) - return False - else: - cloud_type = self.get_cloud_type() + cloud_type = self.get_cloud_type() LOG.debug('cloud_type: %s', str(cloud_type)) @@ -161,6 +162,15 @@ class DataSourceAltCloud(sources.DataSource): util.logexc(LOG, 'Failed accessing user data.') return False + def _get_subplatform(self): + """Return the subplatform metadata details.""" + cloud_type = self.get_cloud_type() + if not hasattr(self, 'source'): + self.source = sources.METADATA_UNKNOWN + if cloud_type == 'RHEV': + self.source = '/dev/fd0' + return '%s (%s)' % (cloud_type.lower(), self.source) + def user_data_rhevm(self): ''' RHEVM specific userdata read @@ -232,6 +242,7 @@ class DataSourceAltCloud(sources.DataSource): try: return_str = util.mount_cb(cdrom_dev, read_user_data_callback) if return_str: + self.source = cdrom_dev break except OSError as err: if err.errno != errno.ENOENT: diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 783445e1..39391d01 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -351,6 +351,14 @@ class DataSourceAzure(sources.DataSource): metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata + def 
_get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed.startswith('/dev'): + subplatform_type = 'config-disk' + else: + subplatform_type = 'seed-dir' + return '%s (%s)' % (subplatform_type, self.seed) + def crawl_metadata(self): """Walk all instance metadata sources returning a dict on success. diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py index 699a85b5..52fff20a 100644 --- a/cloudinit/sources/DataSourceBigstep.py +++ b/cloudinit/sources/DataSourceBigstep.py @@ -36,6 +36,10 @@ class DataSourceBigstep(sources.DataSource): self.userdata_raw = decoded["userdata_raw"] return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'metadata (%s)' % get_url_from_file() + def get_url_from_file(): try: diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index c816f349..2955d3f0 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -7,7 +7,7 @@ from base64 import b64decode import re -from cloudinit.cs_utils import Cepko +from cloudinit.cs_utils import Cepko, SERIAL_PORT from cloudinit import log as logging from cloudinit import sources @@ -84,6 +84,10 @@ class DataSourceCloudSigma(sources.DataSource): return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'cepko (%s)' % SERIAL_PORT + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): """ Cleans up and uses the server's name if the latter is set. Otherwise diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 664dc4b7..564e3eb3 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -160,6 +160,18 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): LOG.debug("no network configuration available") return self._network_config + @property + def platform(self): + return 'openstack' + + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed_dir in self.source: + subplatform_type = 'seed-dir' + elif self.source.startswith('/dev'): + subplatform_type = 'config-disk' + return '%s (%s)' % (subplatform_type, self.source) + def read_config_drive(source_dir): reader = openstack.ConfigDriveReader(source_dir) diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 968ab3f7..9ccf2cdc 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -28,18 +28,16 @@ STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") STRICT_ID_DEFAULT = "warn" -class Platforms(object): - # TODO Rename and move to cloudinit.cloud.CloudNames - ALIYUN = "AliYun" - AWS = "AWS" - BRIGHTBOX = "Brightbox" - SEEDED = "Seeded" +class CloudNames(object): + ALIYUN = "aliyun" + AWS = "aws" + BRIGHTBOX = "brightbox" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. - UNKNOWN = "Unknown" + UNKNOWN = "unknown" # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata # service available. No attempt at the Ec2 Metadata service will be made. 
- NO_EC2_METADATA = "No-EC2-Metadata" + NO_EC2_METADATA = "no-ec2-metadata" class DataSourceEc2(sources.DataSource): @@ -61,8 +59,6 @@ class DataSourceEc2(sources.DataSource): url_max_wait = 120 url_timeout = 50 - _cloud_platform = None - _network_config = sources.UNSET # Used to cache calculated network cfg v1 # Whether we want to get network configuration from the metadata service. @@ -71,30 +67,21 @@ class DataSourceEc2(sources.DataSource): def __init__(self, sys_cfg, distro, paths): super(DataSourceEc2, self).__init__(sys_cfg, distro, paths) self.metadata_address = None - self.seed_dir = os.path.join(paths.seed_dir, "ec2") def _get_cloud_name(self): """Return the cloud name as identified during _get_data.""" - return self.cloud_platform + return identify_platform() def _get_data(self): - seed_ret = {} - if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): - self.userdata_raw = seed_ret['user-data'] - self.metadata = seed_ret['meta-data'] - LOG.debug("Using seeded ec2 data from %s", self.seed_dir) - self._cloud_platform = Platforms.SEEDED - return True - strict_mode, _sleep = read_strict_mode( util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), ("warn", None)) - LOG.debug("strict_mode: %s, cloud_platform=%s", - strict_mode, self.cloud_platform) - if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: + LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s", + strict_mode, self.cloud_name, self.platform) + if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN: return False - elif self.cloud_platform == Platforms.NO_EC2_METADATA: + elif self.cloud_name == CloudNames.NO_EC2_METADATA: return False if self.perform_dhcp_setup: # Setup networking in init-local stage. @@ -103,13 +90,22 @@ class DataSourceEc2(sources.DataSource): return False try: with EphemeralDHCPv4(self.fallback_interface): - return util.log_time( + self._crawled_metadata = util.log_time( logfunc=LOG.debug, msg='Crawl of metadata service', - func=self._crawl_metadata) + func=self.crawl_metadata) except NoDHCPLeaseError: return False else: - return self._crawl_metadata() + self._crawled_metadata = util.log_time( + logfunc=LOG.debug, msg='Crawl of metadata service', + func=self.crawl_metadata) + if not self._crawled_metadata: + return False + self.metadata = self._crawled_metadata.get('meta-data', None) + self.userdata_raw = self._crawled_metadata.get('user-data', None) + self.identity = self._crawled_metadata.get( + 'dynamic', {}).get('instance-identity', {}).get('document', {}) + return True @property def launch_index(self): @@ -117,6 +113,15 @@ class DataSourceEc2(sources.DataSource): return None return self.metadata.get('ami-launch-index') + @property + def platform(self): + # Handle upgrade path of pickled ds + if not hasattr(self, '_platform_type'): + self._platform_type = DataSourceEc2.dsname.lower() + if not self._platform_type: + self._platform_type = DataSourceEc2.dsname.lower() + return self._platform_type + def get_metadata_api_version(self): """Get the best supported api version from the metadata service. 
@@ -144,7 +149,7 @@ class DataSourceEc2(sources.DataSource): return self.min_metadata_version def get_instance_id(self): - if self.cloud_platform == Platforms.AWS: + if self.cloud_name == CloudNames.AWS: # Prefer the ID from the instance identity document, but fall back if not getattr(self, 'identity', None): # If re-using cached datasource, it's get_data run didn't @@ -254,7 +259,7 @@ class DataSourceEc2(sources.DataSource): @property def availability_zone(self): try: - if self.cloud_platform == Platforms.AWS: + if self.cloud_name == CloudNames.AWS: return self.identity.get( 'availabilityZone', self.metadata['placement']['availability-zone']) @@ -265,7 +270,7 @@ class DataSourceEc2(sources.DataSource): @property def region(self): - if self.cloud_platform == Platforms.AWS: + if self.cloud_name == CloudNames.AWS: region = self.identity.get('region') # Fallback to trimming the availability zone if region is missing if self.availability_zone and not region: @@ -277,16 +282,10 @@ class DataSourceEc2(sources.DataSource): return az[:-1] return None - @property - def cloud_platform(self): # TODO rename cloud_name - if self._cloud_platform is None: - self._cloud_platform = identify_platform() - return self._cloud_platform - def activate(self, cfg, is_new_instance): if not is_new_instance: return - if self.cloud_platform == Platforms.UNKNOWN: + if self.cloud_name == CloudNames.UNKNOWN: warn_if_necessary( util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT), cfg) @@ -306,13 +305,13 @@ class DataSourceEc2(sources.DataSource): result = None no_network_metadata_on_aws = bool( 'network' not in self.metadata and - self.cloud_platform == Platforms.AWS) + self.cloud_name == CloudNames.AWS) if no_network_metadata_on_aws: LOG.debug("Metadata 'network' not present:" " Refreshing stale metadata from prior to upgrade.") util.log_time( logfunc=LOG.debug, msg='Re-crawl of metadata service', - func=self._crawl_metadata) + func=self.get_data) # Limit network configuration to only the primary/fallback nic iface = self.fallback_interface @@ -340,28 +339,32 @@ class DataSourceEc2(sources.DataSource): return super(DataSourceEc2, self).fallback_interface return self._fallback_interface - def _crawl_metadata(self): + def crawl_metadata(self): """Crawl metadata service when available. - @returns: True on success, False otherwise. + @returns: Dictionary of crawled metadata content containing the keys: + meta-data, user-data and dynamic. 
""" if not self.wait_for_metadata_service(): - return False + return {} api_version = self.get_metadata_api_version() + crawled_metadata = {} try: - self.userdata_raw = ec2.get_instance_userdata( + crawled_metadata['user-data'] = ec2.get_instance_userdata( api_version, self.metadata_address) - self.metadata = ec2.get_instance_metadata( + crawled_metadata['meta-data'] = ec2.get_instance_metadata( api_version, self.metadata_address) - if self.cloud_platform == Platforms.AWS: - self.identity = ec2.get_instance_identity( - api_version, self.metadata_address).get('document', {}) + if self.cloud_name == CloudNames.AWS: + identity = ec2.get_instance_identity( + api_version, self.metadata_address) + crawled_metadata['dynamic'] = {'instance-identity': identity} except Exception: util.logexc( LOG, "Failed reading from metadata address %s", self.metadata_address) - return False - return True + return {} + crawled_metadata['_metadata_api_version'] = api_version + return crawled_metadata class DataSourceEc2Local(DataSourceEc2): @@ -375,10 +378,10 @@ class DataSourceEc2Local(DataSourceEc2): perform_dhcp_setup = True # Use dhcp before querying metadata def get_data(self): - supported_platforms = (Platforms.AWS,) - if self.cloud_platform not in supported_platforms: + supported_platforms = (CloudNames.AWS,) + if self.cloud_name not in supported_platforms: LOG.debug("Local Ec2 mode only supported on %s, not %s", - supported_platforms, self.cloud_platform) + supported_platforms, self.cloud_name) return False return super(DataSourceEc2Local, self).get_data() @@ -439,20 +442,20 @@ def identify_aws(data): if (data['uuid'].startswith('ec2') and (data['uuid_source'] == 'hypervisor' or data['uuid'] == data['serial'])): - return Platforms.AWS + return CloudNames.AWS return None def identify_brightbox(data): if data['serial'].endswith('brightbox.com'): - return Platforms.BRIGHTBOX + return CloudNames.BRIGHTBOX def identify_platform(): - # identify the platform and return an entry in Platforms. + # identify the platform and return an entry in CloudNames. 
data = _collect_platform_data() - checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN) + checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py index a5358148..21e6ae6b 100644 --- a/cloudinit/sources/DataSourceIBMCloud.py +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -157,6 +157,10 @@ class DataSourceIBMCloud(sources.DataSource): return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return '%s (%s)' % (self.platform, self.source) + def check_instance_id(self, sys_cfg): """quickly (local check only) if self.instance_id is still valid diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index bcb38544..61aa6d7e 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -109,6 +109,10 @@ class DataSourceMAAS(sources.DataSource): LOG.warning("Invalid content in vendor-data: %s", e) self.vendordata_raw = None + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'seed-dir (%s)' % self.base_url + def wait_for_metadata_service(self, url): mcfg = self.ds_cfg max_wait = 120 diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 2daea59d..9010f06c 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -186,6 +186,27 @@ class DataSourceNoCloud(sources.DataSource): self._network_eni = mydata['meta-data'].get('network-interfaces') return True + @property + def platform_type(self): + # Handle upgrade path of pickled ds + if not hasattr(self, '_platform_type'): + self._platform_type = None + if not self._platform_type: + self._platform_type = 'lxd' if util.is_lxd() else 'nocloud' + return self._platform_type + + def _get_cloud_name(self): + """Return unknown when 'cloud-name' key is absent from metadata.""" + return sources.METADATA_UNKNOWN + + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed.startswith('/dev'): + subplatform_type = 'config-disk' + else: + subplatform_type = 'seed-dir' + return '%s (%s)' % (subplatform_type, self.seed) + def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still valid # we check kernel command line or files. 
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py index e63a7e39..e6250801 100644 --- a/cloudinit/sources/DataSourceNone.py +++ b/cloudinit/sources/DataSourceNone.py @@ -28,6 +28,10 @@ class DataSourceNone(sources.DataSource): self.metadata = self.ds_cfg['metadata'] return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'config' + def get_instance_id(self): return 'iid-datasource-none' diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 178ccb0f..045291e7 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -275,6 +275,12 @@ class DataSourceOVF(sources.DataSource): self.cfg = cfg return True + def _get_subplatform(self): + system_type = util.read_dmi_data("system-product-name").lower() + if system_type == 'vmware': + return 'vmware (%s)' % self.seed + return 'ovf (%s)' % self.seed + def get_public_ssh_keys(self): if 'public-keys' not in self.metadata: return [] diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 77ccd128..e62e9729 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -95,6 +95,14 @@ class DataSourceOpenNebula(sources.DataSource): self.userdata_raw = results.get('userdata') return True + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + if self.seed_dir in self.seed: + subplatform_type = 'seed-dir' + else: + subplatform_type = 'config-disk' + return '%s (%s)' % (subplatform_type, self.seed) + @property def network_config(self): if self.network is not None: diff --git a/cloudinit/sources/DataSourceOracle.py b/cloudinit/sources/DataSourceOracle.py index fab39af3..70b9c58a 100644 --- a/cloudinit/sources/DataSourceOracle.py +++ b/cloudinit/sources/DataSourceOracle.py @@ -91,6 +91,10 @@ class DataSourceOracle(sources.DataSource): def crawl_metadata(self): return read_metadata() + def _get_subplatform(self): + """Return the subplatform metadata source details.""" + return 'metadata (%s)' % METADATA_ENDPOINT + def check_instance_id(self, sys_cfg): """quickly check (local only) if self.instance_id is still valid diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 593ac91a..32b57cdd 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -303,6 +303,9 @@ class DataSourceSmartOS(sources.DataSource): self._set_provisioned() return True + def _get_subplatform(self): + return 'serial (%s)' % SERIAL_DEVICE + def device_name_to_device(self, name): return self.ds_cfg['disk_aliases'].get(name) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 5ac98826..9b90680f 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -54,6 +54,7 @@ REDACT_SENSITIVE_VALUE = 'redacted for non-root user' METADATA_CLOUD_NAME_KEY = 'cloud-name' UNSET = "_unset" +METADATA_UNKNOWN = 'unknown' LOG = logging.getLogger(__name__) @@ -133,6 +134,14 @@ class DataSource(object): # Cached cloud_name as determined by _get_cloud_name _cloud_name = None + # Cached cloud platform api type: e.g. ec2, openstack, kvm, lxd, azure etc. + _platform_type = None + + # More details about the cloud platform: + # - metadata (http://169.254.169.254/) + # - seed-dir () + _subplatform = None + # Track the discovered fallback nic for use in configuration generation. 
_fallback_interface = None @@ -192,21 +201,24 @@ class DataSource(object): local_hostname = self.get_hostname() instance_id = self.get_instance_id() availability_zone = self.availability_zone - cloud_name = self.cloud_name - # When adding new standard keys prefer underscore-delimited instead - # of hyphen-delimted to support simple variable references in jinja - # templates. + # In the event of upgrade from existing cloudinit, pickled datasource + # will not contain these new class attributes. So we need to recrawl + # metadata to discover that content. return { 'v1': { + '_beta_keys': ['subplatform'], 'availability-zone': availability_zone, 'availability_zone': availability_zone, - 'cloud-name': cloud_name, - 'cloud_name': cloud_name, + 'cloud-name': self.cloud_name, + 'cloud_name': self.cloud_name, + 'platform': self.platform_type, + 'public_ssh_keys': self.get_public_ssh_keys(), 'instance-id': instance_id, 'instance_id': instance_id, 'local-hostname': local_hostname, 'local_hostname': local_hostname, - 'region': self.region}} + 'region': self.region, + 'subplatform': self.subplatform}} def clear_cached_attrs(self, attr_defaults=()): """Reset any cached metadata attributes to datasource defaults. @@ -247,19 +259,27 @@ class DataSource(object): @return True on successful write, False otherwise. """ - instance_data = { - 'ds': {'_doc': EXPERIMENTAL_TEXT, - 'meta_data': self.metadata}} - if hasattr(self, 'network_json'): - network_json = getattr(self, 'network_json') - if network_json != UNSET: - instance_data['ds']['network_json'] = network_json - if hasattr(self, 'ec2_metadata'): - ec2_metadata = getattr(self, 'ec2_metadata') - if ec2_metadata != UNSET: - instance_data['ds']['ec2_metadata'] = ec2_metadata + if hasattr(self, '_crawled_metadata'): + # Any datasource with _crawled_metadata will best represent + # most recent, 'raw' metadata + crawled_metadata = copy.deepcopy( + getattr(self, '_crawled_metadata')) + crawled_metadata.pop('user-data', None) + crawled_metadata.pop('vendor-data', None) + instance_data = {'ds': crawled_metadata} + else: + instance_data = {'ds': {'meta_data': self.metadata}} + if hasattr(self, 'network_json'): + network_json = getattr(self, 'network_json') + if network_json != UNSET: + instance_data['ds']['network_json'] = network_json + if hasattr(self, 'ec2_metadata'): + ec2_metadata = getattr(self, 'ec2_metadata') + if ec2_metadata != UNSET: + instance_data['ds']['ec2_metadata'] = ec2_metadata instance_data.update( self._get_standardized_metadata()) + instance_data['ds']['_doc'] = EXPERIMENTAL_TEXT try: # Process content base64encoding unserializable values content = util.json_dumps(instance_data) @@ -346,6 +366,40 @@ class DataSource(object): self.cloud_name) return self._fallback_interface + @property + def platform_type(self): + if not hasattr(self, '_platform_type'): + # Handle upgrade path where pickled datasource has no _platform. + self._platform_type = self.dsname.lower() + if not self._platform_type: + self._platform_type = self.dsname.lower() + return self._platform_type + + @property + def subplatform(self): + """Return a string representing subplatform details for the datasource. + + This should be guidance for where the metadata is sourced. 
+ Examples of this on different clouds: + ec2: metadata (http://169.254.169.254) + openstack: configdrive (/dev/path) + openstack: metadata (http://169.254.169.254) + nocloud: seed-dir (/seed/dir/path) + lxd: nocloud (/seed/dir/path) + """ + if not hasattr(self, '_subplatform'): + # Handle upgrade path where pickled datasource has no _platform. + self._subplatform = self._get_subplatform() + if not self._subplatform: + self._subplatform = self._get_subplatform() + return self._subplatform + + def _get_subplatform(self): + """Subclasses should implement to return a "slug (detail)" string.""" + if hasattr(self, 'metadata_address'): + return 'metadata (%s)' % getattr(self, 'metadata_address') + return METADATA_UNKNOWN + @property def cloud_name(self): """Return lowercase cloud name as determined by the datasource. @@ -359,9 +413,11 @@ class DataSource(object): cloud_name = self.metadata.get(METADATA_CLOUD_NAME_KEY) if isinstance(cloud_name, six.string_types): self._cloud_name = cloud_name.lower() - LOG.debug( - 'Ignoring metadata provided key %s: non-string type %s', - METADATA_CLOUD_NAME_KEY, type(cloud_name)) + else: + self._cloud_name = self._get_cloud_name().lower() + LOG.debug( + 'Ignoring metadata provided key %s: non-string type %s', + METADATA_CLOUD_NAME_KEY, type(cloud_name)) else: self._cloud_name = self._get_cloud_name().lower() return self._cloud_name diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 8082019e..391b3436 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -295,6 +295,7 @@ class TestDataSource(CiTestCase): 'base64_encoded_keys': [], 'sensitive_keys': [], 'v1': { + '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', @@ -303,7 +304,10 @@ class TestDataSource(CiTestCase): 'instance_id': 'iid-datasource', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', - 'region': 'myregion'}, + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'region': 'myregion', + 'subplatform': 'unknown'}, 'ds': { '_doc': EXPERIMENTAL_TEXT, 'meta_data': {'availability_zone': 'myaz', @@ -339,6 +343,7 @@ class TestDataSource(CiTestCase): 'base64_encoded_keys': [], 'sensitive_keys': ['ds/meta_data/some/security-credentials'], 'v1': { + '_beta_keys': ['subplatform'], 'availability-zone': 'myaz', 'availability_zone': 'myaz', 'cloud-name': 'subclasscloudname', @@ -347,7 +352,10 @@ class TestDataSource(CiTestCase): 'instance_id': 'iid-datasource', 'local-hostname': 'test-subclass-hostname', 'local_hostname': 'test-subclass-hostname', - 'region': 'myregion'}, + 'platform': 'mytestsubclass', + 'public_ssh_keys': [], + 'region': 'myregion', + 'subplatform': 'unknown'}, 'ds': { '_doc': EXPERIMENTAL_TEXT, 'meta_data': { diff --git a/cloudinit/sources/tests/test_oracle.py b/cloudinit/sources/tests/test_oracle.py index 7599126c..97d62947 100644 --- a/cloudinit/sources/tests/test_oracle.py +++ b/cloudinit/sources/tests/test_oracle.py @@ -71,6 +71,14 @@ class TestDataSourceOracle(test_helpers.CiTestCase): self.assertFalse(ds._get_data()) mocks._is_platform_viable.assert_called_once_with() + def test_platform_info(self): + """Return platform-related information for Oracle Datasource.""" + ds, _mocks = self._get_ds() + self.assertEqual('oracle', ds.cloud_name) + self.assertEqual('oracle', ds.platform_type) + self.assertEqual( + 'metadata (http://169.254.169.254/openstack/)', ds.subplatform) + @mock.patch(DS_PATH 
+ "._is_iscsi_root", return_value=True) def test_without_userdata(self, m_is_iscsi_root): """If no user-data is provided, it should not be in return dict.""" -- cgit v1.2.3 From 6ee8a2c557ccdc8be54bcf8a8762800c10f3ef49 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 9 Oct 2018 22:19:20 +0000 Subject: tools: Add cloud-id command line utility Add a quick cloud lookup utility in order to more easily determine the cloud on which an instance is running. The utility parses standardized attributes from /run/cloud-init/instance-data.json to print the canonical cloud-id for the instance. It uses known region maps if necessary to determine on which specific cloud the instance is running. Examples: aws, aws-gov, aws-china, rackspace, azure-china, lxd, openstack, unknown --- cloudinit/cmd/cloud_id.py | 90 +++++++++++++++++++++++++ cloudinit/cmd/tests/test_cloud_id.py | 127 +++++++++++++++++++++++++++++++++++ cloudinit/sources/__init__.py | 27 ++++++++ cloudinit/sources/tests/test_init.py | 74 +++++++++++++++++++- setup.py | 3 +- 5 files changed, 319 insertions(+), 2 deletions(-) create mode 100755 cloudinit/cmd/cloud_id.py create mode 100644 cloudinit/cmd/tests/test_cloud_id.py (limited to 'cloudinit/sources') diff --git a/cloudinit/cmd/cloud_id.py b/cloudinit/cmd/cloud_id.py new file mode 100755 index 00000000..97608921 --- /dev/null +++ b/cloudinit/cmd/cloud_id.py @@ -0,0 +1,90 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Commandline utility to list the canonical cloud-id for an instance.""" + +import argparse +import json +import sys + +from cloudinit.sources import ( + INSTANCE_JSON_FILE, METADATA_UNKNOWN, canonical_cloud_id) + +DEFAULT_INSTANCE_JSON = '/run/cloud-init/%s' % INSTANCE_JSON_FILE + +NAME = 'cloud-id' + + +def get_parser(parser=None): + """Build or extend an arg parser for the cloud-id utility. + + @param parser: Optional existing ArgumentParser instance representing the + query subcommand which will be extended to support the args of + this utility. + + @returns: ArgumentParser with proper argument configuration. + """ + if not parser: + parser = argparse.ArgumentParser( + prog=NAME, + description='Report the canonical cloud-id for this instance') + parser.add_argument( + '-j', '--json', action='store_true', default=False, + help='Report all standardized cloud-id information as json.') + parser.add_argument( + '-l', '--long', action='store_true', default=False, + help='Report extended cloud-id information as tab-delimited string.') + parser.add_argument( + '-i', '--instance-data', type=str, default=DEFAULT_INSTANCE_JSON, + help=('Path to instance-data.json file. Default is %s' % + DEFAULT_INSTANCE_JSON)) + return parser + + +def error(msg): + sys.stderr.write('ERROR: %s\n' % msg) + return 1 + + +def handle_args(name, args): + """Handle calls to 'cloud-id' cli. + + Print the canonical cloud-id on which the instance is running. + + @return: 0 on success, 1 otherwise. + """ + try: + instance_data = json.load(open(args.instance_data)) + except IOError: + return error( + "File not found '%s'. Provide a path to instance data json file" + ' using --instance-data' % args.instance_data) + except ValueError as e: + return error( + "File '%s' is not valid json. 
%s" % (args.instance_data, e)) + v1 = instance_data.get('v1', {}) + cloud_id = canonical_cloud_id( + v1.get('cloud_name', METADATA_UNKNOWN), + v1.get('region', METADATA_UNKNOWN), + v1.get('platform', METADATA_UNKNOWN)) + if args.json: + v1['cloud_id'] = cloud_id + response = json.dumps( # Pretty, sorted json + v1, indent=1, sort_keys=True, separators=(',', ': ')) + elif args.long: + response = '%s\t%s' % (cloud_id, v1.get('region', METADATA_UNKNOWN)) + else: + response = cloud_id + sys.stdout.write('%s\n' % response) + return 0 + + +def main(): + """Tool to query specific instance-data values.""" + parser = get_parser() + sys.exit(handle_args(NAME, parser.parse_args())) + + +if __name__ == '__main__': + main() + +# vi: ts=4 expandtab diff --git a/cloudinit/cmd/tests/test_cloud_id.py b/cloudinit/cmd/tests/test_cloud_id.py new file mode 100644 index 00000000..73738170 --- /dev/null +++ b/cloudinit/cmd/tests/test_cloud_id.py @@ -0,0 +1,127 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +"""Tests for cloud-id command line utility.""" + +from cloudinit import util +from collections import namedtuple +from six import StringIO + +from cloudinit.cmd import cloud_id + +from cloudinit.tests.helpers import CiTestCase, mock + + +class TestCloudId(CiTestCase): + + args = namedtuple('cloudidargs', ('instance_data json long')) + + def setUp(self): + super(TestCloudId, self).setUp() + self.tmp = self.tmp_dir() + self.instance_data = self.tmp_path('instance-data.json', dir=self.tmp) + + def test_cloud_id_arg_parser_defaults(self): + """Validate the argument defaults when not provided by the end-user.""" + cmd = ['cloud-id'] + with mock.patch('sys.argv', cmd): + args = cloud_id.get_parser().parse_args() + self.assertEqual( + '/run/cloud-init/instance-data.json', + args.instance_data) + self.assertEqual(False, args.long) + self.assertEqual(False, args.json) + + def test_cloud_id_arg_parse_overrides(self): + """Override argument defaults by specifying values for each param.""" + util.write_file(self.instance_data, '{}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long', + '--json'] + with mock.patch('sys.argv', cmd): + args = cloud_id.get_parser().parse_args() + self.assertEqual(self.instance_data, args.instance_data) + self.assertEqual(True, args.long) + self.assertEqual(True, args.json) + + def test_cloud_id_missing_instance_data_json(self): + """Exit error when the provided instance-data.json does not exist.""" + cmd = ['cloud-id', '--instance-data', self.instance_data] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(1, context_manager.exception.code) + self.assertIn( + "ERROR: File not found '%s'" % self.instance_data, + m_stderr.getvalue()) + + def test_cloud_id_non_json_instance_data(self): + """Exit error when the provided instance-data.json is not json.""" + cmd = ['cloud-id', '--instance-data', self.instance_data] + util.write_file(self.instance_data, '{') + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(1, context_manager.exception.code) + self.assertIn( + "ERROR: File '%s' is not valid json." 
% self.instance_data, + m_stderr.getvalue()) + + def test_cloud_id_from_cloud_name_in_instance_data(self): + """Report canonical cloud-id from cloud_name in instance-data.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("mycloud\n", m_stdout.getvalue()) + + def test_cloud_id_long_name_from_instance_data(self): + """Report long cloud-id format from cloud_name and region.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "mycloud", "region": "somereg"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("mycloud\tsomereg\n", m_stdout.getvalue()) + + def test_cloud_id_lookup_from_instance_data_region(self): + """Report discovered canonical cloud_id when region lookup matches.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "aws", "region": "cn-north-1",' + ' "platform": "ec2"}}') + cmd = ['cloud-id', '--instance-data', self.instance_data, '--long'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual("aws-china\tcn-north-1\n", m_stdout.getvalue()) + + def test_cloud_id_lookup_json_instance_data_adds_cloud_id_to_json(self): + """Report v1 instance-data content with cloud_id when --json set.""" + util.write_file( + self.instance_data, + '{"v1": {"cloud_name": "unknown", "region": "dfw",' + ' "platform": "openstack", "public_ssh_keys": []}}') + expected = util.json_dumps({ + 'cloud_id': 'openstack', 'cloud_name': 'unknown', + 'platform': 'openstack', 'public_ssh_keys': [], 'region': 'dfw'}) + cmd = ['cloud-id', '--instance-data', self.instance_data, '--json'] + with mock.patch('sys.argv', cmd): + with mock.patch('sys.stdout', new_callable=StringIO) as m_stdout: + with self.assertRaises(SystemExit) as context_manager: + cloud_id.main() + self.assertEqual(0, context_manager.exception.code) + self.assertEqual(expected + '\n', m_stdout.getvalue()) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 9b90680f..e6966b31 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -58,6 +58,14 @@ METADATA_UNKNOWN = 'unknown' LOG = logging.getLogger(__name__) +# CLOUD_ID_REGION_PREFIX_MAP format is: +# : (: ) +CLOUD_ID_REGION_PREFIX_MAP = { + 'cn-': ('aws-china', lambda c: c == 'aws'), # only change aws regions + 'us-gov-': ('aws-gov', lambda c: c == 'aws'), # only change aws regions + 'china': ('azure-china', lambda c: c == 'azure'), # only change azure +} + class DataSourceNotFoundException(Exception): pass @@ -770,6 +778,25 @@ def instance_id_matches_system_uuid(instance_id, field='system-uuid'): return instance_id.lower() == dmi_value.lower() +def canonical_cloud_id(cloud_name, region, platform): + """Lookup the canonical cloud-id for a given cloud_name and 
region.""" + if not cloud_name: + cloud_name = METADATA_UNKNOWN + if not region: + region = METADATA_UNKNOWN + if region == METADATA_UNKNOWN: + if cloud_name != METADATA_UNKNOWN: + return cloud_name + return platform + for prefix, cloud_id_test in CLOUD_ID_REGION_PREFIX_MAP.items(): + (cloud_id, valid_cloud) = cloud_id_test + if region.startswith(prefix) and valid_cloud(cloud_name): + return cloud_id + if cloud_name != METADATA_UNKNOWN: + return cloud_name + return platform + + def convert_vendordata(data, recurse=True): """data: a loaded object (strings, arrays, dicts). return something suitable for cloudinit vendordata_raw. diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 391b3436..6378e98b 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -11,7 +11,8 @@ from cloudinit.helpers import Paths from cloudinit import importer from cloudinit.sources import ( EXPERIMENTAL_TEXT, INSTANCE_JSON_FILE, INSTANCE_JSON_SENSITIVE_FILE, - REDACT_SENSITIVE_VALUE, UNSET, DataSource, redact_sensitive_keys) + METADATA_UNKNOWN, REDACT_SENSITIVE_VALUE, UNSET, DataSource, + canonical_cloud_id, redact_sensitive_keys) from cloudinit.tests.helpers import CiTestCase, skipIf, mock from cloudinit.user_data import UserDataProcessor from cloudinit import util @@ -607,4 +608,75 @@ class TestRedactSensitiveData(CiTestCase): redact_sensitive_keys(md)) +class TestCanonicalCloudID(CiTestCase): + + def test_cloud_id_returns_platform_on_unknowns(self): + """When region and cloud_name are unknown, return platform.""" + self.assertEqual( + 'platform', + canonical_cloud_id(cloud_name=METADATA_UNKNOWN, + region=METADATA_UNKNOWN, + platform='platform')) + + def test_cloud_id_returns_platform_on_none(self): + """When region and cloud_name are unknown, return platform.""" + self.assertEqual( + 'platform', + canonical_cloud_id(cloud_name=None, + region=None, + platform='platform')) + + def test_cloud_id_returns_cloud_name_on_unknown_region(self): + """When region is unknown, return cloud_name.""" + for region in (None, METADATA_UNKNOWN): + self.assertEqual( + 'cloudname', + canonical_cloud_id(cloud_name='cloudname', + region=region, + platform='platform')) + + def test_cloud_id_returns_platform_on_unknown_cloud_name(self): + """When region is set but cloud_name is unknown return cloud_name.""" + self.assertEqual( + 'platform', + canonical_cloud_id(cloud_name=METADATA_UNKNOWN, + region='region', + platform='platform')) + + def test_cloud_id_aws_based_on_region_and_cloud_name(self): + """When cloud_name is aws, return proper cloud-id based on region.""" + self.assertEqual( + 'aws-china', + canonical_cloud_id(cloud_name='aws', + region='cn-north-1', + platform='platform')) + self.assertEqual( + 'aws', + canonical_cloud_id(cloud_name='aws', + region='us-east-1', + platform='platform')) + self.assertEqual( + 'aws-gov', + canonical_cloud_id(cloud_name='aws', + region='us-gov-1', + platform='platform')) + self.assertEqual( # Overrideen non-aws cloud_name is returned + '!aws', + canonical_cloud_id(cloud_name='!aws', + region='us-gov-1', + platform='platform')) + + def test_cloud_id_azure_based_on_region_and_cloud_name(self): + """Report cloud-id when cloud_name is azure and region is in china.""" + self.assertEqual( + 'azure-china', + canonical_cloud_id(cloud_name='azure', + region='chinaeast', + platform='platform')) + self.assertEqual( + 'azure', + canonical_cloud_id(cloud_name='azure', + region='!chinaeast', + platform='platform')) + # vi: ts=4 
expandtab diff --git a/setup.py index 5ed8eae2..ea37efc3 100755 --- a/setup.py +++ b/setup.py @@ -282,7 +282,8 @@ setuptools.setup( cmdclass=cmdclass, entry_points={ 'console_scripts': [ - 'cloud-init = cloudinit.cmd.main:main' + 'cloud-init = cloudinit.cmd.main:main', + 'cloud-id = cloudinit.cmd.cloud_id:main' ], } ) -- cgit v1.2.3 From 1d5e9aefdab06a2574d78e644deed6c6fa1da171 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 17 Oct 2018 18:47:35 +0000 Subject: azure: Add apply_network_config option to disable network from IMDS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Azure generates network configuration from the IMDS service and removes any preexisting hotplug network scripts which exist in Azure cloud images. Add a datasource configuration option which allows for writing a default network configuration which sets up dhcp on eth0 and leaves the hotplug handling to the cloud-image scripts. To disable network-config from Azure IMDS, add the following to /etc/cloud/cloud.cfg.d/99-azure-no-imds-network.cfg: datasource:   Azure:     apply_network_config: False LP: #1798424 --- cloudinit/sources/DataSourceAzure.py | 11 +++++- doc/rtd/topics/datasources/azure.rst | 46 ++++++++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 56 +++++++++++++++++++++++++-- 3 files changed, 107 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 39391d01..d0358e96 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -207,7 +207,9 @@ BUILTIN_DS_CONFIG = { }, 'disk_aliases': {'ephemeral0': RESOURCE_DISK_PATH}, 'dhclient_lease_file': LEASE_FILE, + 'apply_network_config': True, # Use IMDS published network configuration } +# RELEASE_BLOCKER: Xenial and earlier apply_network_config default is False BUILTIN_CLOUD_CONFIG = { 'disk_setup': { @@ -458,7 +460,8 @@ class DataSourceAzure(sources.DataSource): except sources.InvalidMetaDataException as e: LOG.warning('Could not crawl Azure metadata: %s', e) return False - if self.distro and self.distro.name == 'ubuntu': + if (self.distro and self.distro.name == 'ubuntu' and + self.ds_cfg.get('apply_network_config')): maybe_remove_ubuntu_network_config_scripts() # Process crawled data and augment with various config defaults @@ -619,7 +622,11 @@ class DataSourceAzure(sources.DataSource): the blacklisted devices. """ if not self._network_config: - self._network_config = parse_network_config(self._metadata_imds) + if self.ds_cfg.get('apply_network_config'): + nc_src = self._metadata_imds + else: + nc_src = None + self._network_config = parse_network_config(nc_src) return self._network_config diff --git a/doc/rtd/topics/datasources/azure.rst b/doc/rtd/topics/datasources/azure.rst index 559011ef..f73c3694 100644 --- a/doc/rtd/topics/datasources/azure.rst +++ b/doc/rtd/topics/datasources/azure.rst @@ -57,6 +57,52 @@ in order to use waagent.conf with cloud-init, the following settings are recomme ResourceDisk.MountPoint=/mnt +Configuration +------------- +The following configuration can be set for the datasource in system +configuration (in `/etc/cloud/cloud.cfg` or `/etc/cloud/cloud.cfg.d/`). + +The settings that may be configured are: + + * **agent_command**: Either __builtin__ (default) or a command to run to getcw + metadata. If __builtin__, get metadata from walinuxagent. Otherwise run the + provided command to obtain metadata.
+ * **apply_network_config**: Boolean set to True to use network configuration + described by Azure's IMDS endpoint instead of fallback network config of + dhcp on eth0. Default is True. For Ubuntu 16.04 or earlier, default is False. + * **data_dir**: Path used to read metadata files and write crawled data. + * **dhclient_lease_file**: The fallback lease file to source when looking for + custom DHCP option 245 from Azure fabric. + * **disk_aliases**: A dictionary defining which device paths should be + interpreted as ephemeral images. See cc_disk_setup module for more info. + * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to + metadata changes. + * **hostname_bounce**: A dictionary Azure hostname bounce behavior to react to + metadata changes. Azure will throttle ifup/down in some cases after metadata + has been updated to inform dhcp server about updated hostnames. + * **set_hostname**: Boolean set to True when we want Azure to set the hostname + based on metadata. + +An example configuration with the default values is provided below: + +.. sourcecode:: yaml + + datasource: + Azure: + agent_command: __builtin__ + apply_network_config: true + data_dir: /var/lib/waagent + dhclient_lease_file: /var/lib/dhcp/dhclient.eth0.leases + disk_aliases: + ephemeral0: /dev/disk/cloud/azure_resource + hostname_bounce: + interface: eth0 + command: builtin + policy: true + hostname_command: hostname + set_hostname: true + + Userdata -------- Userdata is provided to cloud-init inside the ovf-env.xml file. Cloud-init diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 0f4b7bf7..cd6e7e74 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -256,7 +256,8 @@ scbus-1 on xpt0 bus 0 ]) return dsaz - def _get_ds(self, data, agent_command=None, distro=None): + def _get_ds(self, data, agent_command=None, distro=None, + apply_network=None): def dsdevs(): return data.get('dsdevs', []) @@ -312,6 +313,8 @@ scbus-1 on xpt0 bus 0 data.get('sys_cfg', {}), distro=distro, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command + if apply_network is not None: + dsrc.ds_cfg['apply_network_config'] = apply_network return dsrc @@ -434,14 +437,26 @@ fdescfs /dev/fd fdescfs rw 0 0 def test_get_data_on_ubuntu_will_remove_network_scripts(self): """get_data will remove ubuntu net scripts on Ubuntu distro.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + 'sys_cfg': sys_cfg} dsrc = self._get_ds(data, distro='ubuntu') dsrc.get_data() self.m_remove_ubuntu_network_scripts.assert_called_once_with() + def test_get_data_on_ubuntu_will_not_remove_network_scripts_disabled(self): + """When apply_network_config false, do not remove scripts on Ubuntu.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + + dsrc = self._get_ds(data, distro='ubuntu') + dsrc.get_data() + self.m_remove_ubuntu_network_scripts.assert_not_called() + def test_crawl_metadata_returns_structured_data_and_caches_nothing(self): """Return all structured metadata and cache no class attributes.""" yaml_cfg = "{agent_command: my_command}\n" @@ -523,8 +538,10 @@ fdescfs /dev/fd fdescfs 
rw 0 0 def test_network_config_set_from_imds(self): """Datasource.network_config returns IMDS network data.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} odata = {} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} expected_network_config = { 'ethernets': { 'eth0': {'set-name': 'eth0', @@ -803,9 +820,10 @@ fdescfs /dev/fd fdescfs rw 0 0 @mock.patch('cloudinit.net.generate_fallback_config') def test_imds_network_config(self, mock_fallback): """Network config is generated from IMDS network data when present.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': True}}} odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), - 'sys_cfg': {}} + 'sys_cfg': sys_cfg} dsrc = self._get_ds(data) ret = dsrc.get_data() @@ -821,6 +839,36 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertEqual(expected_cfg, dsrc.network_config) mock_fallback.assert_not_called() + @mock.patch('cloudinit.net.get_interface_mac') + @mock.patch('cloudinit.net.get_devicelist') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net.generate_fallback_config') + def test_imds_network_ignored_when_apply_network_config_false( + self, mock_fallback, mock_dd, mock_devlist, mock_get_mac): + """When apply_network_config is False, use fallback instead of IMDS.""" + sys_cfg = {'datasource': {'Azure': {'apply_network_config': False}}} + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': sys_cfg} + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + mock_fallback.return_value = fallback_config + + mock_devlist.return_value = ['eth0'] + mock_dd.return_value = ['hv_netsvc'] + mock_get_mac.return_value = '00:11:22:33:44:55' + + dsrc = self._get_ds(data) + self.assertTrue(dsrc.get_data()) + self.assertEqual(dsrc.network_config, fallback_config) + @mock.patch('cloudinit.net.get_interface_mac') @mock.patch('cloudinit.net.get_devicelist') @mock.patch('cloudinit.net.device_driver') -- cgit v1.2.3 From 907395104bb5850d221924365102cc5ab0eca2f1 Mon Sep 17 00:00:00 2001 From: asakkurr Date: Wed, 31 Oct 2018 20:19:15 +0000 Subject: azure: report ready to fabric after reprovision and reduce logging When reusing a preprovisioned VM, report ready to Azure fabric as soon as we get the reprovision data and the goal state so that we are not delayed by the cloud-init stage switch, saving 2-3 seconds. Also reduce logging when polling IMDS for reprovision data. 
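That logging reduction comes from a new log_req_resp flag threaded through cloudinit.url_helper.readurl in the diff below; when False, the per-attempt debug lines are skipped. A minimal sketch of a quiet polling call follows — the endpoint URL and Metadata header are illustrative stand-ins, not values taken from this patch; only the log_req_resp keyword itself is introduced here:

    from cloudinit.url_helper import readurl

    # Poll indefinitely without emitting a LOG.debug line per attempt;
    # log_req_resp=False is the new knob added to readurl below.
    contents = readurl(
        'http://169.254.169.254/metadata/reprovisiondata',  # illustrative
        headers={'Metadata': 'true'},                       # illustrative
        timeout=1, infinite=True, log_req_resp=False).contents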
LP: #1799594 --- cloudinit/sources/DataSourceAzure.py | 15 ++++++-- cloudinit/url_helper.py | 17 +++++---- tests/unittests/test_datasource/test_azure.py | 51 +++++++++++++++++++++++++++ 3 files changed, 74 insertions(+), 9 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d0358e96..8642915e 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -267,6 +267,7 @@ class DataSourceAzure(sources.DataSource): dsname = 'Azure' _negotiated = False _metadata_imds = sources.UNSET + lease_info = None def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -406,8 +407,10 @@ class DataSourceAzure(sources.DataSource): LOG.warning("%s was not mountable", cdev) continue + should_report_ready_after_reprovision = False if reprovision or self._should_reprovision(ret): ret = self._reprovision() + should_report_ready_after_reprovision = True imds_md = get_metadata_from_imds( self.fallback_interface, retries=3) (md, userdata_raw, cfg, files) = ret @@ -434,6 +437,11 @@ class DataSourceAzure(sources.DataSource): crawled_data['metadata']['random_seed'] = seed crawled_data['metadata']['instance-id'] = util.read_dmi_data( 'system-uuid') + + if should_report_ready_after_reprovision: + LOG.info("Reporting ready to Azure after getting ReprovisionData") + self._report_ready(lease=self.lease_info) + return crawled_data def _is_platform_viable(self): @@ -522,6 +530,7 @@ class DataSourceAzure(sources.DataSource): while True: try: with EphemeralDHCPv4() as lease: + self.lease_info = lease if report_ready: path = REPORTED_READY_MARKER_FILE LOG.info( @@ -531,13 +540,13 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=lease) report_ready = False return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True).contents + exception_cb=exc_cb, infinite=True, + log_req_resp=False).contents except UrlError: pass def _report_ready(self, lease): - """Tells the fabric provisioning has completed - before we go into our polling loop.""" + """Tells the fabric provisioning has completed """ try: get_metadata_from_fabric(None, lease['unknown-245']) except Exception: diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 8067979e..cf57dbd5 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -199,7 +199,7 @@ def _get_ssl_args(url, ssl_details): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, headers_cb=None, ssl_details=None, check_status=True, allow_redirects=True, exception_cb=None, - session=None, infinite=False): + session=None, infinite=False, log_req_resp=True): url = _cleanurl(url) req_args = { 'url': url, @@ -256,9 +256,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, continue filtered_req_args[k] = v try: - LOG.debug("[%s/%s] open '%s' with %s configuration", i, - "infinite" if infinite else manual_tries, url, - filtered_req_args) + + if log_req_resp: + LOG.debug("[%s/%s] open '%s' with %s configuration", i, + "infinite" if infinite else manual_tries, url, + filtered_req_args) if session is None: session = requests.Session() @@ -294,8 +296,11 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, break if (infinite and sec_between > 0) or \ (i + 1 < manual_tries and sec_between > 0): - LOG.debug("Please wait %s seconds while we wait to try again", - sec_between) + + if log_req_resp: + LOG.debug( + "Please 
wait %s seconds while we wait to try again", + sec_between) time.sleep(sec_between) if excps: raise excps[-1] diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index cd6e7e74..4c5c6c12 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -513,6 +513,57 @@ fdescfs /dev/fd fdescfs rw 0 0 dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') + @mock.patch('cloudinit.sources.DataSourceAzure.DataSourceAzure._poll_imds') + def test_crawl_metadata_on_reprovision_reports_ready( + self, poll_imds_func, + report_ready_func, + m_write): + """If reprovisioning, report ready at the end""" + ovfenv = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"}) + + data = {'ovfcontent': ovfenv, + 'sys_cfg': {}} + dsrc = self._get_ds(data) + poll_imds_func.return_value = ovfenv + dsrc.crawl_metadata() + self.assertEqual(1, report_ready_func.call_count) + + @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch( + 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') + @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') + @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') + @mock.patch('cloudinit.sources.DataSourceAzure.readurl') + def test_crawl_metadata_on_reprovision_reports_ready_using_lease( + self, m_readurl, m_dhcp, + m_net, report_ready_func, + m_write): + """If reprovisioning, report ready using the obtained lease""" + ovfenv = construct_valid_ovf_env( + platform_settings={"PreprovisionedVm": "True"}) + + data = {'ovfcontent': ovfenv, + 'sys_cfg': {}} + dsrc = self._get_ds(data) + + lease = { + 'interface': 'eth9', 'fixed-address': '192.168.2.9', + 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', + 'unknown-245': '624c3620'} + m_dhcp.return_value = [lease] + + reprovision_ovfenv = construct_valid_ovf_env() + m_readurl.return_value = url_helper.StringResponse( + reprovision_ovfenv.encode('utf-8')) + + dsrc.crawl_metadata() + self.assertEqual(2, report_ready_func.call_count) + report_ready_func.assert_called_with(lease=lease) + def test_waagent_d_has_0700_perms(self): # we expect /var/lib/waagent to be created 0700 dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) -- cgit v1.2.3 From 58476e719bad7dbe1f0bd09a61ff484ad17d8e55 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 1 Nov 2018 22:50:07 +0000 Subject: azure: remove /etc/netplan/90-hotplug-azure.yaml when net from IMDS There was a typo in the seeded filename s/azure-hotplug/hotplug-azure/. --- cloudinit/sources/DataSourceAzure.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 8642915e..7bdd43d8 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -57,7 +57,7 @@ IMDS_URL = "http://169.254.169.254/metadata/" # List of static scripts and network config artifacts created by # stock ubuntu suported images. 
UBUNTU_EXTENDED_NETWORK_SCRIPTS = [ - '/etc/netplan/90-azure-hotplug.yaml', + '/etc/netplan/90-hotplug-azure.yaml', '/usr/local/sbin/ephemeral_eth.sh', '/etc/udev/rules.d/10-net-device-added.rules', '/run/network/interfaces.ephemeral.d', @@ -1211,7 +1211,7 @@ def maybe_remove_ubuntu_network_config_scripts(paths=None): additional interfaces which get attached by a customer at some point after initial boot. Since the Azure datasource can now regenerate network configuration as metadata reports these new devices, we no longer - want the udev rules or netplan's 90-azure-hotplug.yaml to configure + want the udev rules or netplan's 90-hotplug-azure.yaml to configure networking on eth1 or greater as it might collide with cloud-init's configuration. -- cgit v1.2.3 From d910ecd15de642d73a36e935704e54370f93c45b Mon Sep 17 00:00:00 2001 From: asakkurr Date: Mon, 12 Nov 2018 17:16:09 +0000 Subject: azure: fix regression introduced when persisting ephemeral dhcp lease In commitish 9073951 azure datasource tried to leverage stale DHCP information obtained from EphemeralDHCPv4 context manager to report updated provisioning status to the fabric earlier in the boot process. Unfortunately the stale ephemeral network configuration had already been torn down in preparation to bring up IMDS network config so the report attempt failed on timeout. This branch introduces obtain_lease and clean_network public methods on EphemeralDHCPv4 to allow for setup and teardown of ephemeral network configuration without using a context manager. Azure datasource now uses this to persist ephemeral network configuration across multiple contexts during provisioning to avoid multiple DHCP roundtrips. --- cloudinit/net/dhcp.py | 42 ++++++++++++++++++------ cloudinit/sources/DataSourceAzure.py | 47 ++++++++++++++++----------- tests/unittests/test_datasource/test_azure.py | 3 +- 3 files changed, 62 insertions(+), 30 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 12cf5097..bdc5799f 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -40,34 +40,56 @@ class EphemeralDHCPv4(object): def __init__(self, iface=None): self.iface = iface self._ephipv4 = None + self.lease = None def __enter__(self): + """Setup sandboxed dhcp context.""" + return self.obtain_lease() + + def __exit__(self, excp_type, excp_value, excp_traceback): + """Teardown sandboxed dhcp context.""" + self.clean_network() + + def clean_network(self): + """Exit _ephipv4 context to teardown of ip configuration performed.""" + if self.lease: + self.lease = None + if not self._ephipv4: + return + self._ephipv4.__exit__(None, None, None) + + def obtain_lease(self): + """Perform dhcp discovery in a sandboxed environment if possible. + + @return: A dict representing dhcp options on the most recent lease + obtained from the dhclient discovery if run, otherwise an error + is raised. + + @raises: NoDHCPLeaseError if no leases could be obtained. 
+ """ + if self.lease: + return self.lease try: leases = maybe_perform_dhcp_discovery(self.iface) except InvalidDHCPLeaseFileError: raise NoDHCPLeaseError() if not leases: raise NoDHCPLeaseError() - lease = leases[-1] + self.lease = leases[-1] LOG.debug("Received dhcp lease on %s for %s/%s", - lease['interface'], lease['fixed-address'], - lease['subnet-mask']) + self.lease['interface'], self.lease['fixed-address'], + self.lease['subnet-mask']) nmap = {'interface': 'interface', 'ip': 'fixed-address', 'prefix_or_mask': 'subnet-mask', 'broadcast': 'broadcast-address', 'router': 'routers'} - kwargs = dict([(k, lease.get(v)) for k, v in nmap.items()]) + kwargs = dict([(k, self.lease.get(v)) for k, v in nmap.items()]) if not kwargs['broadcast']: kwargs['broadcast'] = bcip(kwargs['prefix_or_mask'], kwargs['ip']) ephipv4 = EphemeralIPv4Network(**kwargs) ephipv4.__enter__() self._ephipv4 = ephipv4 - return lease - - def __exit__(self, excp_type, excp_value, excp_traceback): - if not self._ephipv4: - return - self._ephipv4.__exit__(excp_type, excp_value, excp_traceback) + return self.lease def maybe_perform_dhcp_discovery(nic=None): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 7bdd43d8..5ec6096f 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -267,7 +267,6 @@ class DataSourceAzure(sources.DataSource): dsname = 'Azure' _negotiated = False _metadata_imds = sources.UNSET - lease_info = None def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -281,6 +280,7 @@ class DataSourceAzure(sources.DataSource): self._network_config = None # Regenerate network config new_instance boot and every boot self.update_events['network'].add(EventType.BOOT) + self._ephemeral_dhcp_ctx = None def __str__(self): root = sources.DataSource.__str__(self) @@ -407,10 +407,9 @@ class DataSourceAzure(sources.DataSource): LOG.warning("%s was not mountable", cdev) continue - should_report_ready_after_reprovision = False - if reprovision or self._should_reprovision(ret): + perform_reprovision = reprovision or self._should_reprovision(ret) + if perform_reprovision: ret = self._reprovision() - should_report_ready_after_reprovision = True imds_md = get_metadata_from_imds( self.fallback_interface, retries=3) (md, userdata_raw, cfg, files) = ret @@ -438,9 +437,16 @@ class DataSourceAzure(sources.DataSource): crawled_data['metadata']['instance-id'] = util.read_dmi_data( 'system-uuid') - if should_report_ready_after_reprovision: + if perform_reprovision: LOG.info("Reporting ready to Azure after getting ReprovisionData") - self._report_ready(lease=self.lease_info) + use_cached_ephemeral = (net.is_up(self.fallback_interface) and + getattr(self, '_ephemeral_dhcp_ctx', None)) + if use_cached_ephemeral: + self._report_ready(lease=self._ephemeral_dhcp_ctx.lease) + self._ephemeral_dhcp_ctx.clean_network() # Teardown ephemeral + else: + with EphemeralDHCPv4() as lease: + self._report_ready(lease=lease) return crawled_data @@ -529,20 +535,23 @@ class DataSourceAzure(sources.DataSource): while True: try: - with EphemeralDHCPv4() as lease: - self.lease_info = lease - if report_ready: - path = REPORTED_READY_MARKER_FILE - LOG.info( - "Creating a marker file to report ready: %s", path) - util.write_file(path, "{pid}: {time}\n".format( - pid=os.getpid(), time=time())) - self._report_ready(lease=lease) - report_ready = False - return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, 
infinite=True, - log_req_resp=False).contents + # Save our EphemeralDHCPv4 context so we avoid repeated dhcp + self._ephemeral_dhcp_ctx = EphemeralDHCPv4() + lease = self._ephemeral_dhcp_ctx.obtain_lease() + if report_ready: + path = REPORTED_READY_MARKER_FILE + LOG.info( + "Creating a marker file to report ready: %s", path) + util.write_file(path, "{pid}: {time}\n".format( + pid=os.getpid(), time=time())) + self._report_ready(lease=lease) + report_ready = False + return readurl(url, timeout=1, headers=headers, + exception_cb=exc_cb, infinite=True, + log_req_resp=False).contents except UrlError: + # Teardown our EphemeralDHCPv4 context on failure as we retry + self._ephemeral_dhcp_ctx.clean_network() pass def _report_ready(self, lease): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 4c5c6c12..1dc69adb 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -513,6 +513,7 @@ fdescfs /dev/fd fdescfs rw 0 0 dsrc.crawl_metadata() self.assertEqual(str(cm.exception), error_msg) + @mock.patch('cloudinit.sources.DataSourceAzure.EphemeralDHCPv4') @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') @mock.patch( 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @@ -520,7 +521,7 @@ fdescfs /dev/fd fdescfs rw 0 0 def test_crawl_metadata_on_reprovision_reports_ready( self, poll_imds_func, report_ready_func, - m_write): + m_write, m_dhcp): """If reprovisioning, report ready at the end""" ovfenv = construct_valid_ovf_env( platform_settings={"PreprovisionedVm": "True"}) -- cgit v1.2.3 From 6f9512049bbb594c3f01ffcd2ab25ae4e016f01e Mon Sep 17 00:00:00 2001 From: Jason Zions Date: Mon, 12 Nov 2018 18:43:42 +0000 Subject: azure: Accept variation in error msg from mount for ntfs volumes If Azure detects an ntfs filesystem type during mount attempt, it should still report the resource device as reformattable. There are slight differences in error message format on RedHat and SuSE. This patch simplifies the expected error match to work on both distributions. LP: #1799338 --- cloudinit/sources/DataSourceAzure.py | 2 +- tests/unittests/test_datasource/test_azure.py | 29 +++++++++++++-------------- 2 files changed, 15 insertions(+), 16 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 5ec6096f..6e1797ea 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -725,7 +725,7 @@ def can_dev_be_reformatted(devpath, preserve_ntfs): file_count = util.mount_cb(cand_path, count_files, mtype="ntfs", update_env_for_mount={'LANG': 'C'}) except util.MountFailedError as e: - if "mount: unknown filesystem type 'ntfs'" in str(e): + if "unknown filesystem type 'ntfs'" in str(e): return True, (bmsg + ' but this system cannot mount NTFS,' ' assuming there are no important files.' 
' Formatting allowed.') diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 1dc69adb..8ad4368c 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1511,21 +1511,20 @@ class TestCanDevBeReformatted(CiTestCase): '/dev/sda1': {'num': 1, 'fs': 'ntfs', 'files': []} }}}) - err = ("Unexpected error while running command.\n", - "Command: ['mount', '-o', 'ro,sync', '-t', 'auto', ", - "'/dev/sda1', '/fake-tmp/dir']\n" - "Exit code: 32\n" - "Reason: -\n" - "Stdout: -\n" - "Stderr: mount: unknown filesystem type 'ntfs'") - self.m_mount_cb.side_effect = MountFailedError( - 'Failed mounting %s to %s due to: %s' % - ('/dev/sda', '/fake-tmp/dir', err)) - - value, msg = dsaz.can_dev_be_reformatted('/dev/sda', - preserve_ntfs=False) - self.assertTrue(value) - self.assertIn('cannot mount NTFS, assuming', msg) + error_msgs = [ + "Stderr: mount: unknown filesystem type 'ntfs'", # RHEL + "Stderr: mount: /dev/sdb1: unknown filesystem type 'ntfs'" # SLES + ] + + for err_msg in error_msgs: + self.m_mount_cb.side_effect = MountFailedError( + "Failed mounting %s to %s due to: \nUnexpected.\n%s" % + ('/dev/sda', '/fake-tmp/dir', err_msg)) + + value, msg = dsaz.can_dev_be_reformatted('/dev/sda', + preserve_ntfs=False) + self.assertTrue(value) + self.assertIn('cannot mount NTFS, assuming', msg) def test_never_destroy_ntfs_config_false(self): """Normally formattable situation with never_destroy_ntfs set.""" -- cgit v1.2.3 From 6062595b83e08e0f12e1fe6d8e367d8db9d91ef8 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 13 Nov 2018 03:14:58 +0000 Subject: azure: retry imds polling on requests.Timeout There is an infrequent race when the booting instance can hit the IMDS service before it is fully available. This results in a requests.ConnectTimeout being raised. Azure's retry_callback logic now retries on either 404s or Timeouts. LP:1800223 --- cloudinit/sources/DataSourceAzure.py | 18 +++------------- cloudinit/tests/test_url_helper.py | 25 +++++++++++++++++++++- cloudinit/url_helper.py | 14 +++++++++++++ tests/unittests/test_datasource/test_azure.py | 30 +++++++++++++++++++++++++++ 4 files changed, 71 insertions(+), 16 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6e1797ea..9e8a1a8b 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,7 +22,7 @@ from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric -from cloudinit.url_helper import readurl, UrlError +from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util LOG = logging.getLogger(__name__) @@ -526,13 +526,6 @@ class DataSourceAzure(sources.DataSource): report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) LOG.debug("Start polling IMDS") - def exc_cb(msg, exception): - if isinstance(exception, UrlError) and exception.code == 404: - return True - # If we get an exception while trying to call IMDS, we - # call DHCP and setup the ephemeral network to acquire the new IP. 
- return False - - while True: try: # Save our EphemeralDHCPv4 context so we avoid repeated dhcp @@ -547,7 +540,7 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=lease) report_ready = False return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True, + exception_cb=retry_on_url_exc, infinite=True, log_req_resp=False).contents except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry @@ -1187,17 +1180,12 @@ def get_metadata_from_imds(fallback_nic, retries): def _get_metadata_from_imds(retries): - def retry_on_url_error(msg, exception): - if isinstance(exception, UrlError) and exception.code == 404: - return True # Continue retries - return False # Stop retries on all other exceptions - url = IMDS_URL + "instance?api-version=2017-12-01" headers = {"Metadata": "true"} try: response = readurl( url, timeout=1, headers=headers, retries=retries, - exception_cb=retry_on_url_error) + exception_cb=retry_on_url_exc) except Exception as e: LOG.debug('Ignoring IMDS instance metadata: %s', e) return {} diff --git a/cloudinit/tests/test_url_helper.py b/cloudinit/tests/test_url_helper.py index 113249d9..aa9f3ec1 100644 --- a/cloudinit/tests/test_url_helper.py +++ b/cloudinit/tests/test_url_helper.py @@ -1,10 +1,12 @@ # This file is part of cloud-init. See LICENSE file for license information. -from cloudinit.url_helper import oauth_headers, read_file_or_url +from cloudinit.url_helper import ( + NOT_FOUND, UrlError, oauth_headers, read_file_or_url, retry_on_url_exc) from cloudinit.tests.helpers import CiTestCase, mock, skipIf from cloudinit import util import httpretty +import requests try: @@ -64,3 +66,24 @@ class TestReadFileOrUrl(CiTestCase): result = read_file_or_url(url) self.assertEqual(result.contents, data) self.assertEqual(str(result), data.decode('utf-8')) + + +class TestRetryOnUrlExc(CiTestCase): + + def test_do_not_retry_non_urlerror(self): + """When exception is not UrlError return False.""" + myerror = IOError('something unexpected') + self.assertFalse(retry_on_url_exc(msg='', exc=myerror)) + + def test_perform_retries_on_not_found(self): + """When exception is UrlError with a 404 status code return True.""" + myerror = UrlError(cause=RuntimeError( + 'something was not found'), code=NOT_FOUND) + self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) + + def test_perform_retries_on_timeout(self): + """When exception is a requests.Timeout return True.""" + myerror = UrlError(cause=requests.Timeout('something timed out')) + self.assertTrue(retry_on_url_exc(msg='', exc=myerror)) + +# vi: ts=4 expandtab diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index cf57dbd5..396d69ae 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -554,4 +554,18 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, _uri, signed_headers, _body = client.sign(url) return signed_headers + +def retry_on_url_exc(msg, exc): + """readurl exception_cb that will retry on NOT_FOUND and Timeout. + + Returns False to raise the exception from readurl, True to retry.
+ """ + if not isinstance(exc, UrlError): + return False + if exc.code == NOT_FOUND: + return True + if exc.cause and isinstance(exc.cause, requests.Timeout): + return True + return False + # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 8ad4368c..56484b27 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -17,6 +17,7 @@ import crypt import httpretty import json import os +import requests import stat import xml.etree.ElementTree as ET import yaml @@ -184,6 +185,35 @@ class TestGetMetadataFromIMDS(HttprettyTestCase): "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time self.logs.getvalue()) + @mock.patch('requests.Session.request') + @mock.patch('cloudinit.url_helper.time.sleep') + @mock.patch(MOCKPATH + 'net.is_up') + def test_get_metadata_from_imds_retries_on_timeout( + self, m_net_is_up, m_sleep, m_request): + """Retry IMDS network metadata on timeout errors.""" + + self.attempt = 0 + m_request.side_effect = requests.Timeout('Fake Connection Timeout') + + def retry_callback(request, uri, headers): + self.attempt += 1 + raise requests.Timeout('Fake connection timeout') + + httpretty.register_uri( + httpretty.GET, + dsaz.IMDS_URL + 'instance?api-version=2017-12-01', + body=retry_callback) + + m_net_is_up.return_value = True # skips dhcp + + self.assertEqual({}, dsaz.get_metadata_from_imds('eth9', retries=3)) + + m_net_is_up.assert_called_with('eth9') + self.assertEqual([mock.call(1)]*3, m_sleep.call_args_list) + self.assertIn( + "Crawl of Azure Instance Metadata Service (IMDS) took", # log_time + self.logs.getvalue()) + class TestAzureDataSource(CiTestCase): -- cgit v1.2.3 From 8f812a15fde01173c0dd5b7e1a77b61031fd93e4 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 15 Nov 2018 22:55:42 +0000 Subject: azure: _poll_imds only retry on 404. Fail on Timeout Upon URL timeout, _poll_imds is expected to re-dhcp to get updated IP configuration. We don't want to indefinitely retry because the instance likely has invalid IP configuration. LP: #1803598 --- cloudinit/sources/DataSourceAzure.py | 9 ++++++- tests/unittests/test_datasource/test_azure.py | 34 ++++++++++++++++++++++----- 2 files changed, 36 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 9e8a1a8b..2a3e5677 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -526,6 +526,13 @@ class DataSourceAzure(sources.DataSource): report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) LOG.debug("Start polling IMDS") + def exc_cb(msg, exception): + if isinstance(exception, UrlError) and exception.code == 404: + return True + # If we get an exception while trying to call IMDS, we + # call DHCP and setup the ephemeral network to acquire the new IP. 
+ return False + while True: try: # Save our EphemeralDHCPv4 context so we avoid repeated dhcp @@ -540,7 +547,7 @@ class DataSourceAzure(sources.DataSource): self._report_ready(lease=lease) report_ready = False return readurl(url, timeout=1, headers=headers, - exception_cb=retry_on_url_exc, infinite=True, + exception_cb=exc_cb, infinite=True, log_req_resp=False).contents except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 56484b27..5ea7ae5e 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1687,22 +1687,44 @@ class TestPreprovisioningPollIMDS(CiTestCase): self.paths = helpers.Paths({'cloud_dir': self.tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - @mock.patch(MOCKPATH + 'util.write_file') - def test_poll_imds_calls_report_ready(self, write_f, report_ready_func, + @mock.patch(MOCKPATH + 'EphemeralDHCPv4') + def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func, fake_resp, m_dhcp, m_net): - """The poll_imds will call report_ready after creating marker file.""" - report_marker = self.tmp_path('report_marker', self.tmp) + """The poll_imds will retry DHCP on IMDS timeout.""" + report_file = self.tmp_path('report_marker', self.tmp) lease = { 'interface': 'eth9', 'fixed-address': '192.168.2.9', 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', 'unknown-245': '624c3620'} m_dhcp.return_value = [lease] + + dhcp_ctx = mock.MagicMock(lease=lease) + dhcp_ctx.obtain_lease.return_value = lease + m_dhcpv4.return_value = dhcp_ctx + + self.tries = 0 + + def fake_timeout_once(**kwargs): + self.tries += 1 + if self.tries == 1: + raise requests.Timeout('Fake connection timeout') + elif self.tries == 2: + response = requests.Response() + response.status_code = 404 + raise requests.exceptions.HTTPError( + "fake 404", response=response) + # Third try should succeed and stop retries or redhcp + return mock.MagicMock(status_code=200, text="good", content="good") + + fake_resp.side_effect = fake_timeout_once + dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - mock_path = (MOCKPATH + 'REPORTED_READY_MARKER_FILE') - with mock.patch(mock_path, report_marker): + with mock.patch(MOCKPATH + 'REPORTED_READY_MARKER_FILE', report_file): dsa._poll_imds() self.assertEqual(report_ready_func.call_count, 1) report_ready_func.assert_called_with(lease=lease) + self.assertEqual(2, m_dhcpv4.call_count, 'Expected 2 DHCP calls') + self.assertEqual(3, self.tries, 'Expected 3 total reads from IMDS') def test_poll_imds_report_ready_false(self, report_ready_func, fake_resp, m_dhcp, m_net): -- cgit v1.2.3 From c7c395ce0f3d024243192947fee32d7fc6c063f5 Mon Sep 17 00:00:00 2001 From: Adam DePue Date: Thu, 29 Nov 2018 18:22:14 +0000 Subject: Azure: fix copy/paste error in error handling when reading azure ovf. Check the appropriate variables based on code review. Correcting what seems to be a copy/paste mistake for the error handling from a few lines above. 
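In outline, the fix makes read_azure_ovf validate the lpcs_nodes list it just built rather than the stale results list. A simplified, self-contained sketch of the corrected logic follows; find_child here is a minimal stand-in for the module's DOM filtering helper, and the error message is simplified so that '%d' receives the node count:

    class NonAzureDataSource(Exception):
        pass

    class BrokenAzureDataSource(Exception):
        pass

    def find_child(node, filter_func):
        # Stand-in: immediate child nodes matching the predicate.
        return [n for n in node.childNodes if filter_func(n)]

    def find_lpcs_node(dom):
        lpcs_nodes = find_child(
            dom.documentElement,
            lambda n: n.localName == "LinuxProvisioningConfigurationSet")
        if len(lpcs_nodes) == 0:   # was: len(results) == 0
            raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
        if len(lpcs_nodes) > 1:    # was: len(results) > 1
            raise BrokenAzureDataSource(
                "found '%d' LinuxProvisioningConfigurationSets"
                % len(lpcs_nodes))
        return lpcs_nodes[0]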
--- cloudinit/sources/DataSourceAzure.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 2a3e5677..be82ec4d 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -953,12 +953,12 @@ def read_azure_ovf(contents): lambda n: n.localName == "LinuxProvisioningConfigurationSet") - if len(results) == 0: + if len(lpcs_nodes) == 0: raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") - if len(results) > 1: + if len(lpcs_nodes) > 1: raise BrokenAzureDataSource("found '%d' %ss" % ("LinuxProvisioningConfigurationSet", - len(results))) + len(lpcs_nodes))) lpcs = lpcs_nodes[0] if not lpcs.hasChildNodes(): -- cgit v1.2.3 From bf7917159dbb292c9fcdef82b004e0f5ecb32c16 Mon Sep 17 00:00:00 2001 From: Tamilmani Manoharan Date: Thu, 29 Nov 2018 21:53:18 +0000 Subject: azure: detect vnet migration via netlink media change event Replace Azure pre-provision polling on IMDS with a blocking call which watches for netlink link state change messages. The media change event happens when a pre-provisioned VM has been activated and is connected to the user's virtual network and cloud-init can then resume operation to complete image instantiation. --- cloudinit/sources/DataSourceAzure.py | 31 +- cloudinit/sources/helpers/netlink.py | 250 ++++++++++++++++ cloudinit/sources/helpers/tests/test_netlink.py | 373 ++++++++++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 40 ++- 4 files changed, 678 insertions(+), 16 deletions(-) create mode 100644 cloudinit/sources/helpers/netlink.py create mode 100644 cloudinit/sources/helpers/tests/test_netlink.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index be82ec4d..e076d5dc 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,6 +22,7 @@ from cloudinit.event import EventType from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric +from cloudinit.sources.helpers import netlink from cloudinit.url_helper import UrlError, readurl, retry_on_url_exc from cloudinit import util @@ -409,6 +410,10 @@ class DataSourceAzure(sources.DataSource): perform_reprovision = reprovision or self._should_reprovision(ret) if perform_reprovision: + if util.is_FreeBSD(): + msg = "Free BSD is not supported for PPS VMs" + LOG.error(msg) + raise sources.InvalidMetaDataException(msg) ret = self._reprovision() imds_md = get_metadata_from_imds( self.fallback_interface, retries=3) @@ -523,8 +528,8 @@ class DataSourceAzure(sources.DataSource): response. Then return the returned JSON object.""" url = IMDS_URL + "reprovisiondata?api-version=2017-04-02" headers = {"Metadata": "true"} + nl_sock = None report_ready = bool(not os.path.isfile(REPORTED_READY_MARKER_FILE)) - LOG.debug("Start polling IMDS") def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: @@ -533,12 +538,19 @@ class DataSourceAzure(sources.DataSource): # call DHCP and setup the ephemeral network to acquire the new IP.
return False + LOG.debug("Wait for vnetswitch to happen") while True: try: # Save our EphemeralDHCPv4 context so we avoid repeated dhcp self._ephemeral_dhcp_ctx = EphemeralDHCPv4() lease = self._ephemeral_dhcp_ctx.obtain_lease() if report_ready: + try: + nl_sock = netlink.create_bound_netlink_socket() + except netlink.NetlinkCreateSocketError as e: + LOG.warning(e) + self._ephemeral_dhcp_ctx.clean_network() + return path = REPORTED_READY_MARKER_FILE LOG.info( "Creating a marker file to report ready: %s", path) @@ -546,13 +558,24 @@ class DataSourceAzure(sources.DataSource): pid=os.getpid(), time=time())) self._report_ready(lease=lease) report_ready = False - return readurl(url, timeout=1, headers=headers, - exception_cb=exc_cb, infinite=True, - log_req_resp=False).contents + try: + netlink.wait_for_media_disconnect_connect( + nl_sock, lease['interface']) + except AssertionError as error: + LOG.error(error) + return + self._ephemeral_dhcp_ctx.clean_network() + else: + return readurl(url, timeout=1, headers=headers, + exception_cb=exc_cb, infinite=True, + log_req_resp=False).contents except UrlError: # Teardown our EphemeralDHCPv4 context on failure as we retry self._ephemeral_dhcp_ctx.clean_network() pass + finally: + if nl_sock: + nl_sock.close() def _report_ready(self, lease): """Tells the fabric provisioning has completed """ diff --git a/cloudinit/sources/helpers/netlink.py b/cloudinit/sources/helpers/netlink.py new file mode 100644 index 00000000..d377ae3d --- /dev/null +++ b/cloudinit/sources/helpers/netlink.py @@ -0,0 +1,250 @@ +# Author: Tamilmani Manoharan +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import log as logging +from cloudinit import util +from collections import namedtuple + +import os +import select +import socket +import struct + +LOG = logging.getLogger(__name__) + +# http://man7.org/linux/man-pages/man7/netlink.7.html +RTMGRP_LINK = 1 +NLMSG_NOOP = 1 +NLMSG_ERROR = 2 +NLMSG_DONE = 3 +RTM_NEWLINK = 16 +RTM_DELLINK = 17 +RTM_GETLINK = 18 +RTM_SETLINK = 19 +MAX_SIZE = 65535 +RTA_DATA_OFFSET = 32 +MSG_TYPE_OFFSET = 16 +SELECT_TIMEOUT = 60 + +NLMSGHDR_FMT = "IHHII" +IFINFOMSG_FMT = "BHiII" +NLMSGHDR_SIZE = struct.calcsize(NLMSGHDR_FMT) +IFINFOMSG_SIZE = struct.calcsize(IFINFOMSG_FMT) +RTATTR_START_OFFSET = NLMSGHDR_SIZE + IFINFOMSG_SIZE +RTA_DATA_START_OFFSET = 4 +PAD_ALIGNMENT = 4 + +IFLA_IFNAME = 3 +IFLA_OPERSTATE = 16 + +# https://www.kernel.org/doc/Documentation/networking/operstates.txt +OPER_UNKNOWN = 0 +OPER_NOTPRESENT = 1 +OPER_DOWN = 2 +OPER_LOWERLAYERDOWN = 3 +OPER_TESTING = 4 +OPER_DORMANT = 5 +OPER_UP = 6 + +RTAAttr = namedtuple('RTAAttr', ['length', 'rta_type', 'data']) +InterfaceOperstate = namedtuple('InterfaceOperstate', ['ifname', 'operstate']) +NetlinkHeader = namedtuple('NetlinkHeader', ['length', 'type', 'flags', 'seq', + 'pid']) + + +class NetlinkCreateSocketError(RuntimeError): + '''Raised if netlink socket fails during create or bind.''' + pass + + +def create_bound_netlink_socket(): + '''Creates netlink socket and bind on netlink group to catch interface + down/up events. The socket will bound only on RTMGRP_LINK (which only + includes RTM_NEWLINK/RTM_DELLINK/RTM_GETLINK events). The socket is set to + non-blocking mode since we're only receiving messages. 
+ + :returns: netlink socket in non-blocking mode + :raises: NetlinkCreateSocketError + ''' + try: + netlink_socket = socket.socket(socket.AF_NETLINK, + socket.SOCK_RAW, + socket.NETLINK_ROUTE) + netlink_socket.bind((os.getpid(), RTMGRP_LINK)) + netlink_socket.setblocking(0) + except socket.error as e: + msg = "Exception during netlink socket create: %s" % e + raise NetlinkCreateSocketError(msg) + LOG.debug("Created netlink socket") + return netlink_socket + + +def get_netlink_msg_header(data): + '''Gets netlink message type and length + + :param: data read from netlink socket + :returns: netlink message type + :raises: AssertionError if data is None or data is not >= NLMSGHDR_SIZE + struct nlmsghdr { + __u32 nlmsg_len; /* Length of message including header */ + __u16 nlmsg_type; /* Type of message content */ + __u16 nlmsg_flags; /* Additional flags */ + __u32 nlmsg_seq; /* Sequence number */ + __u32 nlmsg_pid; /* Sender port ID */ + }; + ''' + assert (data is not None), ("data is none") + assert (len(data) >= NLMSGHDR_SIZE), ( + "data is smaller than netlink message header") + msg_len, msg_type, flags, seq, pid = struct.unpack(NLMSGHDR_FMT, + data[:MSG_TYPE_OFFSET]) + LOG.debug("Got netlink msg of type %d", msg_type) + return NetlinkHeader(msg_len, msg_type, flags, seq, pid) + + +def read_netlink_socket(netlink_socket, timeout=None): + '''Select and read from the netlink socket if ready. + + :param: netlink_socket: specify which socket object to read from + :param: timeout: specify a timeout value (integer) to wait while reading, + if none, it will block indefinitely until socket ready for read + :returns: string of data read (max length = MAX_SIZE) from socket, + if no data read, returns None + :raises: AssertionError if netlink_socket is None + ''' + assert (netlink_socket is not None), ("netlink socket is none") + read_set, _, _ = select.select([netlink_socket], [], [], timeout) + # In case of timeout, read_set doesn't contain the netlink socket; + # just return from this function + if netlink_socket not in read_set: + return None + LOG.debug("netlink socket ready for read") + data = netlink_socket.recv(MAX_SIZE) + if data is None: + LOG.error("Reading from Netlink socket returned no data") + return data + + +def unpack_rta_attr(data, offset): + '''Unpack a single rta attribute. + + :param: data: string of data read from netlink socket + :param: offset: starting offset of RTA Attribute + :return: RTAAttr object with length, type and data. On error, return None. + :raises: AssertionError if data is None or offset is not integer. + ''' + assert (data is not None), ("data is none") + assert (type(offset) == int), ("offset is not integer") + assert (offset >= RTATTR_START_OFFSET), ( + "rta offset is less than expected length") + length = rta_type = 0 + attr_data = None + try: + length = struct.unpack_from("H", data, offset=offset)[0] + rta_type = struct.unpack_from("H", data, offset=offset+2)[0] + except struct.error: + return None # Should mean our offset is >= remaining data + + # Unpack just the attribute's data. Offset by 4 to skip length/type header + attr_data = data[offset+RTA_DATA_START_OFFSET:offset+length] + return RTAAttr(length, rta_type, attr_data) + + +def read_rta_oper_state(data): + '''Reads Interface name and operational state from RTA Data. + + :param: data: string of data read from netlink socket + :returns: InterfaceOperstate object containing if_name and oper_state. + None if data does not contain valid IFLA_OPERSTATE and + IFLA_IFNAME messages.
+ :raises: AssertionError if data is None or length of data is + smaller than RTATTR_START_OFFSET. + ''' + assert (data is not None), ("data is none") + assert (len(data) > RTATTR_START_OFFSET), ( + "length of data is smaller than RTATTR_START_OFFSET") + ifname = operstate = None + offset = RTATTR_START_OFFSET + while offset <= len(data): + attr = unpack_rta_attr(data, offset) + if not attr or attr.length == 0: + break + # Each attribute is 4-byte aligned. Determine pad length. + padlen = (PAD_ALIGNMENT - + (attr.length % PAD_ALIGNMENT)) % PAD_ALIGNMENT + offset += attr.length + padlen + + if attr.rta_type == IFLA_OPERSTATE: + operstate = ord(attr.data) + elif attr.rta_type == IFLA_IFNAME: + interface_name = util.decode_binary(attr.data, 'utf-8') + ifname = interface_name.strip('\0') + if not ifname or operstate is None: + return None + LOG.debug("rta attrs: ifname %s operstate %d", ifname, operstate) + return InterfaceOperstate(ifname, operstate) + + +def wait_for_media_disconnect_connect(netlink_socket, ifname): + '''Block until media disconnect and connect have happened on an interface. + Listens on netlink socket to receive netlink events and when the carrier + changes from 0 to 1, it considers the event to have happened and + returns from this function + + :param: netlink_socket: netlink_socket to receive events + :param: ifname: Interface name to look out for netlink events + :raises: AssertionError if netlink_socket is None or ifname is None. + ''' + assert (netlink_socket is not None), ("netlink socket is none") + assert (ifname is not None), ("interface name is none") + assert (len(ifname) > 0), ("interface name cannot be empty") + carrier = OPER_UP + prevCarrier = OPER_UP + data = bytes() + LOG.debug("Wait for media disconnect and reconnect to happen") + while True: + recv_data = read_netlink_socket(netlink_socket, SELECT_TIMEOUT) + if recv_data is None: + continue + LOG.debug('read %d bytes from socket', len(recv_data)) + data += recv_data + LOG.debug('Length of data after concat %d', len(data)) + offset = 0 + datalen = len(data) + while offset < datalen: + nl_msg = data[offset:] + if len(nl_msg) < NLMSGHDR_SIZE: + LOG.debug("Data is smaller than netlink header") + break + nlheader = get_netlink_msg_header(nl_msg) + if len(nl_msg) < nlheader.length: + LOG.debug("Partial data. Smaller than netlink message") + break + padlen = (nlheader.length+PAD_ALIGNMENT-1) & ~(PAD_ALIGNMENT-1) + offset = offset + padlen + LOG.debug('offset to next netlink message: %d', offset) + # Ignore any messages not new link or del link + if nlheader.type not in [RTM_NEWLINK, RTM_DELLINK]: + continue + interface_state = read_rta_oper_state(nl_msg) + if interface_state is None: + LOG.debug('Failed to read rta attributes: %s', interface_state) + continue + if interface_state.ifname != ifname: + LOG.debug( + "Ignored netlink event on interface %s.
Waiting for %s.", + interface_state.ifname, ifname) + continue + if interface_state.operstate not in [OPER_UP, OPER_DOWN]: + continue + prevCarrier = carrier + carrier = interface_state.operstate + # check for carrier down, up sequence + isVnetSwitch = (prevCarrier == OPER_DOWN) and (carrier == OPER_UP) + if isVnetSwitch: + LOG.debug("Media switch happened on %s.", ifname) + return + data = data[offset:] + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/tests/test_netlink.py b/cloudinit/sources/helpers/tests/test_netlink.py new file mode 100644 index 00000000..c2898a16 --- /dev/null +++ b/cloudinit/sources/helpers/tests/test_netlink.py @@ -0,0 +1,373 @@ +# Author: Tamilmani Manoharan +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.tests.helpers import CiTestCase, mock +import socket +import struct +import codecs +from cloudinit.sources.helpers.netlink import ( + NetlinkCreateSocketError, create_bound_netlink_socket, read_netlink_socket, + read_rta_oper_state, unpack_rta_attr, wait_for_media_disconnect_connect, + OPER_DOWN, OPER_UP, OPER_DORMANT, OPER_LOWERLAYERDOWN, OPER_NOTPRESENT, + OPER_TESTING, OPER_UNKNOWN, RTATTR_START_OFFSET, RTM_NEWLINK, RTM_SETLINK, + RTM_GETLINK, MAX_SIZE) + + +def int_to_bytes(i): + '''convert integer to binary: eg: 1 to \x01''' + hex_value = '{0:x}'.format(i) + hex_value = '0' * (len(hex_value) % 2) + hex_value + return codecs.decode(hex_value, 'hex_codec') + + +class TestCreateBoundNetlinkSocket(CiTestCase): + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + def test_socket_error_on_create(self, m_socket): + '''create_bound_netlink_socket catches socket creation exception''' + + """NetlinkCreateSocketError is raised when socket creation errors.""" + m_socket.side_effect = socket.error("Fake socket failure") + with self.assertRaises(NetlinkCreateSocketError) as ctx_mgr: + create_bound_netlink_socket() + self.assertEqual( + 'Exception during netlink socket create: Fake socket failure', + str(ctx_mgr.exception)) + + +class TestReadNetlinkSocket(CiTestCase): + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + @mock.patch('cloudinit.sources.helpers.netlink.select.select') + def test_read_netlink_socket(self, m_select, m_socket): + '''read_netlink_socket able to receive data''' + data = 'netlinktest' + m_select.return_value = [m_socket], None, None + m_socket.recv.return_value = data + recv_data = read_netlink_socket(m_socket, 2) + m_select.assert_called_with([m_socket], [], [], 2) + m_socket.recv.assert_called_with(MAX_SIZE) + self.assertIsNotNone(recv_data) + self.assertEqual(recv_data, data) + + @mock.patch('cloudinit.sources.helpers.netlink.socket.socket') + @mock.patch('cloudinit.sources.helpers.netlink.select.select') + def test_netlink_read_timeout(self, m_select, m_socket): + '''read_netlink_socket should timeout if nothing to read''' + m_select.return_value = [], None, None + data = read_netlink_socket(m_socket, 1) + m_select.assert_called_with([m_socket], [], [], 1) + self.assertEqual(m_socket.recv.call_count, 0) + self.assertIsNone(data) + + def test_read_invalid_socket(self): + '''read_netlink_socket raises assert error if socket is invalid''' + socket = None + with self.assertRaises(AssertionError) as context: + read_netlink_socket(socket, 1) + self.assertTrue('netlink socket is none' in str(context.exception)) + + +class TestParseNetlinkMessage(CiTestCase): + + def test_read_rta_oper_state(self): + '''read_rta_oper_state could parse netlink message and 
extract data''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + buf = bytearray(48) + struct.pack_into("HH4sHHc", buf, RTATTR_START_OFFSET, 8, 3, bytes, 5, + 16, int_to_bytes(OPER_DOWN)) + interface_state = read_rta_oper_state(buf) + self.assertEqual(interface_state.ifname, ifname) + self.assertEqual(interface_state.operstate, OPER_DOWN) + + def test_read_none_data(self): + '''read_rta_oper_state raises assert error if data is none''' + data = None + with self.assertRaises(AssertionError) as context: + read_rta_oper_state(data) + self.assertTrue('data is none', str(context.exception)) + + def test_read_invalid_rta_operstate_none(self): + '''read_rta_oper_state returns none if operstate is none''' + ifname = "eth0" + buf = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", buf, RTATTR_START_OFFSET, 8, 3, bytes) + interface_state = read_rta_oper_state(buf) + self.assertIsNone(interface_state) + + def test_read_invalid_rta_ifname_none(self): + '''read_rta_oper_state returns none if ifname is none''' + buf = bytearray(40) + struct.pack_into("HHc", buf, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(OPER_DOWN)) + interface_state = read_rta_oper_state(buf) + self.assertIsNone(interface_state) + + def test_read_invalid_data_len(self): + '''raise assert error if data size is smaller than required size''' + buf = bytearray(32) + with self.assertRaises(AssertionError) as context: + read_rta_oper_state(buf) + self.assertTrue('length of data is smaller than RTATTR_START_OFFSET' in + str(context.exception)) + + def test_unpack_rta_attr_none_data(self): + '''unpack_rta_attr raises assert error if data is none''' + data = None + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, RTATTR_START_OFFSET) + self.assertTrue('data is none' in str(context.exception)) + + def test_unpack_rta_attr_invalid_offset(self): + '''unpack_rta_attr raises assert error if offset is invalid''' + data = bytearray(48) + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, "offset") + self.assertTrue('offset is not integer' in str(context.exception)) + with self.assertRaises(AssertionError) as context: + unpack_rta_attr(data, 31) + self.assertTrue('rta offset is less than expected length' in + str(context.exception)) + + +@mock.patch('cloudinit.sources.helpers.netlink.socket.socket') +@mock.patch('cloudinit.sources.helpers.netlink.read_netlink_socket') +class TestWaitForMediaDisconnectConnect(CiTestCase): + with_logs = True + + def _media_switch_data(self, ifname, msg_type, operstate): + '''construct netlink data with specified fields''' + if ifname and operstate is not None: + data = bytearray(48) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(operstate)) + elif ifname: + data = bytearray(40) + bytes = ifname.encode("utf-8") + struct.pack_into("HH4s", data, RTATTR_START_OFFSET, 8, 3, bytes) + elif operstate: + data = bytearray(40) + struct.pack_into("HHc", data, RTATTR_START_OFFSET, 5, 16, + int_to_bytes(operstate)) + struct.pack_into("=LHHLL", data, 0, len(data), msg_type, 0, 0, 0) + return data + + def test_media_down_up_scenario(self, m_read_netlink_socket, + m_socket): + '''Test for media down up sequence for required interface name''' + ifname = "eth0" + # construct data for Oper State down + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + # construct data for Oper State up + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + 
m_read_netlink_socket.side_effect = [data_op_down, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 2) + + def test_wait_for_media_switch_diff_interface(self, m_read_netlink_socket, + m_socket): + '''wait_for_media_disconnect_connect ignores unexpected interfaces. + + The first two messages are for other interfaces and last two are for + expected interface. So the function exit only after receiving last + 2 messages and therefore the call count for m_read_netlink_socket + has to be 4 + ''' + other_ifname = "eth1" + expected_ifname = "eth0" + data_op_down_eth1 = self._media_switch_data( + other_ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up_eth1 = self._media_switch_data( + other_ifname, RTM_NEWLINK, OPER_UP) + data_op_down_eth0 = self._media_switch_data( + expected_ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up_eth0 = self._media_switch_data( + expected_ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_op_down_eth1, + data_op_up_eth1, + data_op_down_eth0, + data_op_up_eth0] + wait_for_media_disconnect_connect(m_socket, expected_ifname) + self.assertIn('Ignored netlink event on interface %s' % other_ifname, + self.logs.getvalue()) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_invalid_msgtype_getlink(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect ignores GETLINK events. + + The first two messages are for oper down and up for RTM_GETLINK type + which netlink module will ignore. The last 2 messages are RTM_NEWLINK + with oper state down and up messages. Therefore the call count for + m_read_netlink_socket has to be 4 ignoring first 2 messages + of RTM_GETLINK + ''' + ifname = "eth0" + data_getlink_down = self._media_switch_data( + ifname, RTM_GETLINK, OPER_DOWN) + data_getlink_up = self._media_switch_data( + ifname, RTM_GETLINK, OPER_UP) + data_newlink_down = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DOWN) + data_newlink_up = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_getlink_down, + data_getlink_up, + data_newlink_down, + data_newlink_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_invalid_msgtype_setlink(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect ignores SETLINK events. + + The first two messages are for oper down and up for RTM_GETLINK type + which it will ignore. 3rd and 4th messages are RTM_NEWLINK with down + and up messages. This function should exit after 4th messages since it + sees down->up scenario. 
So the call count for m_read_netlink_socket + has to be 4 ignoring first 2 messages of RTM_GETLINK and + last 2 messages of RTM_NEWLINK + ''' + ifname = "eth0" + data_setlink_down = self._media_switch_data( + ifname, RTM_SETLINK, OPER_DOWN) + data_setlink_up = self._media_switch_data( + ifname, RTM_SETLINK, OPER_UP) + data_newlink_down = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_DOWN) + data_newlink_up = self._media_switch_data( + ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_setlink_down, + data_setlink_up, + data_newlink_down, + data_newlink_up, + data_newlink_down, + data_newlink_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_netlink_invalid_switch_scenario(self, m_read_netlink_socket, + m_socket): + '''returns only if it receives UP event after a DOWN event''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_DORMANT) + data_op_notpresent = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_NOTPRESENT) + data_op_lowerdown = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_LOWERLAYERDOWN) + data_op_testing = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_TESTING) + data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_UNKNOWN) + m_read_netlink_socket.side_effect = [data_op_up, data_op_up, + data_op_dormant, data_op_up, + data_op_notpresent, data_op_up, + data_op_lowerdown, data_op_up, + data_op_testing, data_op_up, + data_op_unknown, data_op_up, + data_op_down, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 14) + + def test_netlink_valid_inbetween_transitions(self, m_read_netlink_socket, + m_socket): + '''wait_for_media_disconnect_connect handles in between transitions''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_dormant = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_DORMANT) + data_op_unknown = self._media_switch_data(ifname, RTM_NEWLINK, + OPER_UNKNOWN) + m_read_netlink_socket.side_effect = [data_op_down, data_op_dormant, + data_op_unknown, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_netlink_invalid_operstate(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect should handle invalid operstates. + + The function should not fail and return even if it receives invalid + operstates. It always should wait for down up sequence. 
+ ''' + ifname = "eth0" + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + data_op_invalid = self._media_switch_data(ifname, RTM_NEWLINK, 7) + m_read_netlink_socket.side_effect = [data_op_invalid, data_op_up, + data_op_down, data_op_invalid, + data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 5) + + def test_wait_invalid_socket(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect handle none netlink socket.''' + socket = None + ifname = "eth0" + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(socket, ifname) + self.assertTrue('netlink socket is none' in str(context.exception)) + + def test_wait_invalid_ifname(self, m_read_netlink_socket, m_socket): + '''wait_for_media_disconnect_connect handle none interface name''' + ifname = None + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertTrue('interface name is none' in str(context.exception)) + ifname = "" + with self.assertRaises(AssertionError) as context: + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertTrue('interface name cannot be empty' in + str(context.exception)) + + def test_wait_invalid_rta_attr(self, m_read_netlink_socket, m_socket): + ''' wait_for_media_disconnect_connect handles invalid rta data''' + ifname = "eth0" + data_invalid1 = self._media_switch_data(None, RTM_NEWLINK, OPER_DOWN) + data_invalid2 = self._media_switch_data(ifname, RTM_NEWLINK, None) + data_op_down = self._media_switch_data(ifname, RTM_NEWLINK, OPER_DOWN) + data_op_up = self._media_switch_data(ifname, RTM_NEWLINK, OPER_UP) + m_read_netlink_socket.side_effect = [data_invalid1, data_invalid2, + data_op_down, data_op_up] + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 4) + + def test_read_multiple_netlink_msgs(self, m_read_netlink_socket, m_socket): + '''Read multiple messages in single receive call''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + data = bytearray(96) + struct.pack_into("=LHHLL", data, 0, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN)) + struct.pack_into("=LHHLL", data, 48, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data, 48 + RTATTR_START_OFFSET, 8, + 3, bytes, 5, 16, int_to_bytes(OPER_UP)) + m_read_netlink_socket.return_value = data + wait_for_media_disconnect_connect(m_socket, ifname) + self.assertEqual(m_read_netlink_socket.call_count, 1) + + def test_read_partial_netlink_msgs(self, m_read_netlink_socket, m_socket): + '''Read partial messages in receive call''' + ifname = "eth0" + bytes = ifname.encode("utf-8") + data1 = bytearray(112) + data2 = bytearray(32) + struct.pack_into("=LHHLL", data1, 0, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data1, RTATTR_START_OFFSET, 8, 3, + bytes, 5, 16, int_to_bytes(OPER_DOWN)) + struct.pack_into("=LHHLL", data1, 48, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data1, 80, 8, 3, bytes, 5, 16, + int_to_bytes(OPER_DOWN)) + struct.pack_into("=LHHLL", data1, 96, 48, RTM_NEWLINK, 0, 0, 0) + struct.pack_into("HH4sHHc", data2, 16, 8, 3, bytes, 5, 16, + int_to_bytes(OPER_UP)) + m_read_netlink_socket.side_effect = [data1, data2] + wait_for_media_disconnect_connect(m_socket, ifname) + 
self.assertEqual(m_read_netlink_socket.call_count, 2) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 5ea7ae5e..417d86a9 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -564,6 +564,8 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertEqual(1, report_ready_func.call_count) @mock.patch('cloudinit.sources.DataSourceAzure.util.write_file') + @mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') @mock.patch( 'cloudinit.sources.DataSourceAzure.DataSourceAzure._report_ready') @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @@ -572,7 +574,7 @@ fdescfs /dev/fd fdescfs rw 0 0 def test_crawl_metadata_on_reprovision_reports_ready_using_lease( self, m_readurl, m_dhcp, m_net, report_ready_func, - m_write): + m_media_switch, m_write): """If reprovisioning, report ready using the obtained lease""" ovfenv = construct_valid_ovf_env( platform_settings={"PreprovisionedVm": "True"}) @@ -586,6 +588,7 @@ fdescfs /dev/fd fdescfs rw 0 0 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', 'unknown-245': '624c3620'} m_dhcp.return_value = [lease] + m_media_switch.return_value = None reprovision_ovfenv = construct_valid_ovf_env() m_readurl.return_value = url_helper.StringResponse( @@ -1676,6 +1679,8 @@ class TestPreprovisioningShouldReprovision(CiTestCase): @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') +@mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') @mock.patch('requests.Session.request') @mock.patch(MOCKPATH + 'DataSourceAzure._report_ready') class TestPreprovisioningPollIMDS(CiTestCase): @@ -1689,7 +1694,8 @@ class TestPreprovisioningPollIMDS(CiTestCase): @mock.patch(MOCKPATH + 'EphemeralDHCPv4') def test_poll_imds_re_dhcp_on_timeout(self, m_dhcpv4, report_ready_func, - fake_resp, m_dhcp, m_net): + fake_resp, m_media_switch, m_dhcp, + m_net): """The poll_imds will retry DHCP on IMDS timeout.""" report_file = self.tmp_path('report_marker', self.tmp) lease = { @@ -1697,7 +1703,7 @@ class TestPreprovisioningPollIMDS(CiTestCase): 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', 'unknown-245': '624c3620'} m_dhcp.return_value = [lease] - + m_media_switch.return_value = None dhcp_ctx = mock.MagicMock(lease=lease) dhcp_ctx.obtain_lease.return_value = lease m_dhcpv4.return_value = dhcp_ctx @@ -1723,11 +1729,12 @@ class TestPreprovisioningPollIMDS(CiTestCase): dsa._poll_imds() self.assertEqual(report_ready_func.call_count, 1) report_ready_func.assert_called_with(lease=lease) - self.assertEqual(2, m_dhcpv4.call_count, 'Expected 2 DHCP calls') + self.assertEqual(3, m_dhcpv4.call_count, 'Expected 3 DHCP calls') self.assertEqual(3, self.tries, 'Expected 3 total reads from IMDS') - def test_poll_imds_report_ready_false(self, report_ready_func, - fake_resp, m_dhcp, m_net): + def test_poll_imds_report_ready_false(self, + report_ready_func, fake_resp, + m_media_switch, m_dhcp, m_net): """The poll_imds should not call reporting ready when flag is false""" report_file = self.tmp_path('report_marker', self.tmp) @@ -1736,6 +1743,7 @@ class TestPreprovisioningPollIMDS(CiTestCase): 'interface': 'eth9', 'fixed-address': '192.168.2.9', 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', 'unknown-245': '624c3620'}] + m_media_switch.return_value = None dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) with mock.patch(MOCKPATH + 
'REPORTED_READY_MARKER_FILE', report_file): dsa._poll_imds() @@ -1745,6 +1753,8 @@ class TestPreprovisioningPollIMDS(CiTestCase): @mock.patch(MOCKPATH + 'util.subp') @mock.patch(MOCKPATH + 'util.write_file') @mock.patch(MOCKPATH + 'util.is_FreeBSD') +@mock.patch('cloudinit.sources.helpers.netlink.' + 'wait_for_media_disconnect_connect') @mock.patch('cloudinit.net.dhcp.EphemeralIPv4Network') @mock.patch('cloudinit.net.dhcp.maybe_perform_dhcp_discovery') @mock.patch('requests.Session.request') @@ -1757,10 +1767,13 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): self.paths = helpers.Paths({'cloud_dir': tmp}) dsaz.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - def test_poll_imds_returns_ovf_env(self, fake_resp, m_dhcp, m_net, + def test_poll_imds_returns_ovf_env(self, fake_resp, + m_dhcp, m_net, + m_media_switch, m_is_bsd, write_f, subp): """The _poll_imds method should return the ovf_env.xml.""" m_is_bsd.return_value = False + m_media_switch.return_value = None m_dhcp.return_value = [{ 'interface': 'eth9', 'fixed-address': '192.168.2.9', 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0'}] @@ -1778,16 +1791,19 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): 'Cloud-Init/%s' % vs() }, method='GET', timeout=1, url=full_url)]) - self.assertEqual(m_dhcp.call_count, 1) + self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', prefix_or_mask='255.255.255.0', router='192.168.2.1') - self.assertEqual(m_net.call_count, 1) + self.assertEqual(m_net.call_count, 2) - def test__reprovision_calls__poll_imds(self, fake_resp, m_dhcp, m_net, + def test__reprovision_calls__poll_imds(self, fake_resp, + m_dhcp, m_net, + m_media_switch, m_is_bsd, write_f, subp): """The _reprovision method should call poll IMDS.""" m_is_bsd.return_value = False + m_media_switch.return_value = None m_dhcp.return_value = [{ 'interface': 'eth9', 'fixed-address': '192.168.2.9', 'routers': '192.168.2.1', 'subnet-mask': '255.255.255.0', @@ -1811,11 +1827,11 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): 'User-Agent': 'Cloud-Init/%s' % vs()}, method='GET', timeout=1, url=full_url)]) - self.assertEqual(m_dhcp.call_count, 1) + self.assertEqual(m_dhcp.call_count, 2) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', prefix_or_mask='255.255.255.0', router='192.168.2.1') - self.assertEqual(m_net.call_count, 1) + self.assertEqual(m_net.call_count, 2) class TestRemoveUbuntuNetworkConfigScripts(CiTestCase): -- cgit v1.2.3 From cb44ad6f42ac015d7d8eaf2ab0bb5ab125ed04b6 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Mon, 3 Dec 2018 18:43:21 +0000 Subject: ovf: Fix ovf network config generation gateway/routes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move routes under the nic's subnet rather than using top-level ("global") route config, ensuring all net renderers will emit the configured route. Also updated cloudinit/cmd/devel/net_convert.py: - Add input type 'vmware-imc' for OVF customization config files - Fix a bug where output-type netplan invoked netplan generate/apply and attempted to write to /etc/netplan/50-cloud-init.yaml instead of joining with the output directory.
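As a minimal sketch of the shape change (the old/new variable names are illustrative; the nic and route values mirror the updated unit test below):

    # Before: generate() emitted the route as a top-level sibling of the
    # nic, which not every renderer honored.
    old = [
        {'type': 'physical', 'name': 'NIC1',
         'mac_address': '00:50:56:a6:8c:08',
         'subnets': [{'control': 'auto', 'type': 'static',
                      'address': '10.20.87.154',
                      'netmask': '255.255.252.0'}]},
        {'type': 'route', 'destination': '10.20.84.0/22',
         'gateway': '10.20.87.253', 'metric': 10000},
    ]

    # After: the same route is nested under the nic's subnet, so the eni,
    # sysconfig and netplan renderers all emit it.
    new = [
        {'type': 'physical', 'name': 'NIC1',
         'mac_address': '00:50:56:a6:8c:08',
         'subnets': [{'control': 'auto', 'type': 'static',
                      'address': '10.20.87.154',
                      'netmask': '255.255.252.0',
                      'routes': [{'type': 'route',
                                  'destination': '10.20.84.0/22',
                                  'gateway': '10.20.87.253',
                                  'metric': 10000}]}]},
    ]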
LP: #1806103 --- cloudinit/cmd/devel/net_convert.py | 15 ++++-- cloudinit/sources/helpers/vmware/imc/config_nic.py | 5 +- tests/unittests/test_vmware_config_file.py | 58 +++++++++++++++++++--- 3 files changed, 64 insertions(+), 14 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/cmd/devel/net_convert.py b/cloudinit/cmd/devel/net_convert.py index a0f58a0a..1ad7e0bd 100755 --- a/cloudinit/cmd/devel/net_convert.py +++ b/cloudinit/cmd/devel/net_convert.py @@ -9,6 +9,7 @@ import yaml from cloudinit.sources.helpers import openstack from cloudinit.sources import DataSourceAzure as azure +from cloudinit.sources import DataSourceOVF as ovf from cloudinit import distros from cloudinit.net import eni, netplan, network_state, sysconfig @@ -31,7 +32,7 @@ def get_parser(parser=None): metavar="PATH", required=True) parser.add_argument("-k", "--kind", choices=['eni', 'network_data.json', 'yaml', - 'azure-imds'], + 'azure-imds', 'vmware-imc'], required=True) parser.add_argument("-d", "--directory", metavar="PATH", @@ -76,7 +77,6 @@ def handle_args(name, args): net_data = args.network_data.read() if args.kind == "eni": pre_ns = eni.convert_eni_data(net_data) - ns = network_state.parse_net_config_data(pre_ns) elif args.kind == "yaml": pre_ns = yaml.load(net_data) if 'network' in pre_ns: @@ -85,15 +85,16 @@ def handle_args(name, args): sys.stderr.write('\n'.join( ["Input YAML", yaml.dump(pre_ns, default_flow_style=False, indent=4), ""])) - ns = network_state.parse_net_config_data(pre_ns) elif args.kind == 'network_data.json': pre_ns = openstack.convert_net_json( json.loads(net_data), known_macs=known_macs) - ns = network_state.parse_net_config_data(pre_ns) elif args.kind == 'azure-imds': pre_ns = azure.parse_network_config(json.loads(net_data)) - ns = network_state.parse_net_config_data(pre_ns) + elif args.kind == 'vmware-imc': + config = ovf.Config(ovf.ConfigFile(args.network_data.name)) + pre_ns = ovf.get_network_config_from_conf(config, False) + ns = network_state.parse_net_config_data(pre_ns) if not ns: raise RuntimeError("No valid network_state object created from " "input data") @@ -111,6 +112,10 @@ def handle_args(name, args): elif args.output_kind == "netplan": r_cls = netplan.Renderer config = distro.renderer_configs.get('netplan') + # don't run netplan generate/apply + config['postcmds'] = False + # trim leading slash + config['netplan_path'] = config['netplan_path'][1:] else: r_cls = sysconfig.Renderer config = distro.renderer_configs.get('sysconfig') diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index e1890e23..77cbf3b6 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -165,9 +165,8 @@ class NicConfigurator(object): # Add routes if there is no primary nic if not self._primaryNic and v4.gateways: - route_list.extend(self.gen_ipv4_route(nic, - v4.gateways, - v4.netmask)) + subnet.update( + {'routes': self.gen_ipv4_route(nic, v4.gateways, v4.netmask)}) return ([subnet], route_list) diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py index 602dedb0..f47335ea 100644 --- a/tests/unittests/test_vmware_config_file.py +++ b/tests/unittests/test_vmware_config_file.py @@ -263,7 +263,7 @@ class TestVmwareConfigFile(CiTestCase): nicConfigurator = NicConfigurator(config.nics, False) nics_cfg_list = nicConfigurator.generate() - self.assertEqual(5, len(nics_cfg_list), "number of elements") + 
self.assertEqual(2, len(nics_cfg_list), "number of elements") nic1 = {'name': 'NIC1'} nic2 = {'name': 'NIC2'} @@ -275,8 +275,6 @@ class TestVmwareConfigFile(CiTestCase): nic1.update(cfg) elif cfg.get('name') == nic2.get('name'): nic2.update(cfg) - elif cfg_type == 'route': - route_list.append(cfg) self.assertEqual('physical', nic1.get('type'), 'type of NIC1') self.assertEqual('NIC1', nic1.get('name'), 'name of NIC1') @@ -297,6 +295,9 @@ class TestVmwareConfigFile(CiTestCase): static6_subnet.append(subnet) else: self.assertEqual(True, False, 'Unknown type') + if 'routes' in subnet: + for route in subnet.get('routes'): + route_list.append(route) self.assertEqual(1, len(static_subnet), 'Number of static subnet') self.assertEqual(1, len(static6_subnet), 'Number of static6 subnet') @@ -351,6 +352,8 @@ class TestVmwareConfigFile(CiTestCase): class TestVmwareNetConfig(CiTestCase): """Test conversion of vmware config to cloud-init config.""" + maxDiff = None + def _get_NicConfigurator(self, text): fp = None try: @@ -420,9 +423,52 @@ class TestVmwareNetConfig(CiTestCase): 'mac_address': '00:50:56:a6:8c:08', 'subnets': [ {'control': 'auto', 'type': 'static', - 'address': '10.20.87.154', 'netmask': '255.255.252.0'}]}, - {'type': 'route', 'destination': '10.20.84.0/22', - 'gateway': '10.20.87.253', 'metric': 10000}], + 'address': '10.20.87.154', 'netmask': '255.255.252.0', + 'routes': + [{'type': 'route', 'destination': '10.20.84.0/22', + 'gateway': '10.20.87.253', 'metric': 10000}]}]}], + nc.generate()) + + def test_cust_non_primary_nic_with_gateway_(self): + """A non-primary nic set by customization config can have a gateway.""" + config = textwrap.dedent("""\ + [NETWORK] + NETWORKING = yes + BOOTPROTO = dhcp + HOSTNAME = static-debug-vm + DOMAINNAME = cluster.local + + [NIC-CONFIG] + NICS = NIC1 + + [NIC1] + MACADDR = 00:50:56:ac:d1:8a + ONBOOT = yes + IPv4_MODE = BACKWARDS_COMPATIBLE + BOOTPROTO = static + IPADDR = 100.115.223.75 + NETMASK = 255.255.255.0 + GATEWAY = 100.115.223.254 + + + [DNS] + DNSFROMDHCP=no + + NAMESERVER|1 = 8.8.8.8 + + [DATETIME] + UTC = yes + """) + nc = self._get_NicConfigurator(config) + self.assertEqual( + [{'type': 'physical', 'name': 'NIC1', + 'mac_address': '00:50:56:ac:d1:8a', + 'subnets': [ + {'control': 'auto', 'type': 'static', + 'address': '100.115.223.75', 'netmask': '255.255.255.0', + 'routes': + [{'type': 'route', 'destination': '100.115.223.0/24', + 'gateway': '100.115.223.254', 'metric': 10000}]}]}], nc.generate()) def test_a_primary_nic_with_gateway(self): -- cgit v1.2.3 From adbd950af07a4b613a14bd83049915abdd6ca348 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 3 Dec 2018 22:06:47 +0000 Subject: NoCloud: Allow top level 'network' key in network-config. NoCloud's 'network-config' file was originally expected to contain network configuration without the top level 'network' key. This was because the file was named 'network-config', so specifying 'network' seemed redundant. However, JuJu is currently providing a top level 'network' config when it tries to disable networking ({"network": {"config": "disabled"}}). Other users have also been surprised/confused by the fact that a network config in /etc/cloud/cloud.cfg.d/network.cfg differed from what was expected in 'network-config'. 
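As a minimal sketch of the resulting behavior, assuming the _maybe_remove_top_network helper added by the patch below (the example config values are illustrative):

    from cloudinit.sources.DataSourceNoCloud import _maybe_remove_top_network

    net = {'version': 1,
           'config': [{'type': 'physical', 'name': 'eth0',
                       'subnets': [{'type': 'dhcp'}]}]}

    # With or without the redundant top level 'network' key, the same
    # configuration comes back.
    assert _maybe_remove_top_network({'network': net}) == net
    assert _maybe_remove_top_network(net) == net

    # JuJu's disable stanza is unwrapped as well.
    assert (_maybe_remove_top_network({'network': {'config': 'disabled'}})
            == {'config': 'disabled'})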
LP: #1798117 --- cloudinit/sources/DataSourceNoCloud.py | 32 +++++++- tests/unittests/test_datasource/test_nocloud.py | 100 ++++++++++++++++-------- 2 files changed, 97 insertions(+), 35 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 9010f06c..6860f0cc 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -311,6 +311,35 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): return True +def _maybe_remove_top_network(cfg): + """If network-config contains a top level 'network' key, remove it. + + Some providers of network configuration may provide a top level + 'network' key (LP: #1798117) even though it is not necessary. + + Be friendly and remove it when it is clearly redundant. + + Return the original value if no change or the updated value if changed.""" + nullval = object() + network_val = cfg.get('network', nullval) + if network_val is nullval: + return cfg + bmsg = 'Top level network key in network-config %s: %s' + if not isinstance(network_val, dict): + LOG.debug(bmsg, "was not a dict", cfg) + return cfg + if len(list(cfg.keys())) != 1: + LOG.debug(bmsg, "had multiple top level keys", cfg) + return cfg + if network_val.get('config') == "disabled": + LOG.debug(bmsg, "was config/disabled", cfg) + elif not all(('config' in network_val, 'version' in network_val)): + LOG.debug(bmsg, "but missing 'config' or 'version'", cfg) + return cfg + LOG.debug(bmsg, "fixed by removing the top level 'network' key", cfg) + return network_val + + def _merge_new_seed(cur, seeded): ret = cur.copy() @@ -320,7 +349,8 @@ def _merge_new_seed(cur, seeded): ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd]) if seeded.get('network-config'): - ret['network-config'] = util.load_yaml(seeded['network-config']) + ret['network-config'] = _maybe_remove_top_network( + util.load_yaml(seeded.get('network-config'))) if 'user-data' in seeded: ret['user-data'] = seeded['user-data'] diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index b6468b6d..3429272c 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -1,7 +1,10 @@ # This file is part of cloud-init. See LICENSE file for license information. 
from cloudinit import helpers -from cloudinit.sources import DataSourceNoCloud +from cloudinit.sources.DataSourceNoCloud import ( + DataSourceNoCloud as dsNoCloud, + _maybe_remove_top_network, + parse_cmdline_data) from cloudinit import util from cloudinit.tests.helpers import CiTestCase, populate_dir, mock, ExitStack @@ -40,9 +43,7 @@ class TestNoCloudDataSource(CiTestCase): 'datasource': {'NoCloud': {'fs_label': None}} } - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, ud) self.assertEqual(dsrc.metadata, md) @@ -63,9 +64,7 @@ class TestNoCloudDataSource(CiTestCase): 'datasource': {'NoCloud': {'fs_label': None}} } - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertTrue(dsrc.get_data()) self.assertEqual(dsrc.platform_type, 'nocloud') self.assertEqual( @@ -73,8 +72,6 @@ class TestNoCloudDataSource(CiTestCase): def test_fs_label(self, m_is_lxd): # find_devs_with should not be called if fs_label is None - ds = DataSourceNoCloud.DataSourceNoCloud - class PsuedoException(Exception): pass @@ -84,12 +81,12 @@ class TestNoCloudDataSource(CiTestCase): # by default, NoCloud should search for filesystems by label sys_cfg = {'datasource': {'NoCloud': {}}} - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertRaises(PsuedoException, dsrc.get_data) # but disabling searching should just end up with None found sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertFalse(ret) @@ -97,13 +94,10 @@ class TestNoCloudDataSource(CiTestCase): # no source should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) self.assertFalse(dsrc.get_data()) def test_seed_in_config(self, m_is_lxd): - ds = DataSourceNoCloud.DataSourceNoCloud - data = { 'fs_label': None, 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), @@ -111,7 +105,7 @@ class TestNoCloudDataSource(CiTestCase): } sys_cfg = {'datasource': {'NoCloud': data}} - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW") self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') @@ -130,9 +124,7 @@ class TestNoCloudDataSource(CiTestCase): 'datasource': {'NoCloud': {'fs_label': None}} } - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, ud) self.assertEqual(dsrc.metadata, md) @@ -145,9 +137,7 @@ class TestNoCloudDataSource(CiTestCase): sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = 
dsrc.get_data() self.assertEqual(dsrc.userdata_raw, b"ud") self.assertFalse(dsrc.vendordata) @@ -174,9 +164,7 @@ class TestNoCloudDataSource(CiTestCase): sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertTrue(ret) # very simple check just for the strings above @@ -195,9 +183,23 @@ class TestNoCloudDataSource(CiTestCase): sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEqual(netconf, dsrc.network_config) + + def test_metadata_network_config_with_toplevel_network(self, m_is_lxd): + """network-config may have a top level 'network' key.""" + netconf = {'config': 'disabled'} + populate_dir( + os.path.join(self.paths.seed_dir, "nocloud"), + {'user-data': b"ud", + 'meta-data': "instance-id: IID\n", + 'network-config': yaml.dump({'network': netconf}) + "\n"}) + + sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertTrue(ret) self.assertEqual(netconf, dsrc.network_config) @@ -228,9 +230,7 @@ class TestNoCloudDataSource(CiTestCase): sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} - ds = DataSourceNoCloud.DataSourceNoCloud - - dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() self.assertTrue(ret) self.assertEqual(netconf, dsrc.network_config) @@ -258,8 +258,7 @@ class TestParseCommandLineData(CiTestCase): for (fmt, expected) in pairs: fill = {} cmdline = fmt % {'ds_id': ds_id} - ret = DataSourceNoCloud.parse_cmdline_data(ds_id=ds_id, fill=fill, - cmdline=cmdline) + ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) self.assertEqual(expected, fill) self.assertTrue(ret) @@ -276,10 +275,43 @@ class TestParseCommandLineData(CiTestCase): for cmdline in cmdlines: fill = {} - ret = DataSourceNoCloud.parse_cmdline_data(ds_id=ds_id, fill=fill, - cmdline=cmdline) + ret = parse_cmdline_data(ds_id=ds_id, fill=fill, cmdline=cmdline) self.assertEqual(fill, {}) self.assertFalse(ret) +class TestMaybeRemoveToplevelNetwork(CiTestCase): + """Tests for the _maybe_remove_top_network function.""" + basecfg = [{'type': 'physical', 'name': 'interface0', + 'subnets': [{'type': 'dhcp'}]}] + + def test_should_remove_safely(self): + mcfg = {'config': self.basecfg, 'version': 1} + self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + + def test_no_remove_if_other_keys(self): + """should not shift if other keys are present at top level.""" + mcfg = {'network': {'config': self.basecfg, 'version': 1}, + 'unknown_keyname': 'keyval'} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_no_remove_if_non_dict(self): + """should not shift if not a dict.""" + mcfg = {'network': '"content here'} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + def test_no_remove_if_missing_config_or_version(self): + """should not shift unless network entry has config and version.""" + mcfg = {'network': {'config': self.basecfg}} + self.assertEqual(mcfg, _maybe_remove_top_network(mcfg)) + + mcfg = {'network': {'version': 1}} + self.assertEqual(mcfg, 
_maybe_remove_top_network(mcfg)) + + def test_remove_with_config_disabled(self): + """network/config=disabled should be shifted.""" + mcfg = {'config': 'disabled'} + self.assertEqual(mcfg, _maybe_remove_top_network({'network': mcfg})) + + # vi: ts=4 expandtab -- cgit v1.2.3