Diffstat (limited to 'cloudinit/sources/DataSourceEc2.py')
-rw-r--r-- | cloudinit/sources/DataSourceEc2.py | 461
1 file changed, 281 insertions, 180 deletions
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 1930a509..03b3870c 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -8,19 +8,18 @@
 #
 # This file is part of cloud-init. See LICENSE file for license information.
 
+import copy
 import os
 import time
 
 from cloudinit import dmi
 from cloudinit import ec2_utils as ec2
 from cloudinit import log as logging
-from cloudinit import net
-from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
-from cloudinit import sources
+from cloudinit import net, sources
 from cloudinit import url_helper as uhelp
-from cloudinit import util
-from cloudinit import warnings
-from cloudinit.event import EventType
+from cloudinit import util, warnings
+from cloudinit.event import EventScope, EventType
+from cloudinit.net.dhcp import EphemeralDHCPv4, NoDHCPLeaseError
 
 LOG = logging.getLogger(__name__)
 
@@ -29,10 +28,10 @@ SKIP_METADATA_URL_CODES = frozenset([uhelp.NOT_FOUND])
 STRICT_ID_PATH = ("datasource", "Ec2", "strict_id")
 STRICT_ID_DEFAULT = "warn"
 
-API_TOKEN_ROUTE = 'latest/api/token'
-AWS_TOKEN_TTL_SECONDS = '21600'
-AWS_TOKEN_PUT_HEADER = 'X-aws-ec2-metadata-token'
-AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + '-ttl-seconds'
+API_TOKEN_ROUTE = "latest/api/token"
+AWS_TOKEN_TTL_SECONDS = "21600"
+AWS_TOKEN_PUT_HEADER = "X-aws-ec2-metadata-token"
+AWS_TOKEN_REQ_HEADER = AWS_TOKEN_PUT_HEADER + "-ttl-seconds"
 
 AWS_TOKEN_REDACT = [AWS_TOKEN_PUT_HEADER, AWS_TOKEN_REQ_HEADER]
 
@@ -52,18 +51,18 @@ class CloudNames(object):
 
 
 class DataSourceEc2(sources.DataSource):
-    dsname = 'Ec2'
+    dsname = "Ec2"
     # Default metadata urls that will be used if none are provided
     # They will be checked for 'resolveability' and some of the
     # following may be discarded if they do not resolve
     metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"]
 
     # The minimum supported metadata_version from the ec2 metadata apis
-    min_metadata_version = '2009-04-04'
+    min_metadata_version = "2009-04-04"
 
     # Priority ordered list of additional metadata versions which will be tried
    # for extended metadata content. IPv6 support comes in 2016-09-02
-    extended_metadata_versions = ['2018-09-24', '2016-09-02']
+    extended_metadata_versions = ["2018-09-24", "2016-09-02"]
 
     # Setup read_url parameters per get_url_params.
     url_max_wait = 120
@@ -75,6 +74,15 @@ class DataSourceEc2(sources.DataSource):
     # Whether we want to get network configuration from the metadata service.
     perform_dhcp_setup = False
 
+    supported_update_events = {
+        EventScope.NETWORK: {
+            EventType.BOOT_NEW_INSTANCE,
+            EventType.BOOT,
+            EventType.BOOT_LEGACY,
+            EventType.HOTPLUG,
+        }
+    }
+
     def __init__(self, sys_cfg, distro, paths):
         super(DataSourceEc2, self).__init__(sys_cfg, distro, paths)
         self.metadata_address = None
@@ -85,11 +93,18 @@ class DataSourceEc2(sources.DataSource):
 
     def _get_data(self):
         strict_mode, _sleep = read_strict_mode(
-            util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH,
-                                 STRICT_ID_DEFAULT), ("warn", None))
-
-        LOG.debug("strict_mode: %s, cloud_name=%s cloud_platform=%s",
-                  strict_mode, self.cloud_name, self.platform)
+            util.get_cfg_by_path(
+                self.sys_cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT
+            ),
+            ("warn", None),
+        )
+
+        LOG.debug(
+            "strict_mode: %s, cloud_name=%s cloud_platform=%s",
+            strict_mode,
+            self.cloud_name,
+            self.platform,
+        )
         if strict_mode == "true" and self.cloud_name == CloudNames.UNKNOWN:
             return False
         elif self.cloud_name == CloudNames.NO_EC2_METADATA:
@@ -102,20 +117,27 @@ class DataSourceEc2(sources.DataSource):
             try:
                 with EphemeralDHCPv4(self.fallback_interface):
                     self._crawled_metadata = util.log_time(
-                        logfunc=LOG.debug, msg='Crawl of metadata service',
-                        func=self.crawl_metadata)
+                        logfunc=LOG.debug,
+                        msg="Crawl of metadata service",
+                        func=self.crawl_metadata,
+                    )
             except NoDHCPLeaseError:
                 return False
         else:
             self._crawled_metadata = util.log_time(
-                logfunc=LOG.debug, msg='Crawl of metadata service',
-                func=self.crawl_metadata)
+                logfunc=LOG.debug,
+                msg="Crawl of metadata service",
+                func=self.crawl_metadata,
+            )
         if not self._crawled_metadata:
             return False
-        self.metadata = self._crawled_metadata.get('meta-data', None)
-        self.userdata_raw = self._crawled_metadata.get('user-data', None)
-        self.identity = self._crawled_metadata.get(
-            'dynamic', {}).get('instance-identity', {}).get('document', {})
+        self.metadata = self._crawled_metadata.get("meta-data", None)
+        self.userdata_raw = self._crawled_metadata.get("user-data", None)
+        self.identity = (
+            self._crawled_metadata.get("dynamic", {})
+            .get("instance-identity", {})
+            .get("document", {})
+        )
         return True
 
     def is_classic_instance(self):
@@ -125,9 +147,9 @@ class DataSourceEc2(sources.DataSource):
             # network_config where metadata will be present.
             # Secondary call site is in packaging postinst script.
             return False
-        ifaces_md = self.metadata.get('network', {}).get('interfaces', {})
-        for _mac, mac_data in ifaces_md.get('macs', {}).items():
-            if 'vpc-id' in mac_data:
+        ifaces_md = self.metadata.get("network", {}).get("interfaces", {})
+        for _mac, mac_data in ifaces_md.get("macs", {}).items():
+            if "vpc-id" in mac_data:
                 return False
         return True
 
@@ -135,12 +157,12 @@ class DataSourceEc2(sources.DataSource):
     def launch_index(self):
         if not self.metadata:
             return None
-        return self.metadata.get('ami-launch-index')
+        return self.metadata.get("ami-launch-index")
 
     @property
     def platform(self):
         # Handle upgrade path of pickled ds
-        if not hasattr(self, '_platform_type'):
+        if not hasattr(self, "_platform_type"):
             self._platform_type = DataSourceEc2.dsname.lower()
         if not self._platform_type:
             self._platform_type = DataSourceEc2.dsname.lower()
@@ -156,44 +178,47 @@ class DataSourceEc2(sources.DataSource):
         min_metadata_version.
         """
         # Assumes metadata service is already up
-        url_tmpl = '{0}/{1}/meta-data/instance-id'
+        url_tmpl = "{0}/{1}/meta-data/instance-id"
         headers = self._get_headers()
         for api_ver in self.extended_metadata_versions:
             url = url_tmpl.format(self.metadata_address, api_ver)
             try:
-                resp = uhelp.readurl(url=url, headers=headers,
-                                     headers_redact=AWS_TOKEN_REDACT)
+                resp = uhelp.readurl(
+                    url=url, headers=headers, headers_redact=AWS_TOKEN_REDACT
+                )
             except uhelp.UrlError as e:
-                LOG.debug('url %s raised exception %s', url, e)
+                LOG.debug("url %s raised exception %s", url, e)
             else:
                 if resp.code == 200:
-                    LOG.debug('Found preferred metadata version %s', api_ver)
+                    LOG.debug("Found preferred metadata version %s", api_ver)
                     return api_ver
                 elif resp.code == 404:
-                    msg = 'Metadata api version %s not present. Headers: %s'
+                    msg = "Metadata api version %s not present. Headers: %s"
                     LOG.debug(msg, api_ver, resp.headers)
         return self.min_metadata_version
 
     def get_instance_id(self):
         if self.cloud_name == CloudNames.AWS:
             # Prefer the ID from the instance identity document, but fall back
-            if not getattr(self, 'identity', None):
+            if not getattr(self, "identity", None):
                 # If re-using cached datasource, it's get_data run didn't
                 # setup self.identity. So we need to do that now.
                 api_version = self.get_metadata_api_version()
                 self.identity = ec2.get_instance_identity(
-                    api_version, self.metadata_address,
+                    api_version,
+                    self.metadata_address,
                     headers_cb=self._get_headers,
                     headers_redact=AWS_TOKEN_REDACT,
-                    exception_cb=self._refresh_stale_aws_token_cb).get(
-                        'document', {})
+                    exception_cb=self._refresh_stale_aws_token_cb,
+                ).get("document", {})
             return self.identity.get(
-                'instanceId', self.metadata['instance-id'])
+                "instanceId", self.metadata["instance-id"]
+            )
         else:
-            return self.metadata['instance-id']
+            return self.metadata["instance-id"]
 
     def _maybe_fetch_api_token(self, mdurls, timeout=None, max_wait=None):
-        """ Get an API token for EC2 Instance Metadata Service.
+        """Get an API token for EC2 Instance Metadata Service.
 
         On EC2. IMDS will always answer an API token, unless
         the instance owner has disabled the IMDS HTTP endpoint or
@@ -205,26 +230,29 @@ class DataSourceEc2(sources.DataSource):
         urls = []
         url2base = {}
         url_path = API_TOKEN_ROUTE
-        request_method = 'PUT'
+        request_method = "PUT"
         for url in mdurls:
-            cur = '{0}/{1}'.format(url, url_path)
+            cur = "{0}/{1}".format(url, url_path)
            urls.append(cur)
             url2base[cur] = url
 
         # use the self._imds_exception_cb to check for Read errors
-        LOG.debug('Fetching Ec2 IMDSv2 API Token')
+        LOG.debug("Fetching Ec2 IMDSv2 API Token")
 
         response = None
         url = None
         url_params = self.get_url_params()
         try:
             url, response = uhelp.wait_for_url(
-                urls=urls, max_wait=url_params.max_wait_seconds,
-                timeout=url_params.timeout_seconds, status_cb=LOG.warning,
+                urls=urls,
+                max_wait=url_params.max_wait_seconds,
+                timeout=url_params.timeout_seconds,
+                status_cb=LOG.warning,
                 headers_cb=self._get_headers,
                 exception_cb=self._imds_exception_cb,
                 request_method=request_method,
-                headers_redact=AWS_TOKEN_REDACT)
+                headers_redact=AWS_TOKEN_REDACT,
+            )
         except uhelp.UrlError:
             # We use the raised exception to interupt the retry loop.
             # Nothing else to do here.
@@ -250,8 +278,10 @@ class DataSourceEc2(sources.DataSource):
         filtered = [x for x in mdurls if util.is_resolvable_url(x)]
         if set(filtered) != set(mdurls):
-            LOG.debug("Removed the following from metadata urls: %s",
-                      list((set(mdurls) - set(filtered))))
+            LOG.debug(
+                "Removed the following from metadata urls: %s",
+                list((set(mdurls) - set(filtered))),
+            )
         if len(filtered):
             mdurls = filtered
@@ -269,20 +299,25 @@ class DataSourceEc2(sources.DataSource):
             # if we can't get a token, use instance-id path
             urls = []
             url2base = {}
-            url_path = '{ver}/meta-data/instance-id'.format(
-                ver=self.min_metadata_version)
-            request_method = 'GET'
+            url_path = "{ver}/meta-data/instance-id".format(
+                ver=self.min_metadata_version
+            )
+            request_method = "GET"
             for url in mdurls:
-                cur = '{0}/{1}'.format(url, url_path)
+                cur = "{0}/{1}".format(url, url_path)
                 urls.append(cur)
                 url2base[cur] = url
 
         start_time = time.time()
         url, _ = uhelp.wait_for_url(
-            urls=urls, max_wait=url_params.max_wait_seconds,
-            timeout=url_params.timeout_seconds, status_cb=LOG.warning,
-            headers_redact=AWS_TOKEN_REDACT, headers_cb=self._get_headers,
-            request_method=request_method)
+            urls=urls,
+            max_wait=url_params.max_wait_seconds,
+            timeout=url_params.timeout_seconds,
+            status_cb=LOG.warning,
+            headers_redact=AWS_TOKEN_REDACT,
+            headers_cb=self._get_headers,
+            request_method=request_method,
+        )
 
         if url:
             metadata_address = url2base[url]
@@ -293,8 +328,11 @@ class DataSourceEc2(sources.DataSource):
         elif self.cloud_name == CloudNames.AWS:
             LOG.warning("IMDS's HTTP endpoint is probably disabled")
         else:
-            LOG.critical("Giving up on md from %s after %s seconds",
-                         urls, int(time.time() - start_time))
+            LOG.critical(
+                "Giving up on md from %s after %s seconds",
+                urls,
+                int(time.time() - start_time),
+            )
 
         return bool(metadata_address)
 
@@ -302,7 +340,7 @@ class DataSourceEc2(sources.DataSource):
         # Consult metadata service, that has
         #  ephemeral0: sdb
         # and return 'sdb' for input 'ephemeral0'
-        if 'block-device-mapping' not in self.metadata:
+        if "block-device-mapping" not in self.metadata:
             return None
 
         # Example:
@@ -311,7 +349,7 @@ class DataSourceEc2(sources.DataSource):
         # 'ephemeral0': '/dev/sdb',
         # 'root': '/dev/sda1'}
         found = None
-        bdm = self.metadata['block-device-mapping']
+        bdm = self.metadata["block-device-mapping"]
         if not isinstance(bdm, dict):
             LOG.debug("block-device-mapping not a dictionary: '%s'", bdm)
             return None
@@ -354,17 +392,18 @@ class DataSourceEc2(sources.DataSource):
         try:
             if self.cloud_name == CloudNames.AWS:
                 return self.identity.get(
-                    'availabilityZone',
-                    self.metadata['placement']['availability-zone'])
+                    "availabilityZone",
+                    self.metadata["placement"]["availability-zone"],
+                )
             else:
-                return self.metadata['placement']['availability-zone']
+                return self.metadata["placement"]["availability-zone"]
         except KeyError:
             return None
 
     @property
     def region(self):
         if self.cloud_name == CloudNames.AWS:
-            region = self.identity.get('region')
+            region = self.identity.get("region")
             # Fallback to trimming the availability zone if region is missing
             if self.availability_zone and not region:
                 region = self.availability_zone[:-1]
@@ -381,7 +420,8 @@ class DataSourceEc2(sources.DataSource):
         if self.cloud_name == CloudNames.UNKNOWN:
             warn_if_necessary(
                 util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT),
-                cfg)
+                cfg,
+            )
 
     @property
     def network_config(self):
@@ -392,30 +432,39 @@ class DataSourceEc2(sources.DataSource):
         if self.metadata is None:
             # this would happen if get_data hadn't been called. leave as UNSET
             LOG.warning(
-                "Unexpected call to network_config when metadata is None.")
+                "Unexpected call to network_config when metadata is None."
+            )
             return None
 
         result = None
         no_network_metadata_on_aws = bool(
-            'network' not in self.metadata and
-            self.cloud_name == CloudNames.AWS)
+            "network" not in self.metadata
+            and self.cloud_name == CloudNames.AWS
+        )
         if no_network_metadata_on_aws:
-            LOG.debug("Metadata 'network' not present:"
-                      " Refreshing stale metadata from prior to upgrade.")
+            LOG.debug(
+                "Metadata 'network' not present:"
+                " Refreshing stale metadata from prior to upgrade."
+            )
             util.log_time(
-                logfunc=LOG.debug, msg='Re-crawl of metadata service',
-                func=self.get_data)
+                logfunc=LOG.debug,
+                msg="Re-crawl of metadata service",
+                func=self.get_data,
+            )
 
         iface = self.fallback_interface
-        net_md = self.metadata.get('network')
+        net_md = self.metadata.get("network")
         if isinstance(net_md, dict):
             # SRU_BLOCKER: xenial, bionic and eoan should default
             # apply_full_imds_network_config to False to retain original
             # behavior on those releases.
             result = convert_ec2_metadata_network_config(
-                net_md, fallback_nic=iface,
+                net_md,
+                fallback_nic=iface,
                 full_network_config=util.get_cfg_option_bool(
-                    self.ds_cfg, 'apply_full_imds_network_config', True))
+                    self.ds_cfg, "apply_full_imds_network_config", True
+                ),
+            )
 
             # RELEASE_BLOCKER: xenial should drop the below if statement,
             # because the issue being addressed doesn't exist pre-netplan.
@@ -426,7 +475,15 @@ class DataSourceEc2(sources.DataSource):
             # Non-VPC (aka Classic) Ec2 instances need to rewrite the
             # network config file every boot due to MAC address change.
             if self.is_classic_instance():
-                self.update_events['network'].add(EventType.BOOT)
+                self.default_update_events = copy.deepcopy(
+                    self.default_update_events
+                )
+                self.default_update_events[EventScope.NETWORK].add(
+                    EventType.BOOT
+                )
+                self.default_update_events[EventScope.NETWORK].add(
+                    EventType.BOOT_LEGACY
+                )
         else:
             LOG.warning("Metadata 'network' key not valid: %s.", net_md)
         self._network_config = result
@@ -438,7 +495,7 @@ class DataSourceEc2(sources.DataSource):
         if self._fallback_interface is None:
             # fallback_nic was used at one point, so restored objects may
             # have an attribute there. respect that if found.
-            _legacy_fbnic = getattr(self, 'fallback_nic', None)
+            _legacy_fbnic = getattr(self, "fallback_nic", None)
             if _legacy_fbnic:
                 self._fallback_interface = _legacy_fbnic
                 self.fallback_nic = None
@@ -463,26 +520,37 @@ class DataSourceEc2(sources.DataSource):
         else:
             exc_cb = exc_cb_ud = None
         try:
-            crawled_metadata['user-data'] = ec2.get_instance_userdata(
-                api_version, self.metadata_address,
-                headers_cb=self._get_headers, headers_redact=redact,
-                exception_cb=exc_cb_ud)
-            crawled_metadata['meta-data'] = ec2.get_instance_metadata(
-                api_version, self.metadata_address,
-                headers_cb=self._get_headers, headers_redact=redact,
-                exception_cb=exc_cb)
+            crawled_metadata["user-data"] = ec2.get_instance_userdata(
+                api_version,
+                self.metadata_address,
+                headers_cb=self._get_headers,
+                headers_redact=redact,
+                exception_cb=exc_cb_ud,
+            )
+            crawled_metadata["meta-data"] = ec2.get_instance_metadata(
+                api_version,
+                self.metadata_address,
+                headers_cb=self._get_headers,
+                headers_redact=redact,
+                exception_cb=exc_cb,
+            )
             if self.cloud_name == CloudNames.AWS:
                 identity = ec2.get_instance_identity(
-                    api_version, self.metadata_address,
-                    headers_cb=self._get_headers, headers_redact=redact,
-                    exception_cb=exc_cb)
-                crawled_metadata['dynamic'] = {'instance-identity': identity}
+                    api_version,
+                    self.metadata_address,
+                    headers_cb=self._get_headers,
+                    headers_redact=redact,
+                    exception_cb=exc_cb,
+                )
+                crawled_metadata["dynamic"] = {"instance-identity": identity}
         except Exception:
             util.logexc(
-                LOG, "Failed reading from metadata address %s",
-                self.metadata_address)
+                LOG,
+                "Failed reading from metadata address %s",
+                self.metadata_address,
+            )
             return {}
-        crawled_metadata['_metadata_api_version'] = api_version
+        crawled_metadata["_metadata_api_version"] = api_version
         return crawled_metadata
 
     def _refresh_api_token(self, seconds=AWS_TOKEN_TTL_SECONDS):
@@ -495,23 +563,27 @@ class DataSourceEc2(sources.DataSource):
             return None
         LOG.debug("Refreshing Ec2 metadata API token")
         request_header = {AWS_TOKEN_REQ_HEADER: seconds}
-        token_url = '{}/{}'.format(self.metadata_address, API_TOKEN_ROUTE)
+        token_url = "{}/{}".format(self.metadata_address, API_TOKEN_ROUTE)
         try:
-            response = uhelp.readurl(token_url, headers=request_header,
-                                     headers_redact=AWS_TOKEN_REDACT,
-                                     request_method="PUT")
+            response = uhelp.readurl(
+                token_url,
+                headers=request_header,
+                headers_redact=AWS_TOKEN_REDACT,
+                request_method="PUT",
+            )
         except uhelp.UrlError as e:
             LOG.warning(
-                'Unable to get API token: %s raised exception %s',
-                token_url, e)
+                "Unable to get API token: %s raised exception %s", token_url, e
+            )
             return None
         return response.contents
 
     def _skip_or_refresh_stale_aws_token_cb(self, msg, exception):
         """Callback will not retry on SKIP_USERDATA_CODES or if no token
-           is available."""
+        is available."""
         retry = ec2.skip_retry_on_codes(
-            ec2.SKIP_USERDATA_CODES, msg, exception)
+            ec2.SKIP_USERDATA_CODES, msg, exception
+        )
         if not retry:
             return False  # False raises exception
         return self._refresh_stale_aws_token_cb(msg, exception)
@@ -541,14 +613,17 @@ class DataSourceEc2(sources.DataSource):
         # requests.ConnectionError will have exception.code == None
         if exception.code and exception.code >= 400:
             if exception.code == 403:
-                LOG.warning('Ec2 IMDS endpoint returned a 403 error. '
-                            'HTTP endpoint is disabled. Aborting.')
+                LOG.warning(
+                    "Ec2 IMDS endpoint returned a 403 error. "
+                    "HTTP endpoint is disabled. Aborting."
+                )
             else:
-                LOG.warning('Fatal error while requesting '
-                            'Ec2 IMDSv2 API tokens')
+                LOG.warning(
+                    "Fatal error while requesting Ec2 IMDSv2 API tokens"
+                )
         raise exception
 
-    def _get_headers(self, url=''):
+    def _get_headers(self, url=""):
         """Return a dict of headers for accessing a url.
 
         If _api_token is unset on AWS, attempt to refresh the token via a PUT
@@ -578,13 +653,17 @@ class DataSourceEc2Local(DataSourceEc2):
     metadata service. If the metadata service provides network configuration
     then render the network configuration for that instance based on metadata.
     """
+
     perform_dhcp_setup = True  # Use dhcp before querying metadata
 
     def get_data(self):
         supported_platforms = (CloudNames.AWS,)
         if self.cloud_name not in supported_platforms:
-            LOG.debug("Local Ec2 mode only supported on %s, not %s",
-                      supported_platforms, self.cloud_name)
+            LOG.debug(
+                "Local Ec2 mode only supported on %s, not %s",
+                supported_platforms,
+                self.cloud_name,
+            )
             return False
         return super(DataSourceEc2Local, self).get_data()
 
@@ -602,18 +681,19 @@ def parse_strict_mode(cfgval):
     # true, false, warn,[sleep]
     # return tuple with string mode (true|false|warn) and sleep.
     if cfgval is True:
-        return 'true', None
+        return "true", None
     if cfgval is False:
-        return 'false', None
+        return "false", None
 
     if not cfgval:
-        return 'warn', 0
+        return "warn", 0
 
     mode, _, sleep = cfgval.partition(",")
-    if mode not in ('true', 'false', 'warn'):
+    if mode not in ("true", "false", "warn"):
         raise ValueError(
             "Invalid mode '%s' in strict_id setting '%s': "
-            "Expected one of 'true', 'false', 'warn'." % (mode, cfgval))
+            "Expected one of 'true', 'false', 'warn'." % (mode, cfgval)
+        )
 
     if sleep:
         try:
@@ -639,47 +719,53 @@ def warn_if_necessary(cfgval, cfg):
     if mode == "false":
         return
 
-    warnings.show_warning('non_ec2_md', cfg, mode=True, sleep=sleep)
+    warnings.show_warning("non_ec2_md", cfg, mode=True, sleep=sleep)
 
 
 def identify_aws(data):
     # data is a dictionary returned by _collect_platform_data.
-    if (data['uuid'].startswith('ec2') and
-            (data['uuid_source'] == 'hypervisor' or
-             data['uuid'] == data['serial'])):
+    if data["uuid"].startswith("ec2") and (
+        data["uuid_source"] == "hypervisor" or data["uuid"] == data["serial"]
+    ):
         return CloudNames.AWS
 
     return None
 
 
 def identify_brightbox(data):
-    if data['serial'].endswith('.brightbox.com'):
+    if data["serial"].endswith(".brightbox.com"):
         return CloudNames.BRIGHTBOX
 
 
 def identify_zstack(data):
-    if data['asset_tag'].endswith('.zstack.io'):
+    if data["asset_tag"].endswith(".zstack.io"):
         return CloudNames.ZSTACK
 
 
 def identify_e24cloud(data):
-    if data['vendor'] == 'e24cloud':
+    if data["vendor"] == "e24cloud":
         return CloudNames.E24CLOUD
 
 
 def identify_platform():
     # identify the platform and return an entry in CloudNames.
     data = _collect_platform_data()
-    checks = (identify_aws, identify_brightbox, identify_zstack,
-              identify_e24cloud, lambda x: CloudNames.UNKNOWN)
+    checks = (
+        identify_aws,
+        identify_brightbox,
+        identify_zstack,
+        identify_e24cloud,
+        lambda x: CloudNames.UNKNOWN,
+    )
     for checker in checks:
         try:
             result = checker(data)
             if result:
                 return result
         except Exception as e:
-            LOG.warning("calling %s with %s raised exception: %s",
-                        checker, data, e)
+            LOG.warning(
+                "calling %s with %s raised exception: %s", checker, data, e
+            )
 
 
 def _collect_platform_data():
@@ -698,36 +784,36 @@ def _collect_platform_data():
     data = {}
     try:
         uuid = util.load_file("/sys/hypervisor/uuid").strip()
-        data['uuid_source'] = 'hypervisor'
+        data["uuid_source"] = "hypervisor"
     except Exception:
-        uuid = dmi.read_dmi_data('system-uuid')
-        data['uuid_source'] = 'dmi'
+        uuid = dmi.read_dmi_data("system-uuid")
+        data["uuid_source"] = "dmi"
 
     if uuid is None:
-        uuid = ''
-    data['uuid'] = uuid.lower()
+        uuid = ""
+    data["uuid"] = uuid.lower()
 
-    serial = dmi.read_dmi_data('system-serial-number')
+    serial = dmi.read_dmi_data("system-serial-number")
     if serial is None:
-        serial = ''
+        serial = ""
 
-    data['serial'] = serial.lower()
+    data["serial"] = serial.lower()
 
-    asset_tag = dmi.read_dmi_data('chassis-asset-tag')
+    asset_tag = dmi.read_dmi_data("chassis-asset-tag")
     if asset_tag is None:
-        asset_tag = ''
+        asset_tag = ""
 
-    data['asset_tag'] = asset_tag.lower()
+    data["asset_tag"] = asset_tag.lower()
 
-    vendor = dmi.read_dmi_data('system-manufacturer')
-    data['vendor'] = (vendor if vendor else '').lower()
+    vendor = dmi.read_dmi_data("system-manufacturer")
+    data["vendor"] = (vendor if vendor else "").lower()
 
     return data
 
 
 def convert_ec2_metadata_network_config(
-        network_md, macs_to_nics=None, fallback_nic=None,
-        full_network_config=True):
+    network_md, macs_to_nics=None, fallback_nic=None, full_network_config=True
+):
     """Convert ec2 metadata to network config version 2 data dict.
 
     @param: network_md: 'network' portion of EC2 metadata.
@@ -746,49 +832,55 @@ def convert_ec2_metadata_network_config(
     @return A dict of network config version 2 based on the metadata and macs.
     """
-    netcfg = {'version': 2, 'ethernets': {}}
+    netcfg = {"version": 2, "ethernets": {}}
     if not macs_to_nics:
         macs_to_nics = net.get_interfaces_by_mac()
-    macs_metadata = network_md['interfaces']['macs']
+    macs_metadata = network_md["interfaces"]["macs"]
 
     if not full_network_config:
         for mac, nic_name in macs_to_nics.items():
             if nic_name == fallback_nic:
                 break
-        dev_config = {'dhcp4': True,
-                      'dhcp6': False,
-                      'match': {'macaddress': mac.lower()},
-                      'set-name': nic_name}
+        dev_config = {
+            "dhcp4": True,
+            "dhcp6": False,
+            "match": {"macaddress": mac.lower()},
+            "set-name": nic_name,
+        }
         nic_metadata = macs_metadata.get(mac)
-        if nic_metadata.get('ipv6s'):  # Any IPv6 addresses configured
-            dev_config['dhcp6'] = True
-        netcfg['ethernets'][nic_name] = dev_config
+        if nic_metadata.get("ipv6s"):  # Any IPv6 addresses configured
+            dev_config["dhcp6"] = True
+        netcfg["ethernets"][nic_name] = dev_config
         return netcfg
     # Apply network config for all nics and any secondary IPv4/v6 addresses
+    nic_idx = 0
     for mac, nic_name in sorted(macs_to_nics.items()):
         nic_metadata = macs_metadata.get(mac)
         if not nic_metadata:
             continue  # Not a physical nic represented in metadata
         # device-number is zero-indexed, we want it 1-indexed for the
        # multiplication on the following line
-        nic_idx = int(nic_metadata['device-number']) + 1
-        dhcp_override = {'route-metric': nic_idx * 100}
-        dev_config = {'dhcp4': True, 'dhcp4-overrides': dhcp_override,
-                      'dhcp6': False,
-                      'match': {'macaddress': mac.lower()},
-                      'set-name': nic_name}
-        if nic_metadata.get('ipv6s'):  # Any IPv6 addresses configured
-            dev_config['dhcp6'] = True
-            dev_config['dhcp6-overrides'] = dhcp_override
-        dev_config['addresses'] = get_secondary_addresses(nic_metadata, mac)
-        if not dev_config['addresses']:
-            dev_config.pop('addresses')  # Since we found none configured
-        netcfg['ethernets'][nic_name] = dev_config
+        nic_idx = int(nic_metadata.get("device-number", nic_idx)) + 1
+        dhcp_override = {"route-metric": nic_idx * 100}
+        dev_config = {
+            "dhcp4": True,
+            "dhcp4-overrides": dhcp_override,
+            "dhcp6": False,
+            "match": {"macaddress": mac.lower()},
+            "set-name": nic_name,
+        }
+        if nic_metadata.get("ipv6s"):  # Any IPv6 addresses configured
+            dev_config["dhcp6"] = True
+            dev_config["dhcp6-overrides"] = dhcp_override
+        dev_config["addresses"] = get_secondary_addresses(nic_metadata, mac)
+        if not dev_config["addresses"]:
+            dev_config.pop("addresses")  # Since we found none configured
+        netcfg["ethernets"][nic_name] = dev_config
 
     # Remove route-metric dhcp overrides if only one nic configured
-    if len(netcfg['ethernets']) == 1:
-        for nic_name in netcfg['ethernets'].keys():
-            netcfg['ethernets'][nic_name].pop('dhcp4-overrides')
-            netcfg['ethernets'][nic_name].pop('dhcp6-overrides', None)
+    if len(netcfg["ethernets"]) == 1:
+        for nic_name in netcfg["ethernets"].keys():
+            netcfg["ethernets"][nic_name].pop("dhcp4-overrides")
+            netcfg["ethernets"][nic_name].pop("dhcp6-overrides", None)
     return netcfg
 
 
@@ -798,18 +890,22 @@ def get_secondary_addresses(nic_metadata, mac):
     :return: List of secondary IPv4 or IPv6 addresses to configure on the
     interface
     """
-    ipv4s = nic_metadata.get('local-ipv4s')
-    ipv6s = nic_metadata.get('ipv6s')
+    ipv4s = nic_metadata.get("local-ipv4s")
+    ipv6s = nic_metadata.get("ipv6s")
     addresses = []
     # In version < 2018-09-24 local_ipv4s or ipv6s is a str with one IP
     if bool(isinstance(ipv4s, list) and len(ipv4s) > 1):
         addresses.extend(
             _get_secondary_addresses(
-                nic_metadata, 'subnet-ipv4-cidr-block', mac, ipv4s, '24'))
+                nic_metadata, "subnet-ipv4-cidr-block", mac, ipv4s, "24"
+            )
+        )
     if bool(isinstance(ipv6s, list) and len(ipv6s) > 1):
         addresses.extend(
             _get_secondary_addresses(
-                nic_metadata, 'subnet-ipv6-cidr-block', mac, ipv6s, '128'))
+                nic_metadata, "subnet-ipv6-cidr-block", mac, ipv6s, "128"
+            )
+        )
     return sorted(addresses)
 
 
@@ -822,18 +918,22 @@ def _get_secondary_addresses(nic_metadata, cidr_key, mac, ips, default_prefix):
     addresses = []
     cidr = nic_metadata.get(cidr_key)
     prefix = default_prefix
-    if not cidr or len(cidr.split('/')) != 2:
-        ip_type = 'ipv4' if 'ipv4' in cidr_key else 'ipv6'
+    if not cidr or len(cidr.split("/")) != 2:
+        ip_type = "ipv4" if "ipv4" in cidr_key else "ipv6"
         LOG.warning(
-            'Could not parse %s %s for mac %s. %s network'
-            ' config prefix defaults to /%s',
-            cidr_key, cidr, mac, ip_type, prefix)
+            "Could not parse %s %s for mac %s. %s network"
+            " config prefix defaults to /%s",
+            cidr_key,
+            cidr,
+            mac,
+            ip_type,
+            prefix,
+        )
     else:
-        prefix = cidr.split('/')[1]
+        prefix = cidr.split("/")[1]
     # We know we have > 1 ips for in metadata for this IP type
    for ip in ips[1:]:
-        addresses.append(
-            '{ip}/{prefix}'.format(ip=ip, prefix=prefix))
+        addresses.append("{ip}/{prefix}".format(ip=ip, prefix=prefix))
     return addresses
 
 
@@ -848,4 +948,5 @@ datasources = [
 
 def get_datasource_list(depends):
     return sources.list_from_depends(depends, datasources)
 
+
 # vi: ts=4 expandtab