Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/cloud.py                       |  4
-rw-r--r--  cloudinit/config/cc_ntp.py               |  9
-rw-r--r--  cloudinit/config/cc_rh_subscription.py   | 46
-rw-r--r--  cloudinit/config/cc_update_etc_hosts.py  |  4
-rw-r--r--  cloudinit/net/dhcp.py                    | 12
-rw-r--r--  cloudinit/net/tests/test_dhcp.py         |  9
-rw-r--r--  cloudinit/sources/DataSourceAzure.py     | 25
-rw-r--r--  cloudinit/sources/DataSourceEc2.py       | 44
-rw-r--r--  cloudinit/user_data.py                   | 33
9 files changed, 114 insertions, 72 deletions
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index d8a9fc86..ba616781 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -56,8 +56,8 @@ class Cloud(object):
 
     def get_template_filename(self, name):
         fn = self.paths.template_tpl % (name)
         if not os.path.isfile(fn):
-            LOG.warning("No template found at %s for template named %s",
-                        fn, name)
+            LOG.warning("No template found in %s for template named %s",
+                        os.path.dirname(fn), name)
             return None
         return fn
diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py
index d43d060c..f50bcb35 100644
--- a/cloudinit/config/cc_ntp.py
+++ b/cloudinit/config/cc_ntp.py
@@ -23,7 +23,7 @@ frequency = PER_INSTANCE
 NTP_CONF = '/etc/ntp.conf'
 TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf'
 NR_POOL_SERVERS = 4
-distros = ['centos', 'debian', 'fedora', 'opensuse', 'ubuntu']
+distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu']
 
 
 # The schema definition for each cloud-config module is a strict contract for
@@ -174,8 +174,13 @@ def rename_ntp_conf(config=None):
 
 def generate_server_names(distro):
     names = []
+    pool_distro = distro
+    # For legal reasons x.pool.sles.ntp.org does not exist,
+    # use the opensuse pool
+    if distro == 'sles':
+        pool_distro = 'opensuse'
     for x in range(0, NR_POOL_SERVERS):
-        name = "%d.%s.pool.ntp.org" % (x, distro)
+        name = "%d.%s.pool.ntp.org" % (x, pool_distro)
         names.append(name)
     return names
 
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 7f36cf8f..a9d21e78 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -38,14 +38,16 @@ Subscription`` example config.
         server-hostname: <hostname>
 """
 
+from cloudinit import log as logging
 from cloudinit import util
 
+LOG = logging.getLogger(__name__)
+
 distros = ['fedora', 'rhel']
 
 
 def handle(name, cfg, _cloud, log, _args):
-    sm = SubscriptionManager(cfg)
-    sm.log = log
+    sm = SubscriptionManager(cfg, log=log)
     if not sm.is_configured():
         log.debug("%s: module not configured.", name)
         return None
@@ -86,10 +88,9 @@ def handle(name, cfg, _cloud, log, _args):
                 if not return_stat:
                     raise SubscriptionError("Unable to attach pools {0}"
                                             .format(sm.pools))
-            if (sm.enable_repo is not None) or (sm.disable_repo is not None):
-                return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
-                if not return_stat:
-                    raise SubscriptionError("Unable to add or remove repos")
+            return_stat = sm.update_repos()
+            if not return_stat:
+                raise SubscriptionError("Unable to add or remove repos")
             sm.log_success("rh_subscription plugin completed successfully")
     except SubscriptionError as e:
         sm.log_warn(str(e))
@@ -108,7 +109,10 @@ class SubscriptionManager(object):
                        'rhsm-baseurl', 'server-hostname',
                        'auto-attach', 'service-level']
 
-    def __init__(self, cfg):
+    def __init__(self, cfg, log=None):
+        if log is None:
+            log = LOG
+        self.log = log
         self.cfg = cfg
         self.rhel_cfg = self.cfg.get('rh_subscription', {})
         self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
@@ -130,7 +134,7 @@ class SubscriptionManager(object):
 
     def log_warn(self, msg):
         '''Simple wrapper for logging warning messages. Useful for unittests'''
-        self.log.warn(msg)
+        self.log.warning(msg)
 
     def _verify_keys(self):
         '''
@@ -245,7 +249,7 @@ class SubscriptionManager(object):
             return False
 
         reg_id = return_out.split("ID: ")[1].rstrip()
-        self.log.debug("Registered successfully with ID {0}".format(reg_id))
+        self.log.debug("Registered successfully with ID %s", reg_id)
         return True
 
     def _set_service_level(self):
@@ -347,7 +351,7 @@ class SubscriptionManager(object):
         try:
             self._sub_man_cli(cmd)
             self.log.debug("Attached the following pools to your "
-                           "system: %s" % (", ".join(pool_list))
+                           "system: %s", (", ".join(pool_list))
                            .replace('--pool=', ''))
             return True
         except util.ProcessExecutionError as e:
@@ -355,18 +359,24 @@ class SubscriptionManager(object):
                           "due to {1}".format(pool, e))
             return False
 
-    def update_repos(self, erepos, drepos):
+    def update_repos(self):
         '''
         Takes a list of yum repo ids that need to be disabled or enabled; then
         it verifies if they are already enabled or disabled and finally
         executes the action to disable or enable
         '''
 
-        if (erepos is not None) and (not isinstance(erepos, list)):
+        erepos = self.enable_repo
+        drepos = self.disable_repo
+        if erepos is None:
+            erepos = []
+        if drepos is None:
+            drepos = []
+        if not isinstance(erepos, list):
             self.log_warn("Repo IDs must in the format of a list.")
             return False
 
-        if (drepos is not None) and (not isinstance(drepos, list)):
+        if not isinstance(drepos, list):
             self.log_warn("Repo IDs must in the format of a list.")
             return False
 
@@ -399,14 +409,14 @@ class SubscriptionManager(object):
             for fail in enable_list_fail:
                 # Check if the repo exists or not
                 if fail in active_repos:
-                    self.log.debug("Repo {0} is already enabled".format(fail))
+                    self.log.debug("Repo %s is already enabled", fail)
                 else:
                     self.log_warn("Repo {0} does not appear to "
                                   "exist".format(fail))
         if len(disable_list_fail) > 0:
             for fail in disable_list_fail:
-                self.log.debug("Repo {0} not disabled "
-                               "because it is not enabled".format(fail))
+                self.log.debug("Repo %s not disabled "
+                               "because it is not enabled", fail)
 
         cmd = ['repos']
         if len(disable_list) > 0:
@@ -422,10 +432,10 @@ class SubscriptionManager(object):
             return False
 
         if len(enable_list) > 0:
-            self.log.debug("Enabled the following repos: %s" %
+            self.log.debug("Enabled the following repos: %s",
                            (", ".join(enable_list)).replace('--enable=', ''))
         if len(disable_list) > 0:
-            self.log.debug("Disabled the following repos: %s" %
+            self.log.debug("Disabled the following repos: %s",
                            (", ".join(disable_list)).replace('--disable=', ''))
         return True
 
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index b3947849..c96eede1 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -23,8 +23,8 @@ using the template located in ``/etc/cloud/templates/hosts.tmpl``. In the
 
 If ``manage_etc_hosts`` is set to ``localhost``, then cloud-init will not
 rewrite ``/etc/hosts`` entirely, but rather will ensure that a entry for the
-fqdn with ip ``127.0.1.1`` is present in ``/etc/hosts`` (i.e.
-``ping <hostname>`` will ping ``127.0.1.1``).
+fqdn with a distribution dependent ip is present in ``/etc/hosts`` (i.e.
+``ping <hostname>`` will ping ``127.0.0.1`` or ``127.0.1.1`` or other ip).
 
 .. note::
     if ``manage_etc_hosts`` is set ``true`` or ``template``, the contents
diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py
index 0cba7032..d8624d82 100644
--- a/cloudinit/net/dhcp.py
+++ b/cloudinit/net/dhcp.py
@@ -8,6 +8,7 @@ import configobj
 import logging
 import os
 import re
+import signal
 
 from cloudinit.net import find_fallback_nic, get_devicelist
 from cloudinit import temp_utils
@@ -41,8 +42,7 @@ def maybe_perform_dhcp_discovery(nic=None):
     if nic is None:
         nic = find_fallback_nic()
         if nic is None:
-            LOG.debug(
-                'Skip dhcp_discovery: Unable to find fallback nic.')
+            LOG.debug('Skip dhcp_discovery: Unable to find fallback nic.')
             return {}
     elif nic not in get_devicelist():
         LOG.debug(
@@ -119,7 +119,13 @@ def dhcp_discovery(dhclient_cmd_path, interface, cleandir):
     cmd = [sandbox_dhclient_cmd, '-1', '-v', '-lf', lease_file,
            '-pf', pid_file, interface, '-sf', '/bin/true']
     util.subp(cmd, capture=True)
-    return parse_dhcp_lease_file(lease_file)
+    pid = None
+    try:
+        pid = int(util.load_file(pid_file).strip())
+        return parse_dhcp_lease_file(lease_file)
+    finally:
+        if pid:
+            os.kill(pid, signal.SIGKILL)
 
 
 def networkd_parse_lease(content):
diff --git a/cloudinit/net/tests/test_dhcp.py b/cloudinit/net/tests/test_dhcp.py
index 1c1f504a..3d8e15c0 100644
--- a/cloudinit/net/tests/test_dhcp.py
+++ b/cloudinit/net/tests/test_dhcp.py
@@ -2,6 +2,7 @@
 
 import mock
 import os
+import signal
 from textwrap import dedent
 
 from cloudinit.net.dhcp import (
@@ -114,8 +115,9 @@ class TestDHCPDiscoveryClean(CiTestCase):
         self.assertEqual('eth9', call[0][1])
         self.assertIn('/var/tmp/cloud-init/cloud-init-dhcp-', call[0][2])
 
+    @mock.patch('cloudinit.net.dhcp.os.kill')
     @mock.patch('cloudinit.net.dhcp.util.subp')
-    def test_dhcp_discovery_run_in_sandbox(self, m_subp):
+    def test_dhcp_discovery_run_in_sandbox(self, m_subp, m_kill):
         """dhcp_discovery brings up the interface and runs dhclient.
 
         It also returns the parsed dhcp.leases file generated in the sandbox.
@@ -134,6 +136,10 @@ class TestDHCPDiscoveryClean(CiTestCase):
             """)
         lease_file = os.path.join(tmpdir, 'dhcp.leases')
         write_file(lease_file, lease_content)
+        pid_file = os.path.join(tmpdir, 'dhclient.pid')
+        my_pid = 1
+        write_file(pid_file, "%d\n" % my_pid)
+
         self.assertItemsEqual(
             [{'interface': 'eth9', 'fixed-address': '192.168.2.74',
               'subnet-mask': '255.255.255.0', 'routers': '192.168.2.1'}],
@@ -149,6 +155,7 @@ class TestDHCPDiscoveryClean(CiTestCase):
                 [os.path.join(tmpdir, 'dhclient'), '-1', '-v', '-lf',
                  lease_file, '-pf', os.path.join(tmpdir, 'dhclient.pid'),
                  'eth9', '-sf', '/bin/true'], capture=True)])
+        m_kill.assert_has_calls([mock.call(my_pid, signal.SIGKILL)])
 
 
 class TestSystemdParseLeases(CiTestCase):
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 80c2bd12..8c3492d9 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -465,10 +465,8 @@ class DataSourceAzure(sources.DataSource):
 
            1. Probe the drivers of the net-devices present and inject them in
              the network configuration under params: driver: <driver> value
-           2. If the driver value is 'mlx4_core', the control mode should be
-              set to manual. The device will be later used to build a bond,
-              for now we want to ensure the device gets named but does not
-              break any network configuration
+           2. Generate a fallback network config that does not include any of
+              the blacklisted devices.
         """
         blacklist = ['mlx4_core']
         if not self._network_config:
@@ -477,25 +475,6 @@ class DataSourceAzure(sources.DataSource):
             netconfig = net.generate_fallback_config(
                 blacklist_drivers=blacklist, config_driver=True)
 
-            # if we have any blacklisted devices, update the network_config to
-            # include the device, mac, and driver values, but with no ip
-            # config; this ensures udev rules are generated but won't affect
-            # ip configuration
-            bl_found = 0
-            for bl_dev in [dev for dev in net.get_devicelist()
-                           if net.device_driver(dev) in blacklist]:
-                bl_found += 1
-                cfg = {
-                    'type': 'physical',
-                    'name': 'vf%d' % bl_found,
-                    'mac_address': net.get_interface_mac(bl_dev),
-                    'params': {
-                        'driver': net.device_driver(bl_dev),
-                        'device_id': net.device_devid(bl_dev),
-                    },
-                }
-                netconfig['config'].append(cfg)
-
             self._network_config = netconfig
 
         return self._network_config
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 0ef22174..7bbbfb63 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -65,7 +65,7 @@ class DataSourceEc2(sources.DataSource):
     get_network_metadata = False
 
     # Track the discovered fallback nic for use in configuration generation.
-    fallback_nic = None
+    _fallback_interface = None
 
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -92,18 +92,17 @@ class DataSourceEc2(sources.DataSource):
         elif self.cloud_platform == Platforms.NO_EC2_METADATA:
             return False
 
-        self.fallback_nic = net.find_fallback_nic()
         if self.get_network_metadata:  # Setup networking in init-local stage.
             if util.is_FreeBSD():
                 LOG.debug("FreeBSD doesn't support running dhclient with -sf")
                 return False
-            dhcp_leases = dhcp.maybe_perform_dhcp_discovery(self.fallback_nic)
+            dhcp_leases = dhcp.maybe_perform_dhcp_discovery(
+                self.fallback_interface)
             if not dhcp_leases:
                 # DataSourceEc2Local failed in init-local stage. DataSourceEc2
                 # will still run in init-network stage.
                 return False
             dhcp_opts = dhcp_leases[-1]
-            self.fallback_nic = dhcp_opts.get('interface')
             net_params = {'interface': dhcp_opts.get('interface'),
                           'ip': dhcp_opts.get('fixed-address'),
                           'prefix_or_mask': dhcp_opts.get('subnet-mask'),
@@ -301,21 +300,44 @@ class DataSourceEc2(sources.DataSource):
             return None
 
         result = None
-        net_md = self.metadata.get('network')
+        no_network_metadata_on_aws = bool(
+            'network' not in self.metadata and
+            self.cloud_platform == Platforms.AWS)
+        if no_network_metadata_on_aws:
+            LOG.debug("Metadata 'network' not present:"
+                      " Refreshing stale metadata from prior to upgrade.")
+            util.log_time(
+                logfunc=LOG.debug, msg='Re-crawl of metadata service',
+                func=self._crawl_metadata)
+
         # Limit network configuration to only the primary/fallback nic
-        macs_to_nics = {
-            net.get_interface_mac(self.fallback_nic): self.fallback_nic}
+        iface = self.fallback_interface
+        macs_to_nics = {net.get_interface_mac(iface): iface}
+        net_md = self.metadata.get('network')
         if isinstance(net_md, dict):
             result = convert_ec2_metadata_network_config(
-                net_md, macs_to_nics=macs_to_nics,
-                fallback_nic=self.fallback_nic)
+                net_md, macs_to_nics=macs_to_nics, fallback_nic=iface)
         else:
-            LOG.warning("unexpected metadata 'network' key not valid: %s",
-                        net_md)
+            LOG.warning("Metadata 'network' key not valid: %s.", net_md)
         self._network_config = result
 
         return self._network_config
 
+    @property
+    def fallback_interface(self):
+        if self._fallback_interface is None:
+            # fallback_nic was used at one point, so restored objects may
+            # have an attribute there. respect that if found.
+            _legacy_fbnic = getattr(self, 'fallback_nic', None)
+            if _legacy_fbnic:
+                self._fallback_interface = _legacy_fbnic
+                self.fallback_nic = None
+            else:
+                self._fallback_interface = net.find_fallback_nic()
+                if self._fallback_interface is None:
+                    LOG.warning("Did not find a fallback interface on EC2.")
+        return self._fallback_interface
+
     def _crawl_metadata(self):
         """Crawl metadata service when available.
 
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 88cb7f84..cc55daf8 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -19,6 +19,7 @@ import six
 
 from cloudinit import handlers
 from cloudinit import log as logging
+from cloudinit.url_helper import UrlError
 from cloudinit import util
 
 LOG = logging.getLogger(__name__)
@@ -222,16 +223,28 @@ class UserDataProcessor(object):
             if include_once_on and os.path.isfile(include_once_fn):
                 content = util.load_file(include_once_fn)
             else:
-                resp = util.read_file_or_url(include_url,
-                                             ssl_details=self.ssl_details)
-                if include_once_on and resp.ok():
-                    util.write_file(include_once_fn, resp.contents, mode=0o600)
-                if resp.ok():
-                    content = resp.contents
-                else:
-                    LOG.warning(("Fetching from %s resulted in"
-                                 " a invalid http code of %s"),
-                                include_url, resp.code)
+                try:
+                    resp = util.read_file_or_url(include_url,
+                                                 ssl_details=self.ssl_details)
+                    if include_once_on and resp.ok():
+                        util.write_file(include_once_fn, resp.contents,
+                                        mode=0o600)
+                    if resp.ok():
+                        content = resp.contents
+                    else:
+                        LOG.warning(("Fetching from %s resulted in"
+                                     " a invalid http code of %s"),
+                                    include_url, resp.code)
+                except UrlError as urle:
+                    message = str(urle)
+                    # Older versions of requests.exceptions.HTTPError may not
+                    # include the errant url. Append it for clarity in logs.
+                    if include_url not in message:
+                        message += ' for url: {0}'.format(include_url)
+                    LOG.warning(message)
+                except IOError as ioe:
+                    LOG.warning("Fetching from %s resulted in %s",
+                                include_url, ioe)
 
             if content is not None:
                 new_msg = convert_string(content)
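A quick usage sketch of the cc_ntp change above: generate_server_names() now maps 'sles' onto the opensuse pool, so with NR_POOL_SERVERS = 4 the call below is expected to produce four opensuse pool names. This is only an illustration of the new behaviour, not part of the change itself.

    from cloudinit.config.cc_ntp import generate_server_names

    # 'sles' is rewritten to the opensuse pool, so this should print:
    # ['0.opensuse.pool.ntp.org', '1.opensuse.pool.ntp.org',
    #  '2.opensuse.pool.ntp.org', '3.opensuse.pool.ntp.org']
    print(generate_server_names('sles'))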
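The cc_rh_subscription hunks change two interfaces: SubscriptionManager now accepts an optional log argument and falls back to the module-level LOG, and update_repos() reads self.enable_repo / self.disable_repo instead of taking arguments. The sketch below is a hedged illustration of driving the reworked class directly; the cloud-config values are made up, and on a real system these calls would shell out to subscription-manager.

    from cloudinit.config.cc_rh_subscription import SubscriptionManager

    cfg = {
        'rh_subscription': {
            'username': 'example-user',          # illustrative credentials
            'password': 'example-password',
            'enable-repo': ['example-repo-1'],   # read back via sm.enable_repo
        }
    }

    sm = SubscriptionManager(cfg)   # no log= given, so the module LOG is used
    if sm.is_configured():
        sm.update_repos()           # no arguments in the new signature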
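The cloudinit/net/dhcp.py hunk keeps the sandboxed dhclient from being leaked: the pid file is read and the daemon is killed once the leases have been parsed, even if parsing fails. Below is a self-contained sketch of that try/finally pattern, with hypothetical file paths and the lease parsing stubbed out; it is not the cloud-init implementation itself.

    import os
    import signal


    def parse_and_reap(lease_file, pid_file):
        """Parse leases, then make sure the sandboxed daemon is killed."""
        pid = None
        try:
            with open(pid_file) as pf:
                pid = int(pf.read().strip())
            with open(lease_file) as lf:
                return lf.read()  # stand-in for parse_dhcp_lease_file()
        finally:
            # SIGKILL runs even if parsing raised, mirroring dhcp_discovery()
            if pid:
                os.kill(pid, signal.SIGKILL)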