From 01ff5c2eae9fc75beab3bc1368e17a7278dc0905 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Sat, 7 Apr 2018 15:03:26 -0600
Subject: tests: fix ec2 integration network metadata validation

Fix integration test logic for ec2 to look for network and
availability-zone data under the key path 'ds'=>'meta-data' instead of
just 'ds' when parsing instance-data.json.
---
 tests/cloud_tests/testcases/base.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

(limited to 'tests/cloud_tests')

diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py
index 324c7c91..7598d469 100644
--- a/tests/cloud_tests/testcases/base.py
+++ b/tests/cloud_tests/testcases/base.py
@@ -149,7 +149,10 @@ class CloudTestCase(unittest.TestCase):
         self.assertEqual(
             ['ds/user-data'], instance_data['base64-encoded-keys'])
         ds = instance_data.get('ds', {})
-        macs = ds.get('network', {}).get('interfaces', {}).get('macs', {})
+        v1_data = instance_data.get('v1', {})
+        metadata = ds.get('meta-data', {})
+        macs = metadata.get(
+            'network', {}).get('interfaces', {}).get('macs', {})
         if not macs:
             raise AssertionError('No network data from EC2 meta-data')
         # Check meta-data items we depend on
@@ -160,10 +163,8 @@ class CloudTestCase(unittest.TestCase):
         for key in expected_net_keys:
             self.assertIn(key, mac_data)
         self.assertIsNotNone(
-            ds.get('placement', {}).get('availability-zone'),
+            metadata.get('placement', {}).get('availability-zone'),
             'Could not determine EC2 Availability zone placement')
-        ds = instance_data.get('ds', {})
-        v1_data = instance_data.get('v1', {})
         self.assertIsNotNone(
             v1_data['availability-zone'], 'expected ec2 availability-zone')
         self.assertEqual('aws', v1_data['cloud-name'])
--
cgit v1.2.3


From c6dff581a9c253170d5e3f12fb83d16a8dec8257 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Thu, 12 Apr 2018 15:32:25 -0400
Subject: Implement ntp client spec with auto support for distro selection

Add a base NTP client configuration dictionary and allow distro-specific
changes to be merged in. Add a select-client function which prefers
already-installed clients over clients that still need to be installed.
Also allow distributions to override the cloud-init defaults.
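As a minimal sketch of the user-facing configuration this enables (the keys
shown follow the cc_ntp schema examples added in this patch; treat it as an
illustration rather than a definitive reference), selecting the built-in
chrony client from user-data looks roughly like:

    #cloud-config
    ntp:
      enabled: true
      # use the cloud-init default chrony configuration for this distro
      ntp_client: chrony

Leaving ntp_client unset, or setting it to 'auto', lets the distro pick from
its preferred client list, preferring a client that is already installed.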
LP: #1749722 --- cloudinit/config/cc_ntp.py | 485 ++++++++++-- cloudinit/distros/__init__.py | 12 + cloudinit/distros/opensuse.py | 24 + cloudinit/distros/ubuntu.py | 19 + config/cloud.cfg.tmpl | 2 + templates/chrony.conf.debian.tmpl | 39 + templates/chrony.conf.fedora.tmpl | 48 ++ templates/chrony.conf.opensuse.tmpl | 38 + templates/chrony.conf.rhel.tmpl | 45 ++ templates/chrony.conf.sles.tmpl | 38 + templates/chrony.conf.ubuntu.tmpl | 42 + tests/cloud_tests/testcases/modules/ntp.yaml | 1 + tests/cloud_tests/testcases/modules/ntp_chrony.py | 15 + .../cloud_tests/testcases/modules/ntp_chrony.yaml | 17 + tests/cloud_tests/testcases/modules/ntp_pools.yaml | 1 + .../cloud_tests/testcases/modules/ntp_servers.yaml | 1 + .../cloud_tests/testcases/modules/ntp_timesyncd.py | 15 + .../testcases/modules/ntp_timesyncd.yaml | 15 + tests/unittests/test_distros/test_netconfig.py | 6 + .../test_distros/test_user_data_normalize.py | 6 + tests/unittests/test_handler/test_handler_ntp.py | 881 ++++++++++++++------- 21 files changed, 1369 insertions(+), 381 deletions(-) create mode 100644 templates/chrony.conf.debian.tmpl create mode 100644 templates/chrony.conf.fedora.tmpl create mode 100644 templates/chrony.conf.opensuse.tmpl create mode 100644 templates/chrony.conf.rhel.tmpl create mode 100644 templates/chrony.conf.sles.tmpl create mode 100644 templates/chrony.conf.ubuntu.tmpl create mode 100644 tests/cloud_tests/testcases/modules/ntp_chrony.py create mode 100644 tests/cloud_tests/testcases/modules/ntp_chrony.yaml create mode 100644 tests/cloud_tests/testcases/modules/ntp_timesyncd.py create mode 100644 tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml (limited to 'tests/cloud_tests') diff --git a/cloudinit/config/cc_ntp.py b/cloudinit/config/cc_ntp.py index cbd0237d..9e074bda 100644 --- a/cloudinit/config/cc_ntp.py +++ b/cloudinit/config/cc_ntp.py @@ -10,20 +10,95 @@ from cloudinit.config.schema import ( get_schema_doc, validate_cloudconfig_schema) from cloudinit import log as logging from cloudinit.settings import PER_INSTANCE +from cloudinit import temp_utils from cloudinit import templater from cloudinit import type_utils from cloudinit import util +import copy import os +import six from textwrap import dedent LOG = logging.getLogger(__name__) frequency = PER_INSTANCE NTP_CONF = '/etc/ntp.conf' -TIMESYNCD_CONF = '/etc/systemd/timesyncd.conf.d/cloud-init.conf' NR_POOL_SERVERS = 4 -distros = ['centos', 'debian', 'fedora', 'opensuse', 'sles', 'ubuntu'] +distros = ['centos', 'debian', 'fedora', 'opensuse', 'rhel', 'sles', 'ubuntu'] + +NTP_CLIENT_CONFIG = { + 'chrony': { + 'check_exe': 'chronyd', + 'confpath': '/etc/chrony.conf', + 'packages': ['chrony'], + 'service_name': 'chrony', + 'template_name': 'chrony.conf.{distro}', + 'template': None, + }, + 'ntp': { + 'check_exe': 'ntpd', + 'confpath': NTP_CONF, + 'packages': ['ntp'], + 'service_name': 'ntp', + 'template_name': 'ntp.conf.{distro}', + 'template': None, + }, + 'ntpdate': { + 'check_exe': 'ntpdate', + 'confpath': NTP_CONF, + 'packages': ['ntpdate'], + 'service_name': 'ntpdate', + 'template_name': 'ntp.conf.{distro}', + 'template': None, + }, + 'systemd-timesyncd': { + 'check_exe': '/lib/systemd/systemd-timesyncd', + 'confpath': '/etc/systemd/timesyncd.conf.d/cloud-init.conf', + 'packages': [], + 'service_name': 'systemd-timesyncd', + 'template_name': 'timesyncd.conf', + 'template': None, + }, +} + +# This is Distro-specific configuration overrides of the base config +DISTRO_CLIENT_CONFIG = { + 'debian': { + 'chrony': { + 'confpath': 
'/etc/chrony/chrony.conf', + }, + }, + 'opensuse': { + 'chrony': { + 'service_name': 'chronyd', + }, + 'ntp': { + 'confpath': '/etc/ntp.conf', + 'service_name': 'ntpd', + }, + 'systemd-timesyncd': { + 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + }, + }, + 'sles': { + 'chrony': { + 'service_name': 'chronyd', + }, + 'ntp': { + 'confpath': '/etc/ntp.conf', + 'service_name': 'ntpd', + }, + 'systemd-timesyncd': { + 'check_exe': '/usr/lib/systemd/systemd-timesyncd', + }, + }, + 'ubuntu': { + 'chrony': { + 'confpath': '/etc/chrony/chrony.conf', + }, + }, +} # The schema definition for each cloud-config module is a strict contract for @@ -48,7 +123,34 @@ schema = { 'distros': distros, 'examples': [ dedent("""\ + # Override ntp with chrony configuration on Ubuntu + ntp: + enabled: true + ntp_client: chrony # Uses cloud-init default chrony configuration + """), + dedent("""\ + # Provide a custom ntp client configuration ntp: + enabled: true + ntp_client: myntpclient + config: + confpath: /etc/myntpclient/myntpclient.conf + check_exe: myntpclientd + packages: + - myntpclient + service_name: myntpclient + template: | + ## template:jinja + # My NTP Client config + {% if pools -%}# pools{% endif %} + {% for pool in pools -%} + pool {{pool}} iburst + {% endfor %} + {%- if servers %}# servers + {% endif %} + {% for server in servers -%} + server {{server}} iburst + {% endfor %} pools: [0.int.pool.ntp.org, 1.int.pool.ntp.org, ntp.myorg.org] servers: - ntp.server.local @@ -83,79 +185,159 @@ schema = { List of ntp servers. If both pools and servers are empty, 4 default pool servers will be provided with the format ``{0-3}.{distro}.pool.ntp.org``.""") - } + }, + 'ntp_client': { + 'type': 'string', + 'default': 'auto', + 'description': dedent("""\ + Name of an NTP client to use to configure system NTP. + When unprovided or 'auto' the default client preferred + by the distribution will be used. The following + built-in client names can be used to override existing + configuration defaults: chrony, ntp, ntpdate, + systemd-timesyncd."""), + }, + 'enabled': { + 'type': 'boolean', + 'default': True, + 'description': dedent("""\ + Attempt to enable ntp clients if set to True. If set + to False, ntp client will not be configured or + installed"""), + }, + 'config': { + 'description': dedent("""\ + Configuration settings or overrides for the + ``ntp_client`` specified."""), + 'type': ['object'], + 'properties': { + 'confpath': { + 'type': 'string', + 'description': dedent("""\ + The path to where the ``ntp_client`` + configuration is written."""), + }, + 'check_exe': { + 'type': 'string', + 'description': dedent("""\ + The executable name for the ``ntp_client``. + For example, ntp service ``check_exe`` is + 'ntpd' because it runs the ntpd binary."""), + }, + 'packages': { + 'type': 'array', + 'items': { + 'type': 'string', + }, + 'uniqueItems': True, + 'description': dedent("""\ + List of packages needed to be installed for the + selected ``ntp_client``."""), + }, + 'service_name': { + 'type': 'string', + 'description': dedent("""\ + The systemd or sysvinit service name used to + start and stop the ``ntp_client`` + service."""), + }, + 'template': { + 'type': 'string', + 'description': dedent("""\ + Inline template allowing users to define their + own ``ntp_client`` configuration template. + The value must start with '## template:jinja' + to enable use of templating support. + """), + }, + }, + # Don't use REQUIRED_NTP_CONFIG_KEYS to allow for override + # of builtin client values. 
+ 'required': [], + 'minProperties': 1, # If we have config, define something + 'additionalProperties': False + }, }, 'required': [], 'additionalProperties': False } } } - -__doc__ = get_schema_doc(schema) # Supplement python help() +REQUIRED_NTP_CONFIG_KEYS = frozenset([ + 'check_exe', 'confpath', 'packages', 'service_name']) -def handle(name, cfg, cloud, log, _args): - """Enable and configure ntp.""" - if 'ntp' not in cfg: - LOG.debug( - "Skipping module named %s, not present or disabled by cfg", name) - return - ntp_cfg = cfg['ntp'] - if ntp_cfg is None: - ntp_cfg = {} # Allow empty config which will install the package +__doc__ = get_schema_doc(schema) # Supplement python help() - # TODO drop this when validate_cloudconfig_schema is strict=True - if not isinstance(ntp_cfg, (dict)): - raise RuntimeError( - "'ntp' key existed in config, but not a dictionary type," - " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) - validate_cloudconfig_schema(cfg, schema) - if ntp_installable(): - service_name = 'ntp' - confpath = NTP_CONF - template_name = None - packages = ['ntp'] - check_exe = 'ntpd' - else: - service_name = 'systemd-timesyncd' - confpath = TIMESYNCD_CONF - template_name = 'timesyncd.conf' - packages = [] - check_exe = '/lib/systemd/systemd-timesyncd' - - rename_ntp_conf() - # ensure when ntp is installed it has a configuration file - # to use instead of starting up with packaged defaults - write_ntp_config_template(ntp_cfg, cloud, confpath, template=template_name) - install_ntp(cloud.distro.install_packages, packages=packages, - check_exe=check_exe) +def distro_ntp_client_configs(distro): + """Construct a distro-specific ntp client config dictionary by merging + distro specific changes into base config. - try: - reload_ntp(service_name, systemd=cloud.distro.uses_systemd()) - except util.ProcessExecutionError as e: - LOG.exception("Failed to reload/start ntp service: %s", e) - raise + @param distro: String providing the distro class name. + @returns: Dict of distro configurations for ntp clients. + """ + dcfg = DISTRO_CLIENT_CONFIG + cfg = copy.copy(NTP_CLIENT_CONFIG) + if distro in dcfg: + cfg = util.mergemanydict([cfg, dcfg[distro]], reverse=True) + return cfg -def ntp_installable(): - """Check if we can install ntp package +def select_ntp_client(ntp_client, distro): + """Determine which ntp client is to be used, consulting the distro + for its preference. - Ubuntu-Core systems do not have an ntp package available, so - we always return False. Other systems require package managers to install - the ntp package If we fail to find one of the package managers, then we - cannot install ntp. + @param ntp_client: String name of the ntp client to use. + @param distro: Distro class instance. + @returns: Dict of the selected ntp client or {} if none selected. 
""" - if util.system_is_snappy(): - return False - if any(map(util.which, ['apt-get', 'dnf', 'yum', 'zypper'])): - return True + # construct distro-specific ntp_client_config dict + distro_cfg = distro_ntp_client_configs(distro.name) + + # user specified client, return its config + if ntp_client and ntp_client != 'auto': + LOG.debug('Selected NTP client "%s" via user-data configuration', + ntp_client) + return distro_cfg.get(ntp_client, {}) + + # default to auto if unset in distro + distro_ntp_client = distro.get_option('ntp_client', 'auto') + + clientcfg = {} + if distro_ntp_client == "auto": + for client in distro.preferred_ntp_clients: + cfg = distro_cfg.get(client) + if util.which(cfg.get('check_exe')): + LOG.debug('Selected NTP client "%s", already installed', + client) + clientcfg = cfg + break + + if not clientcfg: + client = distro.preferred_ntp_clients[0] + LOG.debug( + 'Selected distro preferred NTP client "%s", not yet installed', + client) + clientcfg = distro_cfg.get(client) + else: + LOG.debug('Selected NTP client "%s" via distro system config', + distro_ntp_client) + clientcfg = distro_cfg.get(distro_ntp_client, {}) + + return clientcfg - return False +def install_ntp_client(install_func, packages=None, check_exe="ntpd"): + """Install ntp client package if not already installed. -def install_ntp(install_func, packages=None, check_exe="ntpd"): + @param install_func: function. This parameter is invoked with the contents + of the packages parameter. + @param packages: list. This parameter defaults to ['ntp']. + @param check_exe: string. The name of a binary that indicates the package + the specified package is already installed. + """ if util.which(check_exe): return if packages is None: @@ -164,15 +346,23 @@ def install_ntp(install_func, packages=None, check_exe="ntpd"): install_func(packages) -def rename_ntp_conf(config=None): - """Rename any existing ntp.conf file""" - if config is None: # For testing - config = NTP_CONF - if os.path.exists(config): - util.rename(config, config + ".dist") +def rename_ntp_conf(confpath=None): + """Rename any existing ntp client config file + + @param confpath: string. Specify a path to an existing ntp client + configuration file. + """ + if os.path.exists(confpath): + util.rename(confpath, confpath + ".dist") def generate_server_names(distro): + """Generate a list of server names to populate an ntp client configuration + file. + + @param distro: string. Specify the distro name + @returns: list: A list of strings representing ntp servers for this distro. + """ names = [] pool_distro = distro # For legal reasons x.pool.sles.ntp.org does not exist, @@ -185,34 +375,60 @@ def generate_server_names(distro): return names -def write_ntp_config_template(cfg, cloud, path, template=None): - servers = cfg.get('servers', []) - pools = cfg.get('pools', []) +def write_ntp_config_template(distro_name, servers=None, pools=None, + path=None, template_fn=None, template=None): + """Render a ntp client configuration for the specified client. + + @param distro_name: string. The distro class name. + @param servers: A list of strings specifying ntp servers. Defaults to empty + list. + @param pools: A list of strings specifying ntp pools. Defaults to empty + list. + @param path: A string to specify where to write the rendered template. + @param template_fn: A string to specify the template source file. + @param template: A string specifying the contents of the template. 
This + content will be written to a temporary file before being used to render + the configuration file. + + @raises: ValueError when path is None. + @raises: ValueError when template_fn is None and template is None. + """ + if not servers: + servers = [] + if not pools: + pools = [] if len(servers) == 0 and len(pools) == 0: - pools = generate_server_names(cloud.distro.name) + pools = generate_server_names(distro_name) LOG.debug( 'Adding distro default ntp pool servers: %s', ','.join(pools)) - params = { - 'servers': servers, - 'pools': pools, - } + if not path: + raise ValueError('Invalid value for path parameter') - if template is None: - template = 'ntp.conf.%s' % cloud.distro.name + if not template_fn and not template: + raise ValueError('Not template_fn or template provided') - template_fn = cloud.get_template_filename(template) - if not template_fn: - template_fn = cloud.get_template_filename('ntp.conf') - if not template_fn: - raise RuntimeError( - 'No template found, not rendering {path}'.format(path=path)) + params = {'servers': servers, 'pools': pools} + if template: + tfile = temp_utils.mkstemp(prefix='template_name-', suffix=".tmpl") + template_fn = tfile[1] # filepath is second item in tuple + util.write_file(template_fn, content=template) templater.render_to_file(template_fn, path, params) + # clean up temporary template + if template: + util.del_file(template_fn) def reload_ntp(service, systemd=False): + """Restart or reload an ntp system service. + + @param service: A string specifying the name of the service to be affected. + @param systemd: A boolean indicating if the distro uses systemd, defaults + to False. + @returns: A tuple of stdout, stderr results from executing the action. + """ if systemd: cmd = ['systemctl', 'reload-or-restart', service] else: @@ -220,4 +436,117 @@ def reload_ntp(service, systemd=False): util.subp(cmd, capture=True) +def supplemental_schema_validation(ntp_config): + """Validate user-provided ntp:config option values. + + This function supplements flexible jsonschema validation with specific + value checks to aid in triage of invalid user-provided configuration. + + @param ntp_config: Dictionary of configuration value under 'ntp'. + + @raises: ValueError describing invalid values provided. + """ + errors = [] + missing = REQUIRED_NTP_CONFIG_KEYS.difference(set(ntp_config.keys())) + if missing: + keys = ', '.join(sorted(missing)) + errors.append( + 'Missing required ntp:config keys: {keys}'.format(keys=keys)) + elif not any([ntp_config.get('template'), + ntp_config.get('template_name')]): + errors.append( + 'Either ntp:config:template or ntp:config:template_name values' + ' are required') + for key, value in sorted(ntp_config.items()): + keypath = 'ntp:config:' + key + if key == 'confpath': + if not all([value, isinstance(value, six.string_types)]): + errors.append( + 'Expected a config file path {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif key == 'packages': + if not isinstance(value, list): + errors.append( + 'Expected a list of required package names for {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif key in ('template', 'template_name'): + if value is None: # Either template or template_name can be none + continue + if not isinstance(value, six.string_types): + errors.append( + 'Expected a string type for {keypath}.' + ' Found ({value})'.format(keypath=keypath, value=value)) + elif not isinstance(value, six.string_types): + errors.append( + 'Expected a string type for {keypath}.' 
+ ' Found ({value})'.format(keypath=keypath, value=value)) + + if errors: + raise ValueError(r'Invalid ntp configuration:\n{errors}'.format( + errors='\n'.join(errors))) + + +def handle(name, cfg, cloud, log, _args): + """Enable and configure ntp.""" + if 'ntp' not in cfg: + LOG.debug( + "Skipping module named %s, not present or disabled by cfg", name) + return + ntp_cfg = cfg['ntp'] + if ntp_cfg is None: + ntp_cfg = {} # Allow empty config which will install the package + + # TODO drop this when validate_cloudconfig_schema is strict=True + if not isinstance(ntp_cfg, (dict)): + raise RuntimeError( + "'ntp' key existed in config, but not a dictionary type," + " is a {_type} instead".format(_type=type_utils.obj_name(ntp_cfg))) + + validate_cloudconfig_schema(cfg, schema) + + # Allow users to explicitly enable/disable + enabled = ntp_cfg.get('enabled', True) + if util.is_false(enabled): + LOG.debug("Skipping module named %s, disabled by cfg", name) + return + + # Select which client is going to be used and get the configuration + ntp_client_config = select_ntp_client(ntp_cfg.get('ntp_client'), + cloud.distro) + + # Allow user ntp config to override distro configurations + ntp_client_config = util.mergemanydict( + [ntp_client_config, ntp_cfg.get('config', {})], reverse=True) + + supplemental_schema_validation(ntp_client_config) + rename_ntp_conf(confpath=ntp_client_config.get('confpath')) + + template_fn = None + if not ntp_client_config.get('template'): + template_name = ( + ntp_client_config.get('template_name').replace('{distro}', + cloud.distro.name)) + template_fn = cloud.get_template_filename(template_name) + if not template_fn: + msg = ('No template found, not rendering %s' % + ntp_client_config.get('template_name')) + raise RuntimeError(msg) + + write_ntp_config_template(cloud.distro.name, + servers=ntp_cfg.get('servers', []), + pools=ntp_cfg.get('pools', []), + path=ntp_client_config.get('confpath'), + template_fn=template_fn, + template=ntp_client_config.get('template')) + + install_ntp_client(cloud.distro.install_packages, + packages=ntp_client_config['packages'], + check_exe=ntp_client_config['check_exe']) + try: + reload_ntp(ntp_client_config['service_name'], + systemd=cloud.distro.uses_systemd()) + except util.ProcessExecutionError as e: + LOG.exception("Failed to reload/start ntp service: %s", e) + raise + # vi: ts=4 expandtab diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 55260eae..6c22b07f 100755 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -49,6 +49,9 @@ LOG = logging.getLogger(__name__) # It could break when Amazon adds new regions and new AZs. 
_EC2_AZ_RE = re.compile('^[a-z][a-z]-(?:[a-z]+-)+[0-9][a-z]$') +# Default NTP Client Configurations +PREFERRED_NTP_CLIENTS = ['chrony', 'systemd-timesyncd', 'ntp', 'ntpdate'] + @six.add_metaclass(abc.ABCMeta) class Distro(object): @@ -60,6 +63,7 @@ class Distro(object): tz_zone_dir = "/usr/share/zoneinfo" init_cmd = ['service'] # systemctl, service etc renderer_configs = {} + _preferred_ntp_clients = None def __init__(self, name, cfg, paths): self._paths = paths @@ -339,6 +343,14 @@ class Distro(object): contents.write("%s\n" % (eh)) util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644) + @property + def preferred_ntp_clients(self): + """Allow distro to determine the preferred ntp client list""" + if not self._preferred_ntp_clients: + self._preferred_ntp_clients = list(PREFERRED_NTP_CLIENTS) + + return self._preferred_ntp_clients + def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] LOG.debug("Attempting to run bring up interface %s using command %s", diff --git a/cloudinit/distros/opensuse.py b/cloudinit/distros/opensuse.py index 162dfa05..9f90e95e 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -208,4 +208,28 @@ class Distro(distros.Distro): nameservers, searchservers) return dev_names + @property + def preferred_ntp_clients(self): + """The preferred ntp client is dependent on the version.""" + + """Allow distro to determine the preferred ntp client list""" + if not self._preferred_ntp_clients: + distro_info = util.system_info()['dist'] + name = distro_info[0] + major_ver = int(distro_info[1].split('.')[0]) + + # This is horribly complicated because of a case of + # "we do not care if versions should be increasing syndrome" + if ( + (major_ver >= 15 and 'openSUSE' not in name) or + (major_ver >= 15 and 'openSUSE' in name and major_ver != 42) + ): + self._preferred_ntp_clients = ['chrony', + 'systemd-timesyncd', 'ntp'] + else: + self._preferred_ntp_clients = ['ntp', + 'systemd-timesyncd', 'chrony'] + + return self._preferred_ntp_clients + # vi: ts=4 expandtab diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index 82ca34f5..fdc1f622 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -10,12 +10,31 @@ # This file is part of cloud-init. See LICENSE file for license information. from cloudinit.distros import debian +from cloudinit.distros import PREFERRED_NTP_CLIENTS from cloudinit import log as logging +from cloudinit import util + +import copy LOG = logging.getLogger(__name__) class Distro(debian.Distro): + + @property + def preferred_ntp_clients(self): + """The preferred ntp client is dependent on the version.""" + if not self._preferred_ntp_clients: + (name, version, codename) = util.system_info()['dist'] + # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd. 
+ if codename == "xenial" and not util.system_is_snappy(): + self._preferred_ntp_clients = ['ntp'] + else: + self._preferred_ntp_clients = ( + copy.deepcopy(PREFERRED_NTP_CLIENTS)) + return self._preferred_ntp_clients + pass + # vi: ts=4 expandtab diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 3129d4eb..5619de3e 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -151,6 +151,8 @@ system_info: groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video] sudo: ["ALL=(ALL) NOPASSWD:ALL"] shell: /bin/bash + # Automatically discover the best ntp_client + ntp_client: auto # Other config here will be given to the distro class and/or path classes paths: cloud_dir: /var/lib/cloud/ diff --git a/templates/chrony.conf.debian.tmpl b/templates/chrony.conf.debian.tmpl new file mode 100644 index 00000000..e72bfee1 --- /dev/null +++ b/templates/chrony.conf.debian.tmpl @@ -0,0 +1,39 @@ +## template:jinja +# Welcome to the chrony configuration file. See chrony.conf(5) for more +# information about usuable directives. +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# This directive specify the location of the file containing ID/key pairs for +# NTP authentication. +keyfile /etc/chrony/chrony.keys + +# This directive specify the file into which chronyd will store the rate +# information. +driftfile /var/lib/chrony/chrony.drift + +# Uncomment the following line to turn logging on. +#log tracking measurements statistics + +# Log files location. +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. +maxupdateskew 100.0 + +# This directive enables kernel synchronisation (every 11 minutes) of the +# real-time clock. Note that it can't be used along with the 'rtcfile' directive. +rtcsync + +# Step the system clock instead of slewing it if the adjustment is larger than +# one second, but only in the first three clock updates. +makestep 1 3 + diff --git a/templates/chrony.conf.fedora.tmpl b/templates/chrony.conf.fedora.tmpl new file mode 100644 index 00000000..8551f793 --- /dev/null +++ b/templates/chrony.conf.fedora.tmpl @@ -0,0 +1,48 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Enable hardware timestamping on all interfaces that support it. +#hwtimestamp * + +# Increase the minimum number of selectable sources required to adjust +# the system clock. +#minsources 2 + +# Allow NTP client access from local network. +#allow 192.168.0.0/16 + +# Serve time even if not synchronized to a time source. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Get TAI-UTC offset and leap seconds from the system tz database. +leapsectz right/UTC + +# Specify directory for log files. 
+logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.opensuse.tmpl b/templates/chrony.conf.opensuse.tmpl new file mode 100644 index 00000000..a3d3e0ec --- /dev/null +++ b/templates/chrony.conf.opensuse.tmpl @@ -0,0 +1,38 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# In first three updates step the system clock instead of slew +# if the adjustment is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Allow NTP client access from local network. +#allow 192.168/16 + +# Serve time even if not synchronized to any NTP server. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.rhel.tmpl b/templates/chrony.conf.rhel.tmpl new file mode 100644 index 00000000..5b3542ef --- /dev/null +++ b/templates/chrony.conf.rhel.tmpl @@ -0,0 +1,45 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# Allow the system clock to be stepped in the first three updates +# if its offset is larger than 1 second. +makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Enable hardware timestamping on all interfaces that support it. +#hwtimestamp * + +# Increase the minimum number of selectable sources required to adjust +# the system clock. +#minsources 2 + +# Allow NTP client access from local network. +#allow 192.168.0.0/16 + +# Serve time even if not synchronized to a time source. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.sles.tmpl b/templates/chrony.conf.sles.tmpl new file mode 100644 index 00000000..a3d3e0ec --- /dev/null +++ b/templates/chrony.conf.sles.tmpl @@ -0,0 +1,38 @@ +## template:jinja +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# Record the rate at which the system clock gains/losses time. +driftfile /var/lib/chrony/drift + +# In first three updates step the system clock instead of slew +# if the adjustment is larger than 1 second. 
+makestep 1.0 3 + +# Enable kernel synchronization of the real-time clock (RTC). +rtcsync + +# Allow NTP client access from local network. +#allow 192.168/16 + +# Serve time even if not synchronized to any NTP server. +#local stratum 10 + +# Specify file containing keys for NTP authentication. +#keyfile /etc/chrony.keys + +# Specify directory for log files. +logdir /var/log/chrony + +# Select which information is logged. +#log measurements statistics tracking diff --git a/templates/chrony.conf.ubuntu.tmpl b/templates/chrony.conf.ubuntu.tmpl new file mode 100644 index 00000000..da7f16a4 --- /dev/null +++ b/templates/chrony.conf.ubuntu.tmpl @@ -0,0 +1,42 @@ +## template:jinja +# Welcome to the chrony configuration file. See chrony.conf(5) for more +# information about usuable directives. + +# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board +# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for +# more information. +{% if pools %}# pools +{% endif %} +{% for pool in pools -%} +pool {{pool}} iburst +{% endfor %} +{%- if servers %}# servers +{% endif %} +{% for server in servers -%} +server {{server}} iburst +{% endfor %} + +# This directive specify the location of the file containing ID/key pairs for +# NTP authentication. +keyfile /etc/chrony/chrony.keys + +# This directive specify the file into which chronyd will store the rate +# information. +driftfile /var/lib/chrony/chrony.drift + +# Uncomment the following line to turn logging on. +#log tracking measurements statistics + +# Log files location. +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. +maxupdateskew 100.0 + +# This directive enables kernel synchronisation (every 11 minutes) of the +# real-time clock. Note that it can't be used along with the 'rtcfile' directive. +rtcsync + +# Step the system clock instead of slewing it if the adjustment is larger than +# one second, but only in the first three clock updates. +makestep 1 3 diff --git a/tests/cloud_tests/testcases/modules/ntp.yaml b/tests/cloud_tests/testcases/modules/ntp.yaml index 2530d72e..7ea0707d 100644 --- a/tests/cloud_tests/testcases/modules/ntp.yaml +++ b/tests/cloud_tests/testcases/modules/ntp.yaml @@ -4,6 +4,7 @@ cloud_config: | #cloud-config ntp: + ntp_client: ntp pools: [] servers: [] collect_scripts: diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py new file mode 100644 index 00000000..461630a8 --- /dev/null +++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py @@ -0,0 +1,15 @@ +# This file is part of cloud-init. See LICENSE file for license information. 
+
+"""cloud-init Integration Test Verify Script."""
+from tests.cloud_tests.testcases import base
+
+
+class TestNtpChrony(base.CloudTestCase):
+    """Test ntp module with chrony client"""
+
+    def test_chrony_entires(self):
+        """Test chrony config entries"""
+        out = self.get_data_file('chrony_conf')
+        self.assertIn('.pool.ntp.org', out)
+
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.yaml b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
new file mode 100644
index 00000000..120735e2
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/ntp_chrony.yaml
@@ -0,0 +1,17 @@
+#
+# ntp enabled, chrony selected, check conf file
+# as chrony won't start in a container
+#
+cloud_config: |
+  #cloud-config
+  ntp:
+    enabled: true
+    ntp_client: chrony
+collect_scripts:
+  chrony_conf: |
+    #!/bin/sh
+    set -- /etc/chrony.conf /etc/chrony/chrony.conf
+    for p in "$@"; do
+        [ -e "$p" ] && { cat "$p"; exit; }
+    done
+# vi: ts=4 expandtab
diff --git a/tests/cloud_tests/testcases/modules/ntp_pools.yaml b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
index d490b228..60fa0fd1 100644
--- a/tests/cloud_tests/testcases/modules/ntp_pools.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp_pools.yaml
@@ -9,6 +9,7 @@ required_features:
 cloud_config: |
   #cloud-config
   ntp:
+    ntp_client: ntp
     pools:
     - 0.cloud-init.mypool
     - 1.cloud-init.mypool
diff --git a/tests/cloud_tests/testcases/modules/ntp_servers.yaml b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
index 6b13b70e..ee636679 100644
--- a/tests/cloud_tests/testcases/modules/ntp_servers.yaml
+++ b/tests/cloud_tests/testcases/modules/ntp_servers.yaml
@@ -6,6 +6,7 @@ required_features:
 cloud_config: |
   #cloud-config
   ntp:
+    ntp_client: ntp
     servers:
     - 172.16.15.14
     - 172.16.17.18
diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.py b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
new file mode 100644
index 00000000..eca750bc
--- /dev/null
+++ b/tests/cloud_tests/testcases/modules/ntp_timesyncd.py
@@ -0,0 +1,15 @@
+# This file is part of cloud-init. See LICENSE file for license information.
+ +"""cloud-init Integration Test Verify Script.""" +from tests.cloud_tests.testcases import base + + +class TestNtpTimesyncd(base.CloudTestCase): + """Test ntp module with systemd-timesyncd client""" + + def test_timesyncd_entries(self): + """Test timesyncd config entries""" + out = self.get_data_file('timesyncd_conf') + self.assertIn('.pool.ntp.org', out) + +# vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml new file mode 100644 index 00000000..ee47a741 --- /dev/null +++ b/tests/cloud_tests/testcases/modules/ntp_timesyncd.yaml @@ -0,0 +1,15 @@ +# +# ntp enabled, systemd-timesyncd selected, check conf file +# as systemd-timesyncd won't start in a container +# +cloud_config: | + #cloud-config + ntp: + enabled: true + ntp_client: systemd-timesyncd +collect_scripts: + timesyncd_conf: | + #!/bin/sh + cat /etc/systemd/timesyncd.conf.d/cloud-init.conf + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 1c2e45fe..7765e408 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -189,6 +189,12 @@ hn0: flags=8843 metric 0 mtu 1500 status: active """ + def setUp(self): + super(TestNetCfgDistro, self).setUp() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.add_patch('cloudinit.util.system_info', 'm_sysinfo') + self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} + def _get_distro(self, dname, renderers=None): cls = distros.fetch(dname) cfg = settings.CFG_BUILTIN diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py index 0fa9cdb5..fa4b6cfe 100644 --- a/tests/unittests/test_distros/test_user_data_normalize.py +++ b/tests/unittests/test_distros/test_user_data_normalize.py @@ -22,6 +22,12 @@ bcfg = { class TestUGNormalize(TestCase): + def setUp(self): + super(TestUGNormalize, self).setUp() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.add_patch('cloudinit.util.system_info', 'm_sysinfo') + self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} + def _make_distro(self, dtype, def_user=None): cfg = dict(settings.CFG_BUILTIN) cfg['system_info']['distro'] = dtype diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 695897c0..1b3ca570 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -4,20 +4,21 @@ from cloudinit.config import cc_ntp from cloudinit.sources import DataSourceNone from cloudinit import (distros, helpers, cloud, util) from cloudinit.tests.helpers import ( - FilesystemMockingTestCase, mock, skipUnlessJsonSchema) + CiTestCase, FilesystemMockingTestCase, mock, skipUnlessJsonSchema) +import copy import os from os.path import dirname import shutil -NTP_TEMPLATE = b"""\ +NTP_TEMPLATE = """\ ## template: jinja servers {{servers}} pools {{pools}} """ -TIMESYNCD_TEMPLATE = b"""\ +TIMESYNCD_TEMPLATE = """\ ## template:jinja [Time] {% if servers or pools -%} @@ -32,56 +33,88 @@ class TestNtp(FilesystemMockingTestCase): def setUp(self): super(TestNtp, self).setUp() - self.subp = util.subp self.new_root = self.tmp_dir() + self.add_patch('cloudinit.util.system_is_snappy', 'm_snappy') + self.m_snappy.return_value = False + self.add_patch('cloudinit.util.system_info', 'm_sysinfo') + 
self.m_sysinfo.return_value = {'dist': ('Distro', '99.1', 'Codename')} - def _get_cloud(self, distro): - self.patchUtils(self.new_root) + def _get_cloud(self, distro, sys_cfg=None): + self.new_root = self.reRoot(root=self.new_root) paths = helpers.Paths({'templates_dir': self.new_root}) cls = distros.fetch(distro) - mydist = cls(distro, {}, paths) - myds = DataSourceNone.DataSourceNone({}, mydist, paths) - return cloud.Cloud(myds, paths, {}, mydist, None) + if not sys_cfg: + sys_cfg = {} + mydist = cls(distro, sys_cfg, paths) + myds = DataSourceNone.DataSourceNone(sys_cfg, mydist, paths) + return cloud.Cloud(myds, paths, sys_cfg, mydist, None) + + def _get_template_path(self, template_name, distro, basepath=None): + # ntp.conf.{distro} -> ntp.conf.debian.tmpl + template_fn = '{0}.tmpl'.format( + template_name.replace('{distro}', distro)) + if not basepath: + basepath = self.new_root + path = os.path.join(basepath, template_fn) + return path + + def _generate_template(self, template=None): + if not template: + template = NTP_TEMPLATE + confpath = os.path.join(self.new_root, 'client.conf') + template_fn = os.path.join(self.new_root, 'client.conf.tmpl') + util.write_file(template_fn, content=template) + return (confpath, template_fn) + + def _mock_ntp_client_config(self, client=None, distro=None): + if not client: + client = 'ntp' + if not distro: + distro = 'ubuntu' + dcfg = cc_ntp.distro_ntp_client_configs(distro) + if client == 'systemd-timesyncd': + template = TIMESYNCD_TEMPLATE + else: + template = NTP_TEMPLATE + (confpath, template_fn) = self._generate_template(template=template) + ntpconfig = copy.deepcopy(dcfg[client]) + ntpconfig['confpath'] = confpath + ntpconfig['template_name'] = os.path.basename(confpath) + return ntpconfig @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install(self, mock_util): - """ntp_install installs via install_func when check_exe is absent.""" + """ntp_install_client runs install_func when check_exe is absent.""" mock_util.which.return_value = None # check_exe not found. install_func = mock.MagicMock() - cc_ntp.install_ntp(install_func, packages=['ntpx'], check_exe='ntpdx') - + cc_ntp.install_ntp_client(install_func, + packages=['ntpx'], check_exe='ntpdx') mock_util.which.assert_called_with('ntpdx') install_func.assert_called_once_with(['ntpx']) @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install_not_needed(self, mock_util): - """ntp_install doesn't attempt install when check_exe is found.""" - mock_util.which.return_value = ["/usr/sbin/ntpd"] # check_exe found. + """ntp_install_client doesn't install when check_exe is found.""" + client = 'chrony' + mock_util.which.return_value = [client] # check_exe found. 
install_func = mock.MagicMock() - cc_ntp.install_ntp(install_func, packages=['ntp'], check_exe='ntpd') + cc_ntp.install_ntp_client(install_func, packages=[client], + check_exe=client) install_func.assert_not_called() @mock.patch("cloudinit.config.cc_ntp.util") def test_ntp_install_no_op_with_empty_pkg_list(self, mock_util): - """ntp_install calls install_func with empty list""" + """ntp_install_client runs install_func with empty list""" mock_util.which.return_value = None # check_exe not found install_func = mock.MagicMock() - cc_ntp.install_ntp(install_func, packages=[], check_exe='timesyncd') + cc_ntp.install_ntp_client(install_func, packages=[], + check_exe='timesyncd') install_func.assert_called_once_with([]) - def test_ntp_rename_ntp_conf(self): - """When NTP_CONF exists, rename_ntp moves it.""" - ntpconf = self.tmp_path("ntp.conf", self.new_root) - util.write_file(ntpconf, "") - with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): - cc_ntp.rename_ntp_conf() - self.assertFalse(os.path.exists(ntpconf)) - self.assertTrue(os.path.exists("{0}.dist".format(ntpconf))) - @mock.patch("cloudinit.config.cc_ntp.util") def test_reload_ntp_defaults(self, mock_util): """Test service is restarted/reloaded (defaults)""" - service = 'ntp' + service = 'ntp_service_name' cmd = ['service', service, 'restart'] cc_ntp.reload_ntp(service) mock_util.subp.assert_called_with(cmd, capture=True) @@ -89,193 +122,171 @@ class TestNtp(FilesystemMockingTestCase): @mock.patch("cloudinit.config.cc_ntp.util") def test_reload_ntp_systemd(self, mock_util): """Test service is restarted/reloaded (systemd)""" - service = 'ntp' - cmd = ['systemctl', 'reload-or-restart', service] + service = 'ntp_service_name' cc_ntp.reload_ntp(service, systemd=True) - mock_util.subp.assert_called_with(cmd, capture=True) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_reload_ntp_systemd_timesycnd(self, mock_util): - """Test service is restarted/reloaded (systemd/timesyncd)""" - service = 'systemd-timesycnd' cmd = ['systemctl', 'reload-or-restart', service] - cc_ntp.reload_ntp(service, systemd=True) mock_util.subp.assert_called_with(cmd, capture=True) + def test_ntp_rename_ntp_conf(self): + """When NTP_CONF exists, rename_ntp moves it.""" + ntpconf = self.tmp_path("ntp.conf", self.new_root) + util.write_file(ntpconf, "") + cc_ntp.rename_ntp_conf(confpath=ntpconf) + self.assertFalse(os.path.exists(ntpconf)) + self.assertTrue(os.path.exists("{0}.dist".format(ntpconf))) + def test_ntp_rename_ntp_conf_skip_missing(self): """When NTP_CONF doesn't exist rename_ntp doesn't create a file.""" ntpconf = self.tmp_path("ntp.conf", self.new_root) self.assertFalse(os.path.exists(ntpconf)) - with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): - cc_ntp.rename_ntp_conf() + cc_ntp.rename_ntp_conf(confpath=ntpconf) self.assertFalse(os.path.exists("{0}.dist".format(ntpconf))) self.assertFalse(os.path.exists(ntpconf)) - def test_write_ntp_config_template_from_ntp_conf_tmpl_with_servers(self): - """write_ntp_config_template reads content from ntp.conf.tmpl. - - It reads ntp.conf.tmpl if present and renders the value from servers - key. When no pools key is defined, template is rendered using an empty - list for pools. 
- """ - distro = 'ubuntu' - cfg = { - 'servers': ['192.168.2.1', '192.168.2.2'] - } - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path("ntp.conf", self.new_root) # Doesn't exist - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents + def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): + """write_ntp_config_template reads from $client.conf.distro.tmpl""" + servers = [] + pools = ['10.0.0.1', '10.0.0.2'] + (confpath, template_fn) = self._generate_template() + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.write_ntp_config_template('ubuntu', + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( - "servers ['192.168.2.1', '192.168.2.2']\npools []\n", - content.decode()) + "servers []\npools ['10.0.0.1', '10.0.0.2']\n", content.decode()) - def test_write_ntp_config_template_uses_ntp_conf_distro_no_servers(self): - """write_ntp_config_template reads content from ntp.conf.distro.tmpl. + def test_write_ntp_config_template_defaults_pools_w_empty_lists(self): + """write_ntp_config_template defaults pools servers upon empty config. - It reads ntp.conf..tmpl before attempting ntp.conf.tmpl. It - renders the value from the keys servers and pools. When no - servers value is present, template is rendered using an empty list. + When both pools and servers are empty, default NR_POOL_SERVERS get + configured. """ distro = 'ubuntu' - cfg = { - 'pools': ['10.0.0.1', '10.0.0.2'] - } - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - # Create ntp.conf.tmpl which isn't read - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(b'NOT READ: ntp.conf..tmpl is primary') - # Create ntp.conf.tmpl. - with open('{0}.{1}.tmpl'.format(ntp_conf, distro), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents + pools = cc_ntp.generate_server_names(distro) + servers = [] + (confpath, template_fn) = self._generate_template() + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.write_ntp_config_template(distro, + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( - "servers []\npools ['10.0.0.1', '10.0.0.2']\n", + "servers []\npools {0}\n".format(pools), content.decode()) - def test_write_ntp_config_template_defaults_pools_when_empty_lists(self): - """write_ntp_config_template defaults pools servers upon empty config. + def test_defaults_pools_empty_lists_sles(self): + """write_ntp_config_template defaults opensuse pools upon empty config. When both pools and servers are empty, default NR_POOL_SERVERS get configured. 
""" - distro = 'ubuntu' - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents - default_pools = [ - "{0}.{1}.pool.ntp.org".format(x, distro) - for x in range(0, cc_ntp.NR_POOL_SERVERS)] + distro = 'sles' + default_pools = cc_ntp.generate_server_names(distro) + (confpath, template_fn) = self._generate_template() + + cc_ntp.write_ntp_config_template(distro, + servers=[], pools=[], + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents + for pool in default_pools: + self.assertIn('opensuse', pool) self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - content.decode()) + "servers []\npools {0}\n".format(default_pools), content.decode()) self.assertIn( "Adding distro default ntp pool servers: {0}".format( ",".join(default_pools)), self.logs.getvalue()) - @mock.patch("cloudinit.config.cc_ntp.ntp_installable") - def test_ntp_handler_mocked_template(self, m_ntp_install): - """Test ntp handler renders ubuntu ntp.conf template.""" - pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] - servers = ['192.168.23.3', '192.168.23.4'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers - } - } - mycloud = self._get_cloud('ubuntu') - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - m_ntp_install.return_value = True - - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - with mock.patch.object(util, 'which', return_value=None): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - - content = util.read_file_or_url('file://' + ntp_conf).contents - self.assertEqual( - 'servers {0}\npools {1}\n'.format(servers, pools), - content.decode()) - - @mock.patch("cloudinit.config.cc_ntp.util") - def test_ntp_handler_mocked_template_snappy(self, m_util): - """Test ntp handler renders timesycnd.conf template on snappy.""" + def test_timesyncd_template(self): + """Test timesycnd template is correct""" pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] servers = ['192.168.23.3', '192.168.23.4'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers - } - } - mycloud = self._get_cloud('ubuntu') - m_util.system_is_snappy.return_value = True - - # Create timesyncd.conf.tmpl - tsyncd_conf = self.tmp_path("timesyncd.conf", self.new_root) - template = '{0}.tmpl'.format(tsyncd_conf) - with open(template, 'wb') as stream: - stream.write(TIMESYNCD_TEMPLATE) - - with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - - content = util.read_file_or_url('file://' + tsyncd_conf).contents + (confpath, template_fn) = self._generate_template( + template=TIMESYNCD_TEMPLATE) + cc_ntp.write_ntp_config_template('ubuntu', + servers=servers, pools=pools, + path=confpath, + template_fn=template_fn, + template=None) + content = util.read_file_or_url('file://' + confpath).contents self.assertEqual( "[Time]\nNTP=%s %s \n" % (" ".join(servers), " ".join(pools)), content.decode()) - def test_ntp_handler_real_distro_templates(self): - """Test ntp handler 
renders the shipped distro ntp.conf templates.""" + def test_distro_ntp_client_configs(self): + """Test we have updated ntp client configs on different distros""" + delta = copy.deepcopy(cc_ntp.DISTRO_CLIENT_CONFIG) + base = copy.deepcopy(cc_ntp.NTP_CLIENT_CONFIG) + # confirm no-delta distros match the base config + for distro in cc_ntp.distros: + if distro not in delta: + result = cc_ntp.distro_ntp_client_configs(distro) + self.assertEqual(base, result) + # for distros with delta, ensure the merged config values match + # what is set in the delta + for distro in delta.keys(): + result = cc_ntp.distro_ntp_client_configs(distro) + for client in delta[distro].keys(): + for key in delta[distro][client].keys(): + self.assertEqual(delta[distro][client][key], + result[client][key]) + + def test_ntp_handler_real_distro_ntp_templates(self): + """Test ntp handler renders the shipped distro ntp client templates.""" pools = ['0.mycompany.pool.ntp.org', '3.mycompany.pool.ntp.org'] servers = ['192.168.23.3', '192.168.23.4'] - cfg = { - 'ntp': { - 'pools': pools, - 'servers': servers - } - } - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - for distro in ('debian', 'ubuntu', 'fedora', 'rhel', 'sles'): - mycloud = self._get_cloud(distro) - root_dir = dirname(dirname(os.path.realpath(util.__file__))) - tmpl_file = os.path.join( - '{0}/templates/ntp.conf.{1}.tmpl'.format(root_dir, distro)) - # Create a copy in our tmp_dir - shutil.copy( - tmpl_file, - os.path.join(self.new_root, 'ntp.conf.%s.tmpl' % distro)) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - with mock.patch.object(util, 'which', return_value=[True]): - cc_ntp.handle('notimportant', cfg, mycloud, None, None) - - content = util.read_file_or_url('file://' + ntp_conf).contents - expected_servers = '\n'.join([ - 'server {0} iburst'.format(server) for server in servers]) - self.assertIn( - expected_servers, content.decode(), - 'failed to render ntp.conf for distro:{0}'.format(distro)) - expected_pools = '\n'.join([ - 'pool {0} iburst'.format(pool) for pool in pools]) - self.assertIn( - expected_pools, content.decode(), - 'failed to render ntp.conf for distro:{0}'.format(distro)) + for client in ['ntp', 'systemd-timesyncd', 'chrony']: + for distro in cc_ntp.distros: + distro_cfg = cc_ntp.distro_ntp_client_configs(distro) + ntpclient = distro_cfg[client] + confpath = ( + os.path.join(self.new_root, ntpclient.get('confpath')[1:])) + template = ntpclient.get('template_name') + # find sourcetree template file + root_dir = ( + dirname(dirname(os.path.realpath(util.__file__))) + + '/templates') + source_fn = self._get_template_path(template, distro, + basepath=root_dir) + template_fn = self._get_template_path(template, distro) + # don't fail if cloud-init doesn't have a template for + # a distro,client pair + if not os.path.exists(source_fn): + continue + # Create a copy in our tmp_dir + shutil.copy(source_fn, template_fn) + cc_ntp.write_ntp_config_template(distro, servers=servers, + pools=pools, path=confpath, + template_fn=template_fn) + content = util.read_file_or_url('file://' + confpath).contents + if client in ['ntp', 'chrony']: + expected_servers = '\n'.join([ + 'server {0} iburst'.format(srv) for srv in servers]) + print('distro=%s client=%s' % (distro, client)) + self.assertIn(expected_servers, content.decode(), + ('failed to render {0} conf' + ' for distro:{1}'.format(client, distro))) + expected_pools = '\n'.join([ + 'pool {0} iburst'.format(pool) for pool in pools]) + self.assertIn(expected_pools, 
content.decode(), + ('failed to render {0} conf' + ' for distro:{1}'.format(client, distro))) + elif client == 'systemd-timesyncd': + expected_content = ( + "# cloud-init generated file\n" + + "# See timesyncd.conf(5) for details.\n\n" + + "[Time]\nNTP=%s %s \n" % (" ".join(servers), + " ".join(pools))) + self.assertEqual(expected_content, content.decode()) def test_no_ntpcfg_does_nothing(self): """When no ntp section is defined handler logs a warning and noops.""" @@ -285,95 +296,99 @@ class TestNtp(FilesystemMockingTestCase): 'not present or disabled by cfg\n', self.logs.getvalue()) - def test_ntp_handler_schema_validation_allows_empty_ntp_config(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_allows_empty_ntp_config(self, + m_select): """Ntp schema validation allows for an empty ntp: configuration.""" valid_empty_configs = [{'ntp': {}}, {'ntp': None}] - distro = 'ubuntu' - cc = self._get_cloud(distro) - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) for valid_empty_config in valid_empty_configs: - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', valid_empty_config, cc, None, []) - with open(ntp_conf) as stream: - content = stream.read() - default_pools = [ - "{0}.{1}.pool.ntp.org".format(x, distro) - for x in range(0, cc_ntp.NR_POOL_SERVERS)] - self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - content) - self.assertNotIn('Invalid config:', self.logs.getvalue()) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', valid_empty_config, mycloud, None, []) + content = util.read_file_or_url('file://' + confpath).contents + pools = cc_ntp.generate_server_names(mycloud.distro.name) + self.assertEqual( + "servers []\npools {0}\n".format(pools), content.decode()) + self.assertNotIn('Invalid config:', self.logs.getvalue()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_non_string_item_type(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_non_string_item_type(self, + m_sel): """Ntp schema validation warns of non-strings in pools or servers. Schema validation is not strict, so ntp config is still be rendered. 
""" invalid_config = {'ntp': {'pools': [123], 'servers': ['valid', None]}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" - "ntp.servers.1: None is not of type 'string'", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual("servers ['valid', None]\npools [123]\n", content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_sel.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools.0: 123 is not of type 'string'\n" + "ntp.servers.1: None is not of type 'string'", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual("servers ['valid', None]\npools [123]\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_of_non_array_type(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_of_non_array_type(self, + m_select): """Ntp schema validation warns of non-array pools or servers types. Schema validation is not strict, so ntp config is still be rendered. """ invalid_config = {'ntp': {'pools': 123, 'servers': 'non-array'}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools: 123 is not of type 'array'\n" - "ntp.servers: 'non-array' is not of type 'array'", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual("servers non-array\npools 123\n", content) + + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools: 123 is not of type 'array'\n" + "ntp.servers: 'non-array' is not of type 'array'", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual("servers non-array\npools 123\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_invalid_key_present(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_invalid_key_present(self, + m_select): """Ntp schema validation warns of invalid keys present in ntp config. Schema validation is not strict, so ntp config is still be rendered. 
""" invalid_config = { 'ntp': {'invalidkey': 1, 'pools': ['0.mycompany.pool.ntp.org']}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp: Additional properties are not allowed " - "('invalidkey' was unexpected)", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual( - "servers []\npools ['0.mycompany.pool.ntp.org']\n", - content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp: Additional properties are not allowed " + "('invalidkey' was unexpected)", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools ['0.mycompany.pool.ntp.org']\n", + content.decode()) @skipUnlessJsonSchema() - def test_ntp_handler_schema_validation_warns_of_duplicates(self): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_schema_validation_warns_of_duplicates(self, m_select): """Ntp schema validation warns of duplicates in servers or pools. Schema validation is not strict, so ntp config is still be rendered. @@ -381,74 +396,334 @@ class TestNtp(FilesystemMockingTestCase): invalid_config = { 'ntp': {'pools': ['0.mypool.org', '0.mypool.org'], 'servers': ['10.0.0.1', '10.0.0.1']}} - cc = self._get_cloud('ubuntu') - ntp_conf = os.path.join(self.new_root, 'ntp.conf') - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.handle('cc_ntp', invalid_config, cc, None, []) - self.assertIn( - "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org'] has " - "non-unique elements\nntp.servers: ['10.0.0.1', '10.0.0.1'] has " - "non-unique elements", - self.logs.getvalue()) - with open(ntp_conf) as stream: - content = stream.read() - self.assertEqual( - "servers ['10.0.0.1', '10.0.0.1']\n" - "pools ['0.mypool.org', '0.mypool.org']\n", - content) + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', invalid_config, mycloud, None, []) + self.assertIn( + "Invalid config:\nntp.pools: ['0.mypool.org', '0.mypool.org']" + " has non-unique elements\nntp.servers: " + "['10.0.0.1', '10.0.0.1'] has non-unique elements", + self.logs.getvalue()) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers ['10.0.0.1', '10.0.0.1']\n" + "pools ['0.mypool.org', '0.mypool.org']\n", content.decode()) - @mock.patch("cloudinit.config.cc_ntp.ntp_installable") - def test_ntp_handler_timesyncd(self, m_ntp_install): + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_timesyncd(self, m_select): """Test ntp handler configures timesyncd""" - m_ntp_install.return_value = False - distro = 'ubuntu' - cfg = { - 'servers': ['192.168.2.1', '192.168.2.2'], - 'pools': ['0.mypool.org'], - } - mycloud = self._get_cloud(distro) - tsyncd_conf = 
self.tmp_path("timesyncd.conf", self.new_root) - # Create timesyncd.conf.tmpl - template = '{0}.tmpl'.format(tsyncd_conf) - print(template) - with open(template, 'wb') as stream: - stream.write(TIMESYNCD_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.TIMESYNCD_CONF', tsyncd_conf): - cc_ntp.write_ntp_config_template(cfg, mycloud, tsyncd_conf, - template='timesyncd.conf') - - content = util.read_file_or_url('file://' + tsyncd_conf).contents - self.assertEqual( - "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", - content.decode()) + servers = ['192.168.2.1', '192.168.2.2'] + pools = ['0.mypool.org'] + cfg = {'ntp': {'servers': servers, 'pools': pools}} + client = 'systemd-timesyncd' + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro, + client=client) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + cc_ntp.handle('cc_ntp', cfg, mycloud, None, []) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "[Time]\nNTP=192.168.2.1 192.168.2.2 0.mypool.org \n", + content.decode()) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_handler_enabled_false(self, m_select): + """Test ntp handler does not run if enabled: false """ + cfg = {'ntp': {'enabled': False}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + self.assertEqual(0, m_select.call_count) + + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + @mock.patch("cloudinit.distros.Distro.uses_systemd") + def test_ntp_the_whole_package(self, m_sysd, m_select): + """Test enabled config renders template, and restarts service """ + cfg = {'ntp': {'enabled': True}} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(distro=distro) + confpath = ntpconfig['confpath'] + service_name = ntpconfig['service_name'] + m_select.return_value = ntpconfig + pools = cc_ntp.generate_server_names(mycloud.distro.name) + # force uses systemd path + m_sysd.return_value = True + with mock.patch('cloudinit.config.cc_ntp.util') as m_util: + # allow use of util.mergemanydict + m_util.mergemanydict.side_effect = util.mergemanydict + # default client is present + m_util.which.return_value = True + # use the config 'enabled' value + m_util.is_false.return_value = util.is_false( + cfg['ntp']['enabled']) + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + m_util.subp.assert_called_with( + ['systemctl', 'reload-or-restart', + service_name], capture=True) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools {0}\n".format(pools), + content.decode()) + + def test_opensuse_picks_chrony(self): + """Test opensuse picks chrony or ntp on certain distro versions""" + # < 15.0 => ntp + self.m_sysinfo.return_value = {'dist': + ('openSUSE', '13.2', 'Harlequin')} + mycloud = self._get_cloud('opensuse') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('ntp', expected_client) + + # >= 15.0 and not openSUSE => chrony + self.m_sysinfo.return_value = {'dist': + ('SLES', '15.0', + 'SUSE Linux Enterprise Server 15')} + mycloud = self._get_cloud('sles') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('chrony', expected_client) + + # >= 15.0 and openSUSE and ver != 42 => chrony + self.m_sysinfo.return_value = {'dist': ('openSUSE Tumbleweed', + '20180326', + 'timbleweed')} + 
mycloud = self._get_cloud('opensuse') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('chrony', expected_client) + + def test_ubuntu_xenial_picks_ntp(self): + """Test Ubuntu picks ntp on xenial release""" + + self.m_sysinfo.return_value = {'dist': ('Ubuntu', '16.04', 'xenial')} + mycloud = self._get_cloud('ubuntu') + expected_client = mycloud.distro.preferred_ntp_clients[0] + self.assertEqual('ntp', expected_client) - def test_write_ntp_config_template_defaults_pools_empty_lists_sles(self): - """write_ntp_config_template defaults pools servers upon empty config. + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_snappy_system_picks_timesyncd(self, m_which): + """Test snappy systems prefer installed clients""" - When both pools and servers are empty, default NR_POOL_SERVERS get - configured. - """ - distro = 'sles' - mycloud = self._get_cloud(distro) - ntp_conf = self.tmp_path('ntp.conf', self.new_root) # Doesn't exist - # Create ntp.conf.tmpl - with open('{0}.tmpl'.format(ntp_conf), 'wb') as stream: - stream.write(NTP_TEMPLATE) - with mock.patch('cloudinit.config.cc_ntp.NTP_CONF', ntp_conf): - cc_ntp.write_ntp_config_template({}, mycloud, ntp_conf) - content = util.read_file_or_url('file://' + ntp_conf).contents - default_pools = [ - "{0}.opensuse.pool.ntp.org".format(x) - for x in range(0, cc_ntp.NR_POOL_SERVERS)] - self.assertEqual( - "servers []\npools {0}\n".format(default_pools), - content.decode()) - self.assertIn( - "Adding distro default ntp pool servers: {0}".format( - ",".join(default_pools)), - self.logs.getvalue()) + # we are on ubuntu-core here + self.m_snappy.return_value = True + # ubuntu core systems will have timesyncd installed + m_which.side_effect = iter([None, '/lib/systemd/systemd-timesyncd', + None, None, None]) + distro = 'ubuntu' + mycloud = self._get_cloud(distro) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_client = 'systemd-timesyncd' + expected_cfg = distro_configs[expected_client] + expected_calls = [] + # we only get to timesyncd + for client in mycloud.distro.preferred_ntp_clients[0:2]: + cfg = distro_configs[client] + expected_calls.append(mock.call(cfg['check_exe'])) + result = cc_ntp.select_ntp_client(None, mycloud.distro) + m_which.assert_has_calls(expected_calls) + self.assertEqual(sorted(expected_cfg), sorted(cfg)) + self.assertEqual(sorted(expected_cfg), sorted(result)) + + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_ntp_distro_searches_all_preferred_clients(self, m_which): + """Test select_ntp_client search all distro perferred clients """ + # nothing is installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_client = mycloud.distro.preferred_ntp_clients[0] + expected_cfg = distro_configs[expected_client] + expected_calls = [] + for client in mycloud.distro.preferred_ntp_clients: + cfg = distro_configs[client] + expected_calls.append(mock.call(cfg['check_exe'])) + cc_ntp.select_ntp_client({}, mycloud.distro) + m_which.assert_has_calls(expected_calls) + self.assertEqual(sorted(expected_cfg), sorted(cfg)) + + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_user_cfg_ntp_client_auto_uses_distro_clients(self, m_which): + """Test user_cfg.ntp_client='auto' defaults to distro search""" + # nothing is installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + distro_configs = 
cc_ntp.distro_ntp_client_configs(distro) + expected_client = mycloud.distro.preferred_ntp_clients[0] + expected_cfg = distro_configs[expected_client] + expected_calls = [] + for client in mycloud.distro.preferred_ntp_clients: + cfg = distro_configs[client] + expected_calls.append(mock.call(cfg['check_exe'])) + cc_ntp.select_ntp_client('auto', mycloud.distro) + m_which.assert_has_calls(expected_calls) + self.assertEqual(sorted(expected_cfg), sorted(cfg)) + + @mock.patch('cloudinit.config.cc_ntp.write_ntp_config_template') + @mock.patch('cloudinit.cloud.Cloud.get_template_filename') + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_ntp_custom_client_overrides_installed_clients(self, m_which, + m_tmpfn, m_write): + """Test user client is installed despite other clients present """ + client = 'ntpdate' + cfg = {'ntp': {'ntp_client': client}} + for distro in cc_ntp.distros: + # client is not installed + m_which.side_effect = iter([None]) + mycloud = self._get_cloud(distro) + with mock.patch.object(mycloud.distro, + 'install_packages') as m_install: + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + m_install.assert_called_with([client]) + m_which.assert_called_with(client) + + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_ntp_system_config_overrides_distro_builtin_clients(self, m_which): + """Test distro system_config overrides builtin preferred ntp clients""" + system_client = 'chrony' + sys_cfg = {'ntp_client': system_client} + # no clients installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro, sys_cfg=sys_cfg) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_cfg = distro_configs[system_client] + result = cc_ntp.select_ntp_client(None, mycloud.distro) + self.assertEqual(sorted(expected_cfg), sorted(result)) + m_which.assert_has_calls([]) + + @mock.patch('cloudinit.config.cc_ntp.util.which') + def test_ntp_user_config_overrides_system_cfg(self, m_which): + """Test user-data overrides system_config ntp_client""" + system_client = 'chrony' + sys_cfg = {'ntp_client': system_client} + user_client = 'systemd-timesyncd' + # no clients installed + m_which.return_value = None + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro, sys_cfg=sys_cfg) + distro_configs = cc_ntp.distro_ntp_client_configs(distro) + expected_cfg = distro_configs[user_client] + result = cc_ntp.select_ntp_client(user_client, mycloud.distro) + self.assertEqual(sorted(expected_cfg), sorted(result)) + m_which.assert_has_calls([]) + + @mock.patch('cloudinit.config.cc_ntp.reload_ntp') + @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') + def test_ntp_user_provided_config_with_template(self, m_install, m_reload): + custom = r'\n#MyCustomTemplate' + user_template = NTP_TEMPLATE + custom + confpath = os.path.join(self.new_root, 'etc/myntp/myntp.conf') + cfg = { + 'ntp': { + 'pools': ['mypool.org'], + 'ntp_client': 'myntpd', + 'config': { + 'check_exe': 'myntpd', + 'confpath': confpath, + 'packages': ['myntp'], + 'service_name': 'myntp', + 'template': user_template, + } + } + } + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.handle('notimportant', cfg, mycloud, None, None) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools ['mypool.org']\n%s" % custom, + content.decode()) + + 
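
For orientation while reading the selection tests above: user data's ntp:ntp_client takes precedence over a system-config ntp_client, 'auto' (or no setting at all) falls back to walking the distro's preferred client list, and within that walk a client whose check_exe is already present is chosen ahead of one that would need its packages installed, with the first preference used as the fallback when nothing is found. The stand-alone sketch below is a deliberately simplified rendering of that search, not the cc_ntp implementation added by the patch: shutil.which stands in for cloud-init's util.which, and the small client table is a trimmed, partly hypothetical illustration.

    import shutil

    # Hypothetical, trimmed client table: the real merged configs carry more
    # keys (confpath, packages, service_name, template/template_name); only
    # the check command matters for this sketch.
    CLIENT_CHECK_EXES = {
        'chrony': 'chronyd',
        'systemd-timesyncd': '/lib/systemd/systemd-timesyncd',
        'ntpdate': 'ntpdate',
    }

    def pick_ntp_client(preferred, check_exes=CLIENT_CHECK_EXES):
        """Return the first preferred client already present on the system,
        falling back to the first preference when none are installed."""
        fallback = None
        for name in preferred:
            exe = check_exes.get(name)
            if exe is None:
                continue
            if fallback is None:
                fallback = name            # remember the top preference
            if shutil.which(exe):          # stand-in for util.which()
                return name                # an installed client wins outright
        return fallback                    # nothing installed: install this one

    # If only the systemd-timesyncd binary exists on disk this returns
    # 'systemd-timesyncd'; on a machine with none of the check commands it
    # falls back to 'chrony', the first name in the preference list.
    print(pick_ntp_client(['chrony', 'systemd-timesyncd', 'ntpdate']))

(The tests above compare select_ntp_client's result against a full per-client config dict, so the real function hands back the merged configuration rather than just a name; the sketch keeps only the part relevant to the preference walk.)
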
@mock.patch('cloudinit.config.cc_ntp.supplemental_schema_validation') + @mock.patch('cloudinit.config.cc_ntp.reload_ntp') + @mock.patch('cloudinit.config.cc_ntp.install_ntp_client') + @mock.patch('cloudinit.config.cc_ntp.select_ntp_client') + def test_ntp_user_provided_config_template_only(self, m_select, m_install, + m_reload, m_schema): + """Test custom template for default client""" + custom = r'\n#MyCustomTemplate' + user_template = NTP_TEMPLATE + custom + client = 'chrony' + cfg = { + 'pools': ['mypool.org'], + 'ntp_client': client, + 'config': { + 'template': user_template, + } + } + expected_merged_cfg = { + 'check_exe': 'chronyd', + 'confpath': '{tmpdir}/client.conf'.format(tmpdir=self.new_root), + 'template_name': 'client.conf', 'template': user_template, + 'service_name': 'chrony', 'packages': ['chrony']} + for distro in cc_ntp.distros: + mycloud = self._get_cloud(distro) + ntpconfig = self._mock_ntp_client_config(client=client, + distro=distro) + confpath = ntpconfig['confpath'] + m_select.return_value = ntpconfig + mock_path = 'cloudinit.config.cc_ntp.temp_utils._TMPDIR' + with mock.patch(mock_path, self.new_root): + cc_ntp.handle('notimportant', + {'ntp': cfg}, mycloud, None, None) + content = util.read_file_or_url('file://' + confpath).contents + self.assertEqual( + "servers []\npools ['mypool.org']\n%s" % custom, + content.decode()) + m_schema.assert_called_with(expected_merged_cfg) + + +class TestSupplementalSchemaValidation(CiTestCase): + + def test_error_on_missing_keys(self): + """ValueError raised reporting any missing required ntp:config keys""" + cfg = {} + match = (r'Invalid ntp configuration:\\nMissing required ntp:config' + ' keys: check_exe, confpath, packages, service_name') + with self.assertRaisesRegex(ValueError, match): + cc_ntp.supplemental_schema_validation(cfg) + + def test_error_requiring_either_template_or_template_name(self): + """ValueError raised if both template not template_name are None.""" + cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', + 'template': None, 'template_name': None, 'packages': []} + match = (r'Invalid ntp configuration:\\nEither ntp:config:template' + ' or ntp:config:template_name values are required') + with self.assertRaisesRegex(ValueError, match): + cc_ntp.supplemental_schema_validation(cfg) + + def test_error_on_non_list_values(self): + """ValueError raised when packages is not of type list.""" + cfg = {'confpath': 'someconf', 'check_exe': '', 'service_name': '', + 'template': 'asdf', 'template_name': None, 'packages': 'NOPE'} + match = (r'Invalid ntp configuration:\\nExpected a list of required' + ' package names for ntp:config:packages. Found \(NOPE\)') + with self.assertRaisesRegex(ValueError, match): + cc_ntp.supplemental_schema_validation(cfg) + + def test_error_on_non_string_values(self): + """ValueError raised for any values expected as string type.""" + cfg = {'confpath': 1, 'check_exe': 2, 'service_name': 3, + 'template': 4, 'template_name': 5, 'packages': []} + errors = [ + 'Expected a config file path ntp:config:confpath. Found (1)', + 'Expected a string type for ntp:config:check_exe. Found (2)', + 'Expected a string type for ntp:config:service_name. Found (3)', + 'Expected a string type for ntp:config:template. Found (4)', + 'Expected a string type for ntp:config:template_name. 
Found (5)'] + with self.assertRaises(ValueError) as context_mgr: + cc_ntp.supplemental_schema_validation(cfg) + error_msg = str(context_mgr.exception) + for error in errors: + self.assertIn(error, error_msg) # vi: ts=4 expandtab -- cgit v1.2.3 From acca826adf39ddfedde78cfbfc47e81a06c6f42a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 18 Apr 2018 03:35:41 -0600 Subject: pycodestyle: Fix invalid escape sequences in string literals. Python has deprecated these invalid string literals now https://bugs.python.org/issue27364 and pycodestyle is identifying them with a W605 warning. https://github.com/PyCQA/pycodestyle/pull/676 So basically, any use of \ not followed by one of [\'"abfnrtv] or \ooo (octal) \xhh (hex) or a newline is invalid. This is most comomnly seen for us in regex. To solve, you either: a.) use a raw string r'...' b.) correctly escape the \ that was not intended to be interpreted. --- cloudinit/analyze/__main__.py | 2 +- cloudinit/config/cc_apt_configure.py | 2 +- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/config/cc_rsyslog.py | 4 ++-- cloudinit/distros/freebsd.py | 6 +++--- cloudinit/util.py | 6 +++--- tests/cloud_tests/testcases/base.py | 2 +- tests/unittests/test_util.py | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) (limited to 'tests/cloud_tests') diff --git a/cloudinit/analyze/__main__.py b/cloudinit/analyze/__main__.py index 3ba5903f..f8613656 100644 --- a/cloudinit/analyze/__main__.py +++ b/cloudinit/analyze/__main__.py @@ -69,7 +69,7 @@ def analyze_blame(name, args): """ (infh, outfh) = configure_io(args) blame_format = ' %ds (%n)' - r = re.compile('(^\s+\d+\.\d+)', re.MULTILINE) + r = re.compile(r'(^\s+\d+\.\d+)', re.MULTILINE) for idx, record in enumerate(show.show_events(_get_events(infh), blame_format)): srecs = sorted(filter(r.match, record), reverse=True) diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 5b9cbca0..afaca464 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -121,7 +121,7 @@ and https protocols respectively. The ``proxy`` key also exists as an alias for All source entries in ``apt-sources`` that match regex in ``add_apt_repo_match`` will be added to the system using ``add-apt-repository``. 
If ``add_apt_repo_match`` is not specified, it defaults -to ``^[\w-]+:\w`` +to ``^[\\w-]+:\\w`` **Add source list entries:** diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 4da3a588..50b37470 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -74,7 +74,7 @@ def givecmdline(pid): if util.is_FreeBSD(): (output, _err) = util.subp(['procstat', '-c', str(pid)]) line = output.splitlines()[1] - m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line) + m = re.search(r'\d+ (\w|\.|-)+\s+(/\w.+)', line) return m.group(2) else: return util.load_file("/proc/%s/cmdline" % pid) diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index af08788c..27d2366c 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -203,8 +203,8 @@ LOG = logging.getLogger(__name__) COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*') HOST_PORT_RE = re.compile( r'^(?P[@]{0,2})' - '(([[](?P[^\]]*)[\]])|(?P[^:]*))' - '([:](?P[0-9]+))?$') + r'(([[](?P[^\]]*)[\]])|(?P[^:]*))' + r'([:](?P[0-9]+))?$') def reload_syslog(command=DEF_RELOAD, systemd=False): diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 754d3df6..099fac5c 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -110,7 +110,7 @@ class Distro(distros.Distro): if dev.startswith('lo'): return dev - n = re.search('\d+$', dev) + n = re.search(r'\d+$', dev) index = n.group(0) (out, err) = util.subp(['ifconfig', '-a']) @@ -118,7 +118,7 @@ class Distro(distros.Distro): if len(x.split()) > 0] bsddev = 'NOT_FOUND' for line in ifconfigoutput: - m = re.match('^\w+', line) + m = re.match(r'^\w+', line) if m: if m.group(0).startswith('lo'): continue @@ -128,7 +128,7 @@ class Distro(distros.Distro): break # Replace the index with the one we're after. - bsddev = re.sub('\d+$', index, bsddev) + bsddev = re.sub(r'\d+$', index, bsddev) LOG.debug("Using network interface %s", bsddev) return bsddev diff --git a/cloudinit/util.py b/cloudinit/util.py index acdc0d85..1717b529 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1446,7 +1446,7 @@ def get_config_logfiles(cfg): for fmt in get_output_cfg(cfg, None): if not fmt: continue - match = re.match('(?P\||>+)\s*(?P.*)', fmt) + match = re.match(r'(?P\||>+)\s*(?P.*)', fmt) if not match: continue target = match.group('target') @@ -2275,8 +2275,8 @@ def parse_mount(path): # the regex is a bit complex. to better understand this regex see: # https://regex101.com/r/2F6c1k/1 # https://regex101.com/r/T2en7a/1 - regex = r'^(/dev/[\S]+|.*zroot\S*?) on (/[\S]*) ' + \ - '(?=(?:type)[\s]+([\S]+)|\(([^,]*))' + regex = (r'^(/dev/[\S]+|.*zroot\S*?) 
on (/[\S]*) ' + r'(?=(?:type)[\s]+([\S]+)|\(([^,]*))') for line in mount_locs: m = re.search(regex, line) if not m: diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 7598d469..4fda8f91 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -235,7 +235,7 @@ class CloudTestCase(unittest.TestCase): 'found unexpected kvm availability-zone %s' % v1_data['availability-zone']) self.assertIsNotNone( - re.match('[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}', + re.match(r'[\da-f]{8}(-[\da-f]{4}){3}-[\da-f]{12}', v1_data['instance-id']), 'kvm instance-id is not a UUID: %s' % v1_data['instance-id']) self.assertIn('ubuntu', v1_data['local-hostname']) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 50101906..7cbd5538 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -800,7 +800,7 @@ class TestSubp(helpers.CiTestCase): os.chmod(noshebang, os.stat(noshebang).st_mode | stat.S_IEXEC) self.assertRaisesRegex(util.ProcessExecutionError, - 'Missing #! in script\?', + r'Missing #! in script\?', util.subp, (noshebang,)) def test_returns_none_if_no_capture(self): -- cgit v1.2.3 From 1081962eacf2814fea6f4fa3255c530de14e4a24 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 19 Apr 2018 21:30:08 -0600 Subject: pylint: pay attention to unused variable warnings. This enables warnings produced by pylint for unused variables (W0612), and fixes the existing errors. --- .pylintrc | 2 +- cloudinit/analyze/dump.py | 2 +- cloudinit/cmd/tests/test_main.py | 6 ++-- cloudinit/config/cc_apt_configure.py | 2 +- cloudinit/config/cc_emit_upstart.py | 2 +- cloudinit/config/cc_resizefs.py | 8 ++---- cloudinit/config/cc_rh_subscription.py | 18 ++++++------ cloudinit/config/cc_snap.py | 4 +-- cloudinit/config/cc_snappy.py | 4 +-- cloudinit/config/cc_ubuntu_advantage.py | 4 +-- cloudinit/config/schema.py | 4 +-- cloudinit/distros/freebsd.py | 2 +- cloudinit/distros/ubuntu.py | 2 +- cloudinit/net/__init__.py | 2 +- cloudinit/net/cmdline.py | 2 +- cloudinit/net/dhcp.py | 2 +- cloudinit/net/sysconfig.py | 2 +- cloudinit/reporting/events.py | 2 +- cloudinit/sources/DataSourceAliYun.py | 2 +- cloudinit/sources/DataSourceAzure.py | 33 +++++++++------------- cloudinit/sources/DataSourceMAAS.py | 2 +- cloudinit/sources/DataSourceOVF.py | 2 +- cloudinit/sources/DataSourceOpenStack.py | 4 +-- cloudinit/sources/helpers/digitalocean.py | 7 ++--- cloudinit/sources/helpers/openstack.py | 2 +- cloudinit/sources/helpers/vmware/imc/config_nic.py | 2 +- .../sources/helpers/vmware/imc/config_passwd.py | 4 +-- .../sources/helpers/vmware/imc/guestcust_util.py | 4 +-- cloudinit/sources/tests/test_init.py | 2 +- cloudinit/templater.py | 2 +- cloudinit/tests/helpers.py | 2 +- cloudinit/tests/test_util.py | 2 +- cloudinit/url_helper.py | 2 +- cloudinit/util.py | 2 +- tests/cloud_tests/bddeb.py | 2 +- tests/cloud_tests/collect.py | 3 +- tests/cloud_tests/platforms/instances.py | 2 +- tests/cloud_tests/platforms/lxd/instance.py | 10 +++---- tests/cloud_tests/setup_image.py | 11 ++++---- tests/cloud_tests/testcases/base.py | 2 +- .../testcases/examples/including_user_groups.py | 2 +- tests/cloud_tests/testcases/modules/user_groups.py | 2 +- tests/cloud_tests/util.py | 2 +- tests/unittests/test__init__.py | 2 +- tests/unittests/test_datasource/test_azure.py | 4 +-- tests/unittests/test_datasource/test_maas.py | 4 +-- tests/unittests/test_datasource/test_nocloud.py | 3 -- .../test_handler/test_handler_apt_source_v3.py | 2 +- 
tests/unittests/test_handler/test_handler_ntp.py | 2 +- tests/unittests/test_templating.py | 4 +-- tests/unittests/test_util.py | 6 ++-- 51 files changed, 95 insertions(+), 112 deletions(-) (limited to 'tests/cloud_tests') diff --git a/.pylintrc b/.pylintrc index 0bdfa59d..3bfa0c81 100644 --- a/.pylintrc +++ b/.pylintrc @@ -28,7 +28,7 @@ jobs=4 # W0703(broad-except) # W1401(anomalous-backslash-in-string) -disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0612, W0613, W0621, W0622, W0631, W0703, W1401 +disable=C, F, I, R, W0105, W0107, W0201, W0212, W0221, W0222, W0223, W0231, W0311, W0511, W0602, W0603, W0611, W0613, W0621, W0622, W0631, W0703, W1401 [REPORTS] diff --git a/cloudinit/analyze/dump.py b/cloudinit/analyze/dump.py index b071aa19..1f3060d0 100644 --- a/cloudinit/analyze/dump.py +++ b/cloudinit/analyze/dump.py @@ -112,7 +112,7 @@ def parse_ci_logline(line): return None event_description = stage_to_description[event_name] else: - (pymodloglvl, event_type, event_name) = eventstr.split()[0:3] + (_pymodloglvl, event_type, event_name) = eventstr.split()[0:3] event_description = eventstr.split(event_name)[1].strip() event = { diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py index dbe421c0..e2c54ae8 100644 --- a/cloudinit/cmd/tests/test_main.py +++ b/cloudinit/cmd/tests/test_main.py @@ -56,7 +56,7 @@ class TestMain(FilesystemMockingTestCase): cmdargs = myargs( debug=False, files=None, force=False, local=False, reporter=None, subcommand='init') - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', @@ -85,7 +85,7 @@ class TestMain(FilesystemMockingTestCase): cmdargs = myargs( debug=False, files=None, force=False, local=False, reporter=None, subcommand='init') - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', @@ -133,7 +133,7 @@ class TestMain(FilesystemMockingTestCase): self.assertEqual(main.LOG, log) self.assertIsNone(args) - (item1, item2) = wrap_and_call( + (_item1, item2) = wrap_and_call( 'cloudinit.cmd.main', {'util.close_stdin': True, 'netinfo.debug_info': 'my net debug info', diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index afaca464..e18944ec 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -378,7 +378,7 @@ def apply_debconf_selections(cfg, target=None): # get a complete list of packages listed in input pkgs_cfgd = set() - for key, content in selsets.items(): + for _key, content in selsets.items(): for line in content.splitlines(): if line.startswith("#"): continue diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py index 69dc2d5e..eb9fbe66 100644 --- a/cloudinit/config/cc_emit_upstart.py +++ b/cloudinit/config/cc_emit_upstart.py @@ -43,7 +43,7 @@ def is_upstart_system(): del myenv['UPSTART_SESSION'] check_cmd = ['initctl', 'version'] try: - (out, err) = util.subp(check_cmd, env=myenv) + (out, _err) = util.subp(check_cmd, env=myenv) return 'upstart' in out except util.ProcessExecutionError as e: LOG.debug("'%s' returned '%s', not using upstart", diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index 013e69b5..82f29e10 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -89,13 +89,11 @@ 
def _resize_zfs(mount_point, devpth): def _get_dumpfs_output(mount_point): - dumpfs_res, err = util.subp(['dumpfs', '-m', mount_point]) - return dumpfs_res + return util.subp(['dumpfs', '-m', mount_point])[0] def _get_gpart_output(part): - gpart_res, err = util.subp(['gpart', 'show', part]) - return gpart_res + return util.subp(['gpart', 'show', part])[0] def _can_skip_resize_ufs(mount_point, devpth): @@ -113,7 +111,7 @@ def _can_skip_resize_ufs(mount_point, devpth): if not line.startswith('#'): newfs_cmd = shlex.split(line) opt_value = 'O:Ua:s:b:d:e:f:g:h:i:jk:m:o:' - optlist, args = getopt.getopt(newfs_cmd[1:], opt_value) + optlist, _args = getopt.getopt(newfs_cmd[1:], opt_value) for o, a in optlist: if o == "-s": cur_fs_sz = int(a) diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 530808ce..1c679430 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -209,8 +209,7 @@ class SubscriptionManager(object): cmd.append("--serverurl={0}".format(self.server_hostname)) try: - return_out, return_err = self._sub_man_cli(cmd, - logstring_val=True) + return_out = self._sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -233,8 +232,7 @@ class SubscriptionManager(object): # Attempting to register the system only try: - return_out, return_err = self._sub_man_cli(cmd, - logstring_val=True) + return_out = self._sub_man_cli(cmd, logstring_val=True)[0] except util.ProcessExecutionError as e: if e.stdout == "": self.log_warn("Registration failed due " @@ -257,7 +255,7 @@ class SubscriptionManager(object): .format(self.servicelevel)] try: - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: if e.stdout.rstrip() != '': for line in e.stdout.split("\n"): @@ -275,7 +273,7 @@ class SubscriptionManager(object): def _set_auto_attach(self): cmd = ['attach', '--auto'] try: - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] except util.ProcessExecutionError as e: self.log_warn("Auto-attach failed with: {0}".format(e)) return False @@ -294,12 +292,12 @@ class SubscriptionManager(object): # Get all available pools cmd = ['list', '--available', '--pool-only'] - results, errors = self._sub_man_cli(cmd) + results = self._sub_man_cli(cmd)[0] available = (results.rstrip()).split("\n") # Get all consumed pools cmd = ['list', '--consumed', '--pool-only'] - results, errors = self._sub_man_cli(cmd) + results = self._sub_man_cli(cmd)[0] consumed = (results.rstrip()).split("\n") return available, consumed @@ -311,14 +309,14 @@ class SubscriptionManager(object): ''' cmd = ['repos', '--list-enabled'] - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] active_repos = [] for repo in return_out.split("\n"): if "Repo ID:" in repo: active_repos.append((repo.split(':')[1]).strip()) cmd = ['repos', '--list-disabled'] - return_out, return_err = self._sub_man_cli(cmd) + return_out = self._sub_man_cli(cmd)[0] inactive_repos = [] for repo in return_out.split("\n"): diff --git a/cloudinit/config/cc_snap.py b/cloudinit/config/cc_snap.py index a7a03214..90724b81 100644 --- a/cloudinit/config/cc_snap.py +++ b/cloudinit/config/cc_snap.py @@ -203,12 +203,12 @@ def maybe_install_squashfuse(cloud): return try: cloud.distro.update_package_sources() - except Exception as e: + except Exception: util.logexc(LOG, 
"Package update failed") raise try: cloud.distro.install_packages(['squashfuse']) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to install squashfuse") raise diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index bab80bbe..15bee2d3 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -213,7 +213,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None): def read_installed_packages(): ret = [] - for (name, date, version, dev) in read_pkg_data(): + for (name, _date, _version, dev) in read_pkg_data(): if dev: ret.append(NAMESPACE_DELIM.join([name, dev])) else: @@ -222,7 +222,7 @@ def read_installed_packages(): def read_pkg_data(): - out, err = util.subp([SNAPPY_CMD, "list"]) + out, _err = util.subp([SNAPPY_CMD, "list"]) pkg_data = [] for line in out.splitlines()[1:]: toks = line.split(sep=None, maxsplit=3) diff --git a/cloudinit/config/cc_ubuntu_advantage.py b/cloudinit/config/cc_ubuntu_advantage.py index 29d18c96..5e082bd6 100644 --- a/cloudinit/config/cc_ubuntu_advantage.py +++ b/cloudinit/config/cc_ubuntu_advantage.py @@ -148,12 +148,12 @@ def maybe_install_ua_tools(cloud): return try: cloud.distro.update_package_sources() - except Exception as e: + except Exception: util.logexc(LOG, "Package update failed") raise try: cloud.distro.install_packages(['ubuntu-advantage-tools']) - except Exception as e: + except Exception: util.logexc(LOG, "Failed to install ubuntu-advantage-tools") raise diff --git a/cloudinit/config/schema.py b/cloudinit/config/schema.py index ca7d0d5b..76826e05 100644 --- a/cloudinit/config/schema.py +++ b/cloudinit/config/schema.py @@ -297,8 +297,8 @@ def get_schema(): configs_dir = os.path.dirname(os.path.abspath(__file__)) potential_handlers = find_modules(configs_dir) - for (fname, mod_name) in potential_handlers.items(): - mod_locs, looked_locs = importer.find_module( + for (_fname, mod_name) in potential_handlers.items(): + mod_locs, _looked_locs = importer.find_module( mod_name, ['cloudinit.config'], ['schema']) if mod_locs: mod = importer.import_module(mod_locs[0]) diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 099fac5c..5b1718a4 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -113,7 +113,7 @@ class Distro(distros.Distro): n = re.search(r'\d+$', dev) index = n.group(0) - (out, err) = util.subp(['ifconfig', '-a']) + (out, _err) = util.subp(['ifconfig', '-a']) ifconfigoutput = [x for x in (out.strip()).splitlines() if len(x.split()) > 0] bsddev = 'NOT_FOUND' diff --git a/cloudinit/distros/ubuntu.py b/cloudinit/distros/ubuntu.py index fdc1f622..68154104 100644 --- a/cloudinit/distros/ubuntu.py +++ b/cloudinit/distros/ubuntu.py @@ -25,7 +25,7 @@ class Distro(debian.Distro): def preferred_ntp_clients(self): """The preferred ntp client is dependent on the version.""" if not self._preferred_ntp_clients: - (name, version, codename) = util.system_info()['dist'] + (_name, _version, codename) = util.system_info()['dist'] # Xenial cloud-init only installed ntp, UbuntuCore has timesyncd. 
if codename == "xenial" and not util.system_is_snappy(): self._preferred_ntp_clients = ['ntp'] diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index f69c0ef2..80054546 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -295,7 +295,7 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): def _version_2(netcfg): renames = [] - for key, ent in netcfg.get('ethernets', {}).items(): + for ent in netcfg.get('ethernets', {}).values(): # only rename if configured to do so name = ent.get('set-name') if not name: diff --git a/cloudinit/net/cmdline.py b/cloudinit/net/cmdline.py index 9e9fe0fe..f89a0f73 100755 --- a/cloudinit/net/cmdline.py +++ b/cloudinit/net/cmdline.py @@ -65,7 +65,7 @@ def _klibc_to_config_entry(content, mac_addrs=None): iface['mac_address'] = mac_addrs[name] # Handle both IPv4 and IPv6 values - for v, pre in (('ipv4', 'IPV4'), ('ipv6', 'IPV6')): + for pre in ('IPV4', 'IPV6'): # if no IPV4ADDR or IPV6ADDR, then go on. if pre + "ADDR" not in data: continue diff --git a/cloudinit/net/dhcp.py b/cloudinit/net/dhcp.py index 087c0c03..12cf5097 100644 --- a/cloudinit/net/dhcp.py +++ b/cloudinit/net/dhcp.py @@ -216,7 +216,7 @@ def networkd_get_option_from_leases(keyname, leases_d=None): if leases_d is None: leases_d = NETWORKD_LEASES_DIR leases = networkd_load_leases(leases_d=leases_d) - for ifindex, data in sorted(leases.items()): + for _ifindex, data in sorted(leases.items()): if data.get(keyname): return data[keyname] return None diff --git a/cloudinit/net/sysconfig.py b/cloudinit/net/sysconfig.py index 39d89c46..7a7f5093 100644 --- a/cloudinit/net/sysconfig.py +++ b/cloudinit/net/sysconfig.py @@ -364,7 +364,7 @@ class Renderer(renderer.Renderer): @classmethod def _render_subnet_routes(cls, iface_cfg, route_cfg, subnets): - for i, subnet in enumerate(subnets, start=len(iface_cfg.children)): + for _, subnet in enumerate(subnets, start=len(iface_cfg.children)): for route in subnet.get('routes', []): is_ipv6 = subnet.get('ipv6') or is_ipv6_addr(route['gateway']) diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py index 4f62d2f9..e5dfab33 100644 --- a/cloudinit/reporting/events.py +++ b/cloudinit/reporting/events.py @@ -192,7 +192,7 @@ class ReportEventStack(object): def _childrens_finish_info(self): for cand_result in (status.FAIL, status.WARN): - for name, (value, msg) in self.children.items(): + for _name, (value, _msg) in self.children.items(): if value == cand_result: return (value, self.message) return (self.result, self.message) diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 22279d09..858e0827 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -45,7 +45,7 @@ def _is_aliyun(): def parse_public_keys(public_keys): keys = [] - for key_id, key_body in public_keys.items(): + for _key_id, key_body in public_keys.items(): if isinstance(key_body, str): keys.append(key_body.strip()) elif isinstance(key_body, list): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 0ee622e2..a71197a6 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -107,31 +107,24 @@ def find_dev_from_busdev(camcontrol_out, busdev): return None -def get_dev_storvsc_sysctl(): +def execute_or_debug(cmd, fail_ret=None): try: - sysctl_out, err = util.subp(['sysctl', 'dev.storvsc']) + return util.subp(cmd)[0] except util.ProcessExecutionError: - 
LOG.debug("Fail to execute sysctl dev.storvsc") - sysctl_out = "" - return sysctl_out + LOG.debug("Failed to execute: %s", ' '.join(cmd)) + return fail_ret + + +def get_dev_storvsc_sysctl(): + return execute_or_debug(["sysctl", "dev.storvsc"], fail_ret="") def get_camcontrol_dev_bus(): - try: - camcontrol_b_out, err = util.subp(['camcontrol', 'devlist', '-b']) - except util.ProcessExecutionError: - LOG.debug("Fail to execute camcontrol devlist -b") - return None - return camcontrol_b_out + return execute_or_debug(['camcontrol', 'devlist', '-b']) def get_camcontrol_dev(): - try: - camcontrol_out, err = util.subp(['camcontrol', 'devlist']) - except util.ProcessExecutionError: - LOG.debug("Fail to execute camcontrol devlist") - return None - return camcontrol_out + return execute_or_debug(['camcontrol', 'devlist']) def get_resource_disk_on_freebsd(port_id): @@ -474,7 +467,7 @@ class DataSourceAzure(sources.DataSource): before we go into our polling loop.""" try: get_metadata_from_fabric(None, lease['unknown-245']) - except Exception as exc: + except Exception: LOG.warning( "Error communicating with Azure fabric; You may experience." "connectivity issues.", exc_info=True) @@ -492,7 +485,7 @@ class DataSourceAzure(sources.DataSource): jump back into the polling loop in order to retrieve the ovf_env.""" if not ret: return False - (md, self.userdata_raw, cfg, files) = ret + (_md, self.userdata_raw, cfg, _files) = ret path = REPROVISION_MARKER_FILE if (cfg.get('PreprovisionedVm') is True or os.path.isfile(path)): @@ -528,7 +521,7 @@ class DataSourceAzure(sources.DataSource): self.ds_cfg['agent_command']) try: fabric_data = metadata_func() - except Exception as exc: + except Exception: LOG.warning( "Error communicating with Azure fabric; You may experience." 
"connectivity issues.", exc_info=True) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 6ac88635..aa56addb 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -204,7 +204,7 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, seed_url = seed_url[:-1] md = {} - for path, dictname, binary, optional in DS_FIELDS: + for path, _dictname, binary, optional in DS_FIELDS: if version is None: url = "%s/%s" % (seed_url, path) else: diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index dc914a72..178ccb0f 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -556,7 +556,7 @@ def search_file(dirpath, filename): if not dirpath or not filename: return None - for root, dirs, files in os.walk(dirpath): + for root, _dirs, files in os.walk(dirpath): if filename in files: return os.path.join(root, filename) diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index e55a7638..fb166ae1 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -86,7 +86,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): md_urls.append(md_url) url2base[md_url] = url - (max_wait, timeout, retries) = self._get_url_settings() + (max_wait, timeout, _retries) = self._get_url_settings() start_time = time.time() avail_url = url_helper.wait_for_url(urls=md_urls, max_wait=max_wait, timeout=timeout) @@ -106,7 +106,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): except IOError: return False - (max_wait, timeout, retries) = self._get_url_settings() + (_max_wait, timeout, retries) = self._get_url_settings() try: results = util.log_time(LOG.debug, diff --git a/cloudinit/sources/helpers/digitalocean.py b/cloudinit/sources/helpers/digitalocean.py index 693f8d5c..0e7cccac 100644 --- a/cloudinit/sources/helpers/digitalocean.py +++ b/cloudinit/sources/helpers/digitalocean.py @@ -41,10 +41,9 @@ def assign_ipv4_link_local(nic=None): "address") try: - (result, _err) = util.subp(ip_addr_cmd) + util.subp(ip_addr_cmd) LOG.debug("assigned ip4LL address '%s' to '%s'", addr, nic) - - (result, _err) = util.subp(ip_link_cmd) + util.subp(ip_link_cmd) LOG.debug("brought device '%s' up", nic) except Exception: util.logexc(LOG, "ip4LL address assignment of '%s' to '%s' failed." 
@@ -75,7 +74,7 @@ def del_ipv4_link_local(nic=None): ip_addr_cmd = ['ip', 'addr', 'flush', 'dev', nic] try: - (result, _err) = util.subp(ip_addr_cmd) + util.subp(ip_addr_cmd) LOG.debug("removed ip4LL addresses from %s", nic) except Exception as e: diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 26f3168d..a4cf0667 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -638,7 +638,7 @@ def convert_net_json(network_json=None, known_macs=None): known_macs = net.get_interfaces_by_mac() # go through and fill out the link_id_info with names - for link_id, info in link_id_info.items(): + for _link_id, info in link_id_info.items(): if info.get('name'): continue if info.get('mac') in known_macs: diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 2d8900e2..3ef8c624 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -73,7 +73,7 @@ class NicConfigurator(object): The mac address(es) are in the lower case """ cmd = ['ip', 'addr', 'show'] - (output, err) = util.subp(cmd) + output, _err = util.subp(cmd) sections = re.split(r'\n\d+: ', '\n' + output)[1:] macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' diff --git a/cloudinit/sources/helpers/vmware/imc/config_passwd.py b/cloudinit/sources/helpers/vmware/imc/config_passwd.py index 75cfbaaf..8c91fa41 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_passwd.py +++ b/cloudinit/sources/helpers/vmware/imc/config_passwd.py @@ -56,10 +56,10 @@ class PasswordConfigurator(object): LOG.info('Expiring password.') for user in uidUserList: try: - out, err = util.subp(['passwd', '--expire', user]) + util.subp(['passwd', '--expire', user]) except util.ProcessExecutionError as e: if os.path.exists('/usr/bin/chage'): - out, e = util.subp(['chage', '-d', '0', user]) + util.subp(['chage', '-d', '0', user]) else: LOG.warning('Failed to expire password for %s with error: ' '%s', user, e) diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index 44075255..a590f323 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -91,7 +91,7 @@ def enable_nics(nics): for attempt in range(0, enableNicsWaitRetries): logger.debug("Trying to connect interfaces, attempt %d", attempt) - (out, err) = set_customization_status( + (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS, nics) @@ -104,7 +104,7 @@ def enable_nics(nics): return for count in range(0, enableNicsWaitCount): - (out, err) = set_customization_status( + (out, _err) = set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS, nics) diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index e7fda22a..452e9219 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -278,7 +278,7 @@ class TestDataSource(CiTestCase): base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505 # Import all DataSource subclasses so we can inspect them. 
modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) - for loc, name in modules.items(): + for _loc, name in modules.items(): mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) if mod_locs: importer.import_module(mod_locs[0]) diff --git a/cloudinit/templater.py b/cloudinit/templater.py index 9a087e1c..7e7acb86 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -147,7 +147,7 @@ def render_string(content, params): Warning: py2 str with non-ascii chars will cause UnicodeDecodeError.""" if not params: params = {} - template_type, renderer, content = detect_template(content) + _template_type, renderer, content = detect_template(content) return renderer(content, params) # vi: ts=4 expandtab diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 82fd347b..5aada6e7 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -334,7 +334,7 @@ def dir2dict(startdir, prefix=None): flist = {} if prefix is None: prefix = startdir - for root, dirs, files in os.walk(startdir): + for root, _dirs, files in os.walk(startdir): for fname in files: fpath = os.path.join(root, fname) key = fpath[len(prefix):] diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index 3f37dbb6..76eed076 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -135,7 +135,7 @@ class TestGetHostnameFqdn(CiTestCase): def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): """Calls to cloud.get_hostname pass the metadata_only parameter.""" mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') - hostname, fqdn = util.get_hostname_fqdn( + _hn, _fqdn = util.get_hostname_fqdn( cfg={}, cloud=mycloud, metadata_only=True) self.assertEqual( [{'fqdn': True, 'metadata_only': True}, diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 03a573af..1de07b1c 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -519,7 +519,7 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, resource_owner_secret=token_secret, signature_method=oauth1.SIGNATURE_PLAINTEXT, timestamp=timestamp) - uri, signed_headers, body = client.sign(url) + _uri, signed_headers, _body = client.sign(url) return signed_headers # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 1717b529..310758dd 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2214,7 +2214,7 @@ def parse_mtab(path): def find_freebsd_part(label_part): if label_part.startswith("/dev/label/"): target_label = label_part[5:] - (label_part, err) = subp(['glabel', 'status', '-s']) + (label_part, _err) = subp(['glabel', 'status', '-s']) for labels in label_part.split("\n"): items = labels.split() if len(items) > 0 and items[0].startswith(target_label): diff --git a/tests/cloud_tests/bddeb.py b/tests/cloud_tests/bddeb.py index b9cfcfa6..f04d0cd4 100644 --- a/tests/cloud_tests/bddeb.py +++ b/tests/cloud_tests/bddeb.py @@ -113,7 +113,7 @@ def bddeb(args): @return_value: fail count """ LOG.info('preparing to build cloud-init deb') - (res, failed) = run_stage('build deb', [partial(setup_build, args)]) + _res, failed = run_stage('build deb', [partial(setup_build, args)]) return failed # vi: ts=4 expandtab diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index d4f9135b..1ba72856 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -25,7 +25,8 @@ def collect_script(instance, base_dir, script, script_name): script.encode(), rcs=False, 
description='collect: {}'.format(script_name)) if err: - LOG.debug("collect script %s had stderr: %s", script_name, err) + LOG.debug("collect script %s exited '%s' and had stderr: %s", + script_name, err, exit) if not isinstance(out, bytes): raise util.PlatformError( "Collection of '%s' returned type %s, expected bytes: %s" % diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py index 3bad021f..cc439d29 100644 --- a/tests/cloud_tests/platforms/instances.py +++ b/tests/cloud_tests/platforms/instances.py @@ -108,7 +108,7 @@ class Instance(TargetBase): return client except (ConnectionRefusedError, AuthenticationException, BadHostKeyException, ConnectionResetError, SSHException, - OSError) as e: + OSError): retries -= 1 time.sleep(10) diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index 0d957bca..1c17c781 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -152,9 +152,8 @@ class LXDInstance(Instance): return fp.read() try: - stdout, stderr = subp( - ['lxc', 'console', '--show-log', self.name], decode=False) - return stdout + return subp(['lxc', 'console', '--show-log', self.name], + decode=False)[0] except ProcessExecutionError as e: raise PlatformError( "console log", @@ -214,11 +213,10 @@ def _has_proper_console_support(): reason = "LXD Driver version not 3.x+ (%s)" % dver else: try: - stdout, stderr = subp(['lxc', 'console', '--help'], - decode=False) + stdout = subp(['lxc', 'console', '--help'], decode=False)[0] if not (b'console' in stdout and b'log' in stdout): reason = "no '--log' in lxc console --help" - except ProcessExecutionError as e: + except ProcessExecutionError: reason = "no 'console' command in lxc client" if reason: diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py index 6d242115..4e195709 100644 --- a/tests/cloud_tests/setup_image.py +++ b/tests/cloud_tests/setup_image.py @@ -25,10 +25,9 @@ def installed_package_version(image, package, ensure_installed=True): else: raise NotImplementedError - msg = 'query version for package: {}'.format(package) - (out, err, exit) = image.execute( - cmd, description=msg, rcs=(0,) if ensure_installed else range(0, 256)) - return out.strip() + return image.execute( + cmd, description='query version for package: {}'.format(package), + rcs=(0,) if ensure_installed else range(0, 256))[0].strip() def install_deb(args, image): @@ -54,7 +53,7 @@ def install_deb(args, image): remote_path], description=msg) # check installed deb version matches package fmt = ['-W', "--showformat=${Version}"] - (out, err, exit) = image.execute(['dpkg-deb'] + fmt + [remote_path]) + out = image.execute(['dpkg-deb'] + fmt + [remote_path])[0] expected_version = out.strip() found_version = installed_package_version(image, 'cloud-init') if expected_version != found_version: @@ -85,7 +84,7 @@ def install_rpm(args, image): image.execute(['rpm', '-U', remote_path], description=msg) fmt = ['--queryformat', '"%{VERSION}"'] - (out, err, exit) = image.execute(['rpm', '-q'] + fmt + [remote_path]) + (out, _err, _exit) = image.execute(['rpm', '-q'] + fmt + [remote_path]) expected_version = out.strip() found_version = installed_package_version(image, 'cloud-init') if expected_version != found_version: diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 4fda8f91..0d1916b4 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py 
@@ -159,7 +159,7 @@ class CloudTestCase(unittest.TestCase): expected_net_keys = [ 'public-ipv4s', 'ipv4-associations', 'local-hostname', 'public-hostname'] - for mac, mac_data in macs.items(): + for mac_data in macs.values(): for key in expected_net_keys: self.assertIn(key, mac_data) self.assertIsNotNone( diff --git a/tests/cloud_tests/testcases/examples/including_user_groups.py b/tests/cloud_tests/testcases/examples/including_user_groups.py index 93b7a82d..4067348d 100644 --- a/tests/cloud_tests/testcases/examples/including_user_groups.py +++ b/tests/cloud_tests/testcases/examples/including_user_groups.py @@ -42,7 +42,7 @@ class TestUserGroups(base.CloudTestCase): def test_user_root_in_secret(self): """Test root user is in 'secret' group.""" - user, _, groups = self.get_data_file('root_groups').partition(":") + _user, _, groups = self.get_data_file('root_groups').partition(":") self.assertIn("secret", groups.split(), msg="User root is not in group 'secret'") diff --git a/tests/cloud_tests/testcases/modules/user_groups.py b/tests/cloud_tests/testcases/modules/user_groups.py index 93b7a82d..4067348d 100644 --- a/tests/cloud_tests/testcases/modules/user_groups.py +++ b/tests/cloud_tests/testcases/modules/user_groups.py @@ -42,7 +42,7 @@ class TestUserGroups(base.CloudTestCase): def test_user_root_in_secret(self): """Test root user is in 'secret' group.""" - user, _, groups = self.get_data_file('root_groups').partition(":") + _user, _, groups = self.get_data_file('root_groups').partition(":") self.assertIn("secret", groups.split(), msg="User root is not in group 'secret'") diff --git a/tests/cloud_tests/util.py b/tests/cloud_tests/util.py index 3dd4996d..06f7d865 100644 --- a/tests/cloud_tests/util.py +++ b/tests/cloud_tests/util.py @@ -358,7 +358,7 @@ class TargetBase(object): # when sh is invoked with '-c', then the first argument is "$0" # which is commonly understood as the "program name". 
# 'read_data' is the program name, and 'remote_path' is '$1' - stdout, stderr, rc = self._execute( + stdout, _stderr, rc = self._execute( ["sh", "-c", 'exec cat "$1"', 'read_data', remote_path]) if rc != 0: raise RuntimeError("Failed to read file '%s'" % remote_path) diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py index 25878d7a..f1ab02e9 100644 --- a/tests/unittests/test__init__.py +++ b/tests/unittests/test__init__.py @@ -214,7 +214,7 @@ class TestCmdlineUrl(CiTestCase): def test_no_key_found(self, m_read): cmdline = "ro mykey=http://example.com/foo root=foo" fpath = self.tmp_path("ccpath") - lvl, msg = main.attempt_cmdline_url( + lvl, _msg = main.attempt_cmdline_url( fpath, network=True, cmdline=cmdline) m_read.assert_not_called() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3e8b7913..88fe76c7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -214,7 +214,7 @@ scbus-1 on xpt0 bus 0 self.assertIn(tag, x) def tags_equal(x, y): - for x_tag, x_val in x.items(): + for x_val in x.values(): y_val = y.get(x_val.tag) self.assertEqual(x_val.text, y_val.text) @@ -1216,7 +1216,7 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): fake_resp.return_value = mock.MagicMock(status_code=200, text=content, content=content) dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) - md, ud, cfg, d = dsa._reprovision() + md, _ud, cfg, _d = dsa._reprovision() self.assertEqual(md['local-hostname'], hostname) self.assertEqual(cfg['system_info']['default_user']['name'], username) self.assertEqual(fake_resp.call_args_list, diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 6e4031cf..c84d067e 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -53,7 +53,7 @@ class TestMAASDataSource(CiTestCase): my_d = os.path.join(self.tmp, "valid_extra") populate_dir(my_d, data) - ud, md, vd = DataSourceMAAS.read_maas_seed_dir(my_d) + ud, md, _vd = DataSourceMAAS.read_maas_seed_dir(my_d) self.assertEqual(userdata, ud) for key in ('instance-id', 'local-hostname'): @@ -149,7 +149,7 @@ class TestMAASDataSource(CiTestCase): 'meta-data/local-hostname': 'test-hostname', 'meta-data/vendor-data': yaml.safe_dump(expected_vd).encode(), } - ud, md, vd = self.mock_read_maas_seed_url( + _ud, md, vd = self.mock_read_maas_seed_url( valid, "http://example.com/foo") self.assertEqual(valid['meta-data/instance-id'], md['instance-id']) diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 70d50de4..cdbd1e1a 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -51,9 +51,6 @@ class TestNoCloudDataSource(CiTestCase): class PsuedoException(Exception): pass - def my_find_devs_with(*args, **kwargs): - raise PsuedoException - self.mocks.enter_context( mock.patch.object(util, 'find_devs_with', side_effect=PsuedoException)) diff --git a/tests/unittests/test_handler/test_handler_apt_source_v3.py b/tests/unittests/test_handler/test_handler_apt_source_v3.py index 7bb1b7c4..e486862d 100644 --- a/tests/unittests/test_handler/test_handler_apt_source_v3.py +++ b/tests/unittests/test_handler/test_handler_apt_source_v3.py @@ -528,7 +528,7 @@ class TestAptSourceConfig(t_help.FilesystemMockingTestCase): expected = sorted([npre + suff for opre, npre, suff 
in files]) # create files - for (opre, npre, suff) in files: + for (opre, _npre, suff) in files: fpath = os.path.join(apt_lists_d, opre + suff) util.write_file(fpath, content=fpath) diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index 02676aa6..17c53559 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -76,7 +76,7 @@ class TestNtp(FilesystemMockingTestCase): template = TIMESYNCD_TEMPLATE else: template = NTP_TEMPLATE - (confpath, template_fn) = self._generate_template(template=template) + (confpath, _template_fn) = self._generate_template(template=template) ntpconfig = copy.deepcopy(dcfg[client]) ntpconfig['confpath'] = confpath ntpconfig['template_name'] = os.path.basename(confpath) diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index 1080e135..20c87efa 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -50,12 +50,12 @@ class TestTemplates(test_helpers.CiTestCase): def test_detection(self): blob = "## template:cheetah" - (template_type, renderer, contents) = templater.detect_template(blob) + (template_type, _renderer, contents) = templater.detect_template(blob) self.assertIn("cheetah", template_type) self.assertEqual("", contents.strip()) blob = "blahblah $blah" - (template_type, renderer, contents) = templater.detect_template(blob) + (template_type, _renderer, _contents) = templater.detect_template(blob) self.assertIn("cheetah", template_type) self.assertEqual(blob, contents) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index e04ea031..84941c7d 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -774,11 +774,11 @@ class TestSubp(helpers.CiTestCase): def test_subp_reads_env(self): with mock.patch.dict("os.environ", values={'FOO': 'BAR'}): - out, err = util.subp(self.printenv + ['FOO'], capture=True) + out, _err = util.subp(self.printenv + ['FOO'], capture=True) self.assertEqual('FOO=BAR', out.splitlines()[0]) def test_subp_env_and_update_env(self): - out, err = util.subp( + out, _err = util.subp( self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, env={'FOO': 'BAR'}, update_env={'HOME': '/myhome', 'K2': 'V2'}) @@ -788,7 +788,7 @@ class TestSubp(helpers.CiTestCase): def test_subp_update_env(self): extra = {'FOO': 'BAR', 'HOME': '/root', 'K1': 'V1'} with mock.patch.dict("os.environ", values=extra): - out, err = util.subp( + out, _err = util.subp( self.printenv + ['FOO', 'HOME', 'K1', 'K2'], capture=True, update_env={'HOME': '/myhome', 'K2': 'V2'}) -- cgit v1.2.3 From 323eb30940cae2069daf74517089220fccc4afb9 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 9 May 2018 09:36:56 -0600 Subject: tests: fix package and ca_cert cloud_tests on bionic package_update_upgrade_install was failing as htop is now included in Bionic images. Switch this test to install 'sl' instead. ca_certs integration test fails on cert_count test because bionic update-ca-certificates on bionic generates less symlinks for a given cert. Integration tests now collect dpkg-query --show output on every instance. Add a new assertPackageInstalled helper method which finds the package or package version installed on the instance. Adapt existing byobu, package_update_upgrade_install, ntp and salt_minion tests to use assertPackageInstalled method. 
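As a usage illustration (a sketch added for clarity, not code from this change; the 'cloud-init'/'18.2' version pairing below is only an assumed example of the prefix matching the helper performs), a test module built on CloudTestCase can assert against the collected dpkg-query --show output roughly like this:

    from tests.cloud_tests.testcases import base

    class TestExample(base.CloudTestCase):
        """Hypothetical test module showing the new helper."""

        def test_byobu_installed(self):
            """Any installed version of byobu satisfies the check."""
            self.assertPackageInstalled('byobu')

        def test_cloud_init_version_prefix(self):
            """version= is matched as a prefix of the installed version."""
            self.assertPackageInstalled('cloud-init', version='18.2')
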
LP: #1769985 --- tests/cloud_tests/testcases.yaml | 4 ++-- tests/cloud_tests/testcases/base.py | 21 +++++++++++++++++++++ tests/cloud_tests/testcases/modules/byobu.py | 3 +-- tests/cloud_tests/testcases/modules/byobu.yaml | 3 --- tests/cloud_tests/testcases/modules/ca_certs.py | 21 +++++++++++++++++---- tests/cloud_tests/testcases/modules/ca_certs.yaml | 8 ++++++-- tests/cloud_tests/testcases/modules/ntp.py | 5 ++--- .../modules/package_update_upgrade_install.py | 14 ++++++-------- .../modules/package_update_upgrade_install.yaml | 9 +++------ tests/cloud_tests/testcases/modules/salt_minion.py | 3 +-- .../cloud_tests/testcases/modules/salt_minion.yaml | 3 --- 11 files changed, 59 insertions(+), 35 deletions(-) (limited to 'tests/cloud_tests') diff --git a/tests/cloud_tests/testcases.yaml b/tests/cloud_tests/testcases.yaml index a3e29900..a16d1ddf 100644 --- a/tests/cloud_tests/testcases.yaml +++ b/tests/cloud_tests/testcases.yaml @@ -24,9 +24,9 @@ base_test_data: status.json: | #!/bin/sh cat /run/cloud-init/status.json - cloud-init-version: | + package-versions: | #!/bin/sh - dpkg-query -W -f='${Version}' cloud-init + dpkg-query --show system.journal.gz: | #!/bin/sh [ -d /run/systemd ] || { echo "not systemd."; exit 0; } diff --git a/tests/cloud_tests/testcases/base.py b/tests/cloud_tests/testcases/base.py index 0d1916b4..696db8dd 100644 --- a/tests/cloud_tests/testcases/base.py +++ b/tests/cloud_tests/testcases/base.py @@ -31,6 +31,27 @@ class CloudTestCase(unittest.TestCase): def is_distro(self, distro_name): return self.os_cfg['os'] == distro_name + def assertPackageInstalled(self, name, version=None): + """Check dpkg-query --show output for matching package name. + + @param name: package base name + @param version: string representing a package version or part of a + version. + """ + pkg_out = self.get_data_file('package-versions') + pkg_match = re.search( + '^%s\t(?P.*)$' % name, pkg_out, re.MULTILINE) + if pkg_match: + installed_version = pkg_match.group('version') + if not version: + return # Success + if installed_version.startswith(version): + return # Success + raise AssertionError( + 'Expected package version %s-%s not found. Found %s' % + name, version, installed_version) + raise AssertionError('Package not installed: %s' % name) + def os_version_cmp(self, cmp_version): """Compare the version of the test to comparison_version. 
diff --git a/tests/cloud_tests/testcases/modules/byobu.py b/tests/cloud_tests/testcases/modules/byobu.py index 005ca014..74d0529a 100644 --- a/tests/cloud_tests/testcases/modules/byobu.py +++ b/tests/cloud_tests/testcases/modules/byobu.py @@ -9,8 +9,7 @@ class TestByobu(base.CloudTestCase): def test_byobu_installed(self): """Test byobu installed.""" - out = self.get_data_file('byobu_installed') - self.assertIn('/usr/bin/byobu', out) + self.assertPackageInstalled('byobu') def test_byobu_profile_enabled(self): """Test byobu profile.d file exists.""" diff --git a/tests/cloud_tests/testcases/modules/byobu.yaml b/tests/cloud_tests/testcases/modules/byobu.yaml index a9aa1f3f..d002a611 100644 --- a/tests/cloud_tests/testcases/modules/byobu.yaml +++ b/tests/cloud_tests/testcases/modules/byobu.yaml @@ -7,9 +7,6 @@ cloud_config: | #cloud-config byobu_by_default: enable collect_scripts: - byobu_installed: | - #!/bin/bash - which byobu byobu_profile_enabled: | #!/bin/bash ls /etc/profile.d/Z97-byobu.sh diff --git a/tests/cloud_tests/testcases/modules/ca_certs.py b/tests/cloud_tests/testcases/modules/ca_certs.py index e75f0413..6b56f639 100644 --- a/tests/cloud_tests/testcases/modules/ca_certs.py +++ b/tests/cloud_tests/testcases/modules/ca_certs.py @@ -7,10 +7,23 @@ from tests.cloud_tests.testcases import base class TestCaCerts(base.CloudTestCase): """Test ca certs module.""" - def test_cert_count(self): - """Test the count is proper.""" - out = self.get_data_file('cert_count') - self.assertEqual(5, int(out)) + def test_certs_updated(self): + """Test certs have been updated in /etc/ssl/certs.""" + out = self.get_data_file('cert_links') + # Bionic update-ca-certificates creates less links debian #895075 + unlinked_files = [] + links = {} + for cert_line in out.splitlines(): + if '->' in cert_line: + fname, _sep, link = cert_line.split() + links[fname] = link + else: + unlinked_files.append(cert_line) + self.assertEqual(['ca-certificates.crt'], unlinked_files) + self.assertEqual('cloud-init-ca-certs.pem', links['a535c1f3.0']) + self.assertEqual( + '/usr/share/ca-certificates/cloud-init-ca-certs.crt', + links['cloud-init-ca-certs.pem']) def test_cert_installed(self): """Test line from our cert exists.""" diff --git a/tests/cloud_tests/testcases/modules/ca_certs.yaml b/tests/cloud_tests/testcases/modules/ca_certs.yaml index d939f435..2cd91551 100644 --- a/tests/cloud_tests/testcases/modules/ca_certs.yaml +++ b/tests/cloud_tests/testcases/modules/ca_certs.yaml @@ -43,9 +43,13 @@ cloud_config: | DiH5uEqBXExjrj0FslxcVKdVj5glVcSmkLwZKbEU1OKwleT/iXFhvooWhQ== -----END CERTIFICATE----- collect_scripts: - cert_count: | + cert_links: | #!/bin/bash - ls -l /etc/ssl/certs | wc -l + # links printed -> + # non-links printed + for file in `ls /etc/ssl/certs`; do + [ -h /etc/ssl/certs/$file ] && echo -n $file ' -> ' && readlink /etc/ssl/certs/$file || echo $file; + done cert: | #!/bin/bash md5sum /etc/ssl/certs/ca-certificates.crt diff --git a/tests/cloud_tests/testcases/modules/ntp.py b/tests/cloud_tests/testcases/modules/ntp.py index b50e52fe..c63cc15e 100644 --- a/tests/cloud_tests/testcases/modules/ntp.py +++ b/tests/cloud_tests/testcases/modules/ntp.py @@ -9,15 +9,14 @@ class TestNtp(base.CloudTestCase): def test_ntp_installed(self): """Test ntp installed""" - out = self.get_data_file('ntp_installed') - self.assertEqual(0, int(out)) + self.assertPackageInstalled('ntp') def test_ntp_dist_entries(self): """Test dist config file is empty""" out = self.get_data_file('ntp_conf_dist_empty') self.assertEqual(0, int(out)) 
- def test_ntp_entires(self): + def test_ntp_entries(self): """Test config entries""" out = self.get_data_file('ntp_conf_pool_list') self.assertIn('pool.ntp.org iburst', out) diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py index a92dec22..fecad768 100644 --- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py +++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.py @@ -7,15 +7,13 @@ from tests.cloud_tests.testcases import base class TestPackageInstallUpdateUpgrade(base.CloudTestCase): """Test package install update upgrade module.""" - def test_installed_htop(self): - """Test htop got installed.""" - out = self.get_data_file('dpkg_htop') - self.assertEqual(1, int(out)) + def test_installed_sl(self): + """Test sl got installed.""" + self.assertPackageInstalled('sl') def test_installed_tree(self): """Test tree got installed.""" - out = self.get_data_file('dpkg_tree') - self.assertEqual(1, int(out)) + self.assertPackageInstalled('tree') def test_apt_history(self): """Test apt history for update command.""" @@ -23,13 +21,13 @@ class TestPackageInstallUpdateUpgrade(base.CloudTestCase): self.assertIn( 'Commandline: /usr/bin/apt-get --option=Dpkg::Options' '::=--force-confold --option=Dpkg::options::=--force-unsafe-io ' - '--assume-yes --quiet install htop tree', out) + '--assume-yes --quiet install sl tree', out) def test_cloud_init_output(self): """Test cloud-init-output for install & upgrade stuff.""" out = self.get_data_file('cloud-init-output.log') self.assertIn('Setting up tree (', out) - self.assertIn('Setting up htop (', out) + self.assertIn('Setting up sl (', out) self.assertIn('Reading package lists...', out) self.assertIn('Building dependency tree...', out) self.assertIn('Reading state information...', out) diff --git a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml index 71d24b83..dd79e438 100644 --- a/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml +++ b/tests/cloud_tests/testcases/modules/package_update_upgrade_install.yaml @@ -15,7 +15,7 @@ required_features: cloud_config: | #cloud-config packages: - - htop + - sl - tree package_update: true package_upgrade: true @@ -23,11 +23,8 @@ collect_scripts: apt_history_cmdline: | #!/bin/bash grep ^Commandline: /var/log/apt/history.log - dpkg_htop: | + dpkg_show: | #!/bin/bash - dpkg -l | grep htop | wc -l - dpkg_tree: | - #!/bin/bash - dpkg -l | grep tree | wc -l + dpkg-query --show # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/salt_minion.py b/tests/cloud_tests/testcases/modules/salt_minion.py index 70917a4c..fc9688ed 100644 --- a/tests/cloud_tests/testcases/modules/salt_minion.py +++ b/tests/cloud_tests/testcases/modules/salt_minion.py @@ -33,7 +33,6 @@ class Test(base.CloudTestCase): def test_minion_installed(self): """Test if the salt-minion package is installed""" - out = self.get_data_file('minion_installed') - self.assertEqual(1, int(out)) + self.assertPackageInstalled('salt-minion') # vi: ts=4 expandtab diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml index f20b9765..c24aa177 100644 --- a/tests/cloud_tests/testcases/modules/salt_minion.yaml +++ b/tests/cloud_tests/testcases/modules/salt_minion.yaml @@ -35,8 +35,5 @@ collect_scripts: grains: | #!/bin/bash cat /etc/salt/grains 
- minion_installed: | - #!/bin/bash - dpkg -l | grep salt-minion | grep ii | wc -l # vi: ts=4 expandtab -- cgit v1.2.3 From 589b542bfb3b6630b931a506ca017635059cef1d Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Wed, 16 May 2018 08:16:10 -0600 Subject: tests: restructure SSH and initial connections The SSH function was retrying and waiting for SSH for over an hour when an SSH connection was failing to be established. This reduces the amount of retries and time between each retry to prevent tests from running for hours. Also restructures how waiting for the system works: the system will attempt to SSH up to the boot timeout time by catching SSH connection failures and retrying until the timeout is reached. If the limit is reached now an exception is thrown to abort the test. Drive by - this also fixes printing of the instance name when collecting the console log, rather than showing a Python object address. Fixes LP: #1758409 --- tests/cloud_tests/collect.py | 2 +- tests/cloud_tests/platforms/instances.py | 39 ++++++++++++++++++++++++-------- 2 files changed, 30 insertions(+), 11 deletions(-) (limited to 'tests/cloud_tests') diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 1ba72856..78263bf5 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -42,7 +42,7 @@ def collect_console(instance, base_dir): @param base_dir: directory to write console log to """ logfile = os.path.join(base_dir, 'console.log') - LOG.debug('getting console log for %s to %s', instance, logfile) + LOG.debug('getting console log for %s to %s', instance.name, logfile) try: data = instance.console_log() except NotImplementedError as e: diff --git a/tests/cloud_tests/platforms/instances.py b/tests/cloud_tests/platforms/instances.py index cc439d29..95bc3b16 100644 --- a/tests/cloud_tests/platforms/instances.py +++ b/tests/cloud_tests/platforms/instances.py @@ -87,7 +87,12 @@ class Instance(TargetBase): self._ssh_client = None def _ssh_connect(self): - """Connect via SSH.""" + """Connect via SSH. + + Attempt to SSH to the client on the specific IP and port. If it + fails in some manner, then retry 2 more times for a total of 3 + attempts; sleeping a few seconds between attempts. 
+ """ if self._ssh_client: return self._ssh_client @@ -98,21 +103,22 @@ class Instance(TargetBase): client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) private_key = paramiko.RSAKey.from_private_key_file(self.ssh_key_file) - retries = 30 + retries = 3 while retries: try: client.connect(username=self.ssh_username, hostname=self.ssh_ip, port=self.ssh_port, - pkey=private_key, banner_timeout=30) + pkey=private_key) self._ssh_client = client return client except (ConnectionRefusedError, AuthenticationException, BadHostKeyException, ConnectionResetError, SSHException, OSError): retries -= 1 - time.sleep(10) + LOG.debug('Retrying ssh connection on connect failure') + time.sleep(3) - ssh_cmd = 'Failed ssh connection to %s@%s:%s after 300 seconds' % ( + ssh_cmd = 'Failed ssh connection to %s@%s:%s after 3 retries' % ( self.ssh_username, self.ssh_ip, self.ssh_port ) raise util.InTargetExecuteError(b'', b'', 1, ssh_cmd, 'ssh') @@ -128,18 +134,31 @@ class Instance(TargetBase): return ' '.join(l for l in test.strip().splitlines() if not l.lstrip().startswith('#')) - time = self.config['boot_timeout'] + boot_timeout = self.config['boot_timeout'] tests = [self.config['system_ready_script']] if wait_for_cloud_init: tests.append(self.config['cloud_init_ready_script']) formatted_tests = ' && '.join(clean_test(t) for t in tests) cmd = ('i=0; while [ $i -lt {time} ] && i=$(($i+1)); do {test} && ' - 'exit 0; sleep 1; done; exit 1').format(time=time, + 'exit 0; sleep 1; done; exit 1').format(time=boot_timeout, test=formatted_tests) - if self.execute(cmd, rcs=(0, 1))[-1] != 0: - raise OSError('timeout: after {}s system not started'.format(time)) - + end_time = time.time() + boot_timeout + while True: + try: + return_code = self.execute( + cmd, rcs=(0, 1), description='wait for instance start' + )[-1] + if return_code == 0: + break + except util.InTargetExecuteError: + LOG.warning("failed to connect via SSH") + + if time.time() < end_time: + time.sleep(3) + else: + raise util.PlatformError('ssh', 'after %ss instance is not ' + 'reachable' % boot_timeout) # vi: ts=4 expandtab -- cgit v1.2.3 From 2dab7046f88e540836498b363c90ad46302a7cc4 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 17 May 2018 11:57:37 -0400 Subject: cloud_tests: help pylint pylint missed finding a typo in the lxd platform because it could not determine that the variable was being used was a string. The variable was set by loading a yaml file which pylint couldn't know that it would be a string. In these cases, we can be more explicit. 
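Put another way, the fix is an explicit str() coercion on the value pulled out of the parsed YAML. A minimal sketch of the pattern (the function name and the 'blob' input are placeholders for this illustration, not code from the patch):

    import yaml

    def lxd_driver_version(blob):
        """Return driver_version from `lxc info` YAML output as a str.

        The explicit str() lets static analysis (pylint) treat the value
        as a string, and also covers YAML loading a bare numeric version
        such as 2.0 as a float.
        """
        info = yaml.safe_load(blob)
        return str(info.get('environment', {}).get('driver_version', ''))
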
--- tests/cloud_tests/platforms/lxd/instance.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tests/cloud_tests') diff --git a/tests/cloud_tests/platforms/lxd/instance.py b/tests/cloud_tests/platforms/lxd/instance.py index 1c17c781..d396519f 100644 --- a/tests/cloud_tests/platforms/lxd/instance.py +++ b/tests/cloud_tests/platforms/lxd/instance.py @@ -208,7 +208,7 @@ def _has_proper_console_support(): if 'console' not in info.get('api_extensions', []): reason = "LXD server does not support console api extension" else: - dver = info.get('environment', {}).get('driver_version', "") + dver = str(info.get('environment', {}).get('driver_version', "")) if dver.startswith("2.") or dver.startswith("1."): reason = "LXD Driver version not 3.x+ (%s)" % dver else: -- cgit v1.2.3 From 327af4a7e64a7e52f2bab7c6d80b79a2474d93eb Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Tue, 22 May 2018 09:58:07 -0400 Subject: tests: enable Ubuntu Cosmic in integration tests --- tests/cloud_tests/releases.yaml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'tests/cloud_tests') diff --git a/tests/cloud_tests/releases.yaml b/tests/cloud_tests/releases.yaml index c7dcbe83..defae02b 100644 --- a/tests/cloud_tests/releases.yaml +++ b/tests/cloud_tests/releases.yaml @@ -129,6 +129,22 @@ features: releases: # UBUNTU ================================================================= + cosmic: + # EOL: Jul 2019 + default: + enabled: true + release: cosmic + version: 18.10 + os: ubuntu + feature_groups: + - base + - debian_base + - ubuntu_specific + lxd: + sstreams_server: https://cloud-images.ubuntu.com/daily + alias: cosmic + setup_overrides: null + override_templates: false bionic: # EOL: Apr 2023 default: -- cgit v1.2.3 From 7b3c21615dac3b0d9163c9883309a2e7b675622a Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 10:14:07 -0600 Subject: test: add optional --preserve-instance arg to integraiton tests By default, integration tests destroy the test instances after each test run. To aid debug and development of integration tests, support a --preserve-instance argument which will leave the modified test instance in a stopped state for further debug. --- doc/rtd/topics/tests.rst | 7 ++++++- tests/cloud_tests/args.py | 3 +++ tests/cloud_tests/collect.py | 3 ++- tests/cloud_tests/stage.py | 15 ++++++++++++--- 4 files changed, 23 insertions(+), 5 deletions(-) (limited to 'tests/cloud_tests') diff --git a/doc/rtd/topics/tests.rst b/doc/rtd/topics/tests.rst index cac4a6e4..b83bd899 100644 --- a/doc/rtd/topics/tests.rst +++ b/doc/rtd/topics/tests.rst @@ -58,7 +58,8 @@ explaining how to run one or the other independently. $ tox -e citest -- run --verbose \ --os-name stretch --os-name xenial \ --deb cloud-init_0.7.8~my_patch_all.deb \ - --preserve-data --data-dir ~/collection + --preserve-data --data-dir ~/collection \ + --preserve-instance The above command will do the following: @@ -76,6 +77,10 @@ The above command will do the following: * ``--preserve-data`` always preserve collected data, do not remove data after successful test run +* ``--preserve-instance`` do not destroy the instance after test to allow + for debugging the stopped instance during integration test development. By + default, test instances are destroyed after the test completes. 
+ * ``--data-dir ~/collection`` write collected data into `~/collection`, rather than using a temporary directory diff --git a/tests/cloud_tests/args.py b/tests/cloud_tests/args.py index c6c1877b..ab345491 100644 --- a/tests/cloud_tests/args.py +++ b/tests/cloud_tests/args.py @@ -62,6 +62,9 @@ ARG_SETS = { (('-d', '--data-dir'), {'help': 'directory to store test data in', 'action': 'store', 'metavar': 'DIR', 'required': False}), + (('--preserve-instance',), + {'help': 'do not destroy the instance under test', + 'action': 'store_true', 'default': False, 'required': False}), (('--preserve-data',), {'help': 'do not remove collected data after successful run', 'action': 'store_true', 'default': False, 'required': False}),), diff --git a/tests/cloud_tests/collect.py b/tests/cloud_tests/collect.py index 78263bf5..75b50616 100644 --- a/tests/cloud_tests/collect.py +++ b/tests/cloud_tests/collect.py @@ -93,7 +93,8 @@ def collect_test_data(args, snapshot, os_name, test_name): # create test instance component = PlatformComponent( partial(platforms.get_instance, snapshot, user_data, - block=True, start=False, use_desc=test_name)) + block=True, start=False, use_desc=test_name), + preserve_instance=args.preserve_instance) LOG.info('collecting test data for test: %s', test_name) with component as instance: diff --git a/tests/cloud_tests/stage.py b/tests/cloud_tests/stage.py index 74a7d46d..d64a1dcc 100644 --- a/tests/cloud_tests/stage.py +++ b/tests/cloud_tests/stage.py @@ -12,9 +12,15 @@ from tests.cloud_tests import LOG class PlatformComponent(object): """Context manager to safely handle platform components.""" - def __init__(self, get_func): - """Store get_ function as partial with no args.""" + def __init__(self, get_func, preserve_instance=False): + """Store get_ function as partial with no args. + + @param get_func: Callable returning an instance from the platform. + @param preserve_instance: Boolean, when True, do not destroy instance + after test. Used for test development. + """ self.get_func = get_func + self.preserve_instance = preserve_instance def __enter__(self): """Create instance of platform component.""" @@ -24,7 +30,10 @@ class PlatformComponent(object): def __exit__(self, etype, value, trace): """Destroy instance.""" if self.instance is not None: - self.instance.destroy() + if self.preserve_instance: + LOG.info('Preserving test instance %s', self.instance.name) + else: + self.instance.destroy() def run_single(name, call): -- cgit v1.2.3 From 5ffcb511db8783aa9d32895f7017a7278d546f2f Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 11:54:30 -0600 Subject: tests: skip chrony integration tests on lxd running artful or older A fix for chrony support per LP: #1589780 is not expected in Artful or older series. Skip the chrony suite of tests when running on a container and ubuntu series represented is <= artful as errors are expected. --- tests/cloud_tests/testcases/modules/ntp_chrony.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'tests/cloud_tests') diff --git a/tests/cloud_tests/testcases/modules/ntp_chrony.py b/tests/cloud_tests/testcases/modules/ntp_chrony.py index 461630a8..7d341773 100644 --- a/tests/cloud_tests/testcases/modules/ntp_chrony.py +++ b/tests/cloud_tests/testcases/modules/ntp_chrony.py @@ -1,13 +1,24 @@ # This file is part of cloud-init. See LICENSE file for license information. 
"""cloud-init Integration Test Verify Script.""" +import unittest + from tests.cloud_tests.testcases import base class TestNtpChrony(base.CloudTestCase): """Test ntp module with chrony client""" - def test_chrony_entires(self): + def setUp(self): + """Skip this suite of tests on lxd and artful or older.""" + if self.platform == 'lxd': + if self.is_distro('ubuntu') and self.os_version_cmp('artful') <= 0: + raise unittest.SkipTest( + 'No support for chrony on containers <= artful.' + ' LP: #1589780') + return super(TestNtpChrony, self).setUp() + + def test_chrony_entries(self): """Test chrony config entries""" out = self.get_data_file('chrony_conf') self.assertIn('.pool.ntp.org', out) -- cgit v1.2.3 From d0f6c4602f9cc412d372e10bd7411ff0214c1435 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 13:15:31 -0600 Subject: tests: provide human-readable integration test summary when --verbose Integration tests will now provide a brief summary for test failures listed by platform and distribution. The failure summary will only consist of failed test name and assert error message. Drop the verbose dictionary of all integration test output because this content is unreadable given the large number of integration test results listed within this dictionary. --- tests/cloud_tests/verify.py | 47 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 46 insertions(+), 1 deletion(-) (limited to 'tests/cloud_tests') diff --git a/tests/cloud_tests/verify.py b/tests/cloud_tests/verify.py index 5a68a484..bfb27444 100644 --- a/tests/cloud_tests/verify.py +++ b/tests/cloud_tests/verify.py @@ -56,6 +56,51 @@ def verify_data(data_dir, platform, os_name, tests): return res +def format_test_failures(test_result): + """Return a human-readable printable format of test failures.""" + if not test_result['failures']: + return '' + failure_hdr = ' test failures:' + failure_fmt = ' * {module}.{class}.{function}\n {error}' + output = [] + for failure in test_result['failures']: + if not output: + output = [failure_hdr] + output.append(failure_fmt.format(**failure)) + return '\n'.join(output) + + +def format_results(res): + """Return human-readable results as a string""" + platform_hdr = 'Platform: {platform}' + distro_hdr = ' Distro: {distro}' + distro_summary_fmt = ( + ' test modules passed:{passed} tests failed:{failed}') + output = [''] + counts = {} + for platform, platform_data in res.items(): + output.append(platform_hdr.format(platform=platform)) + counts[platform] = {} + for distro, distro_data in platform_data.items(): + distro_failure_output = [] + output.append(distro_hdr.format(distro=distro)) + counts[platform][distro] = {'passed': 0, 'failed': 0} + for _, test_result in distro_data.items(): + if test_result['passed']: + counts[platform][distro]['passed'] += 1 + else: + counts[platform][distro]['failed'] += len( + test_result['failures']) + failure_output = format_test_failures(test_result) + if failure_output: + distro_failure_output.append(failure_output) + output.append( + distro_summary_fmt.format(**counts[platform][distro])) + if distro_failure_output: + output.extend(distro_failure_output) + return '\n'.join(output) + + def verify(args): """Verify test data. 
@@ -90,7 +135,7 @@ def verify(args): failed += len(fail_list) # dump results - LOG.debug('verify results: %s', res) + LOG.debug('\n---- Verify summarized results:\n%s', format_results(res)) if args.result: util.merge_results({'verify': res}, args.result) -- cgit v1.2.3 From 27283c31f4bf85f40588cfa3b31389d70ec00243 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 12 Jun 2018 16:42:54 -0600 Subject: tests: fix salt_minion integration test on bionic and later In ubuntu, the salt-minion package version 2017.7.4+dfsg1-1 or later automatically moves any seed keys from /etc/salt/pki/minion/ to /var/lib/salt/pki/minion/. Fix integration tests to collect either files in either /etc/salt/pki/minion/ or /var/lib/salt/pki/minion/. --- tests/cloud_tests/testcases/modules/salt_minion.yaml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'tests/cloud_tests') diff --git a/tests/cloud_tests/testcases/modules/salt_minion.yaml b/tests/cloud_tests/testcases/modules/salt_minion.yaml index c24aa177..9227147c 100644 --- a/tests/cloud_tests/testcases/modules/salt_minion.yaml +++ b/tests/cloud_tests/testcases/modules/salt_minion.yaml @@ -28,10 +28,20 @@ collect_scripts: cat /etc/salt/minion_id minion.pem: | #!/bin/bash - cat /etc/salt/pki/minion/minion.pem + PRIV_KEYFILE=/etc/salt/pki/minion/minion.pem + if [ ! -f $PRIV_KEYFILE ]; then + # Bionic and later automatically moves /etc/salt/pki/minion/* + PRIV_KEYFILE=/var/lib/salt/pki/minion/minion.pem + fi + cat $PRIV_KEYFILE minion.pub: | #!/bin/bash - cat /etc/salt/pki/minion/minion.pub + PUB_KEYFILE=/etc/salt/pki/minion/minion.pub + if [ ! -f $PUB_KEYFILE ]; then + # Bionic and later automatically moves /etc/salt/pki/minion/* + PUB_KEYFILE=/var/lib/salt/pki/minion/minion.pub + fi + cat $PUB_KEYFILE grains: | #!/bin/bash cat /etc/salt/grains -- cgit v1.2.3
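The collect scripts above handle the relocated seed keys in shell; the same "prefer /etc/salt, fall back to /var/lib/salt" idea, sketched in Python purely for illustration (this helper is not part of the test suite):

    import os.path

    def salt_minion_key_path(name,
                             etc_dir='/etc/salt/pki/minion',
                             var_dir='/var/lib/salt/pki/minion'):
        """Return the seed key path, preferring /etc/salt.

        salt-minion 2017.7.4+dfsg1-1 and later (Bionic) move seed keys
        to /var/lib/salt/pki/minion, so fall back there when the file is
        not present under /etc/salt.
        """
        etc_path = os.path.join(etc_dir, name)
        if os.path.isfile(etc_path):
            return etc_path
        return os.path.join(var_dir, name)

    # usage: salt_minion_key_path('minion.pem'), salt_minion_key_path('minion.pub')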